forked from Qortal/qortal

Work on auto-update + repository path + Settings bugfixes

AutoUpdate needs separate log files for its process because log4j2's rolling appender doesn't support locking, so AU_LOGGER is created in AutoUpdate and set up in a static { } block.
Added support for starting/querying/stopping the auto-update Windows service.
Changed Controller's public connectionUrl field to a public static getRepositoryUrl() method so it can incorporate repositoryPath from Settings.
Controller now starts the auto-update service on startup.
Fixed bugs with Settings not using userPath properly.
Removed obsolete standalone binary classes:
* blockgenerator
* txhex
* v1feeder

This commit is contained in:
parent e4482c5ade
commit 63a36073ec
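For context on the logging change: the separate log file only takes effect if the bundled log4j2 configuration resolves the log4j2.filenameTemplate system property (for example via a ${sys:log4j2.filenameTemplate} lookup, which is an assumption about the config and not shown in this diff) and the property is set before the first call into LogManager. A minimal sketch of that ordering requirement:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class LoggingOrderSketch {
        static {
            // Must run before any LogManager/Logger use, otherwise log4j2 has already
            // initialised its appenders and the property is never read.
            System.setProperty("log4j2.filenameTemplate", "log-auto-update.txt");
            System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
        }

        private static final Logger LOGGER = LogManager.getLogger(LoggingOrderSketch.class);

        public static void main(String[] args) {
            LOGGER.info("Should end up in log-auto-update.txt rather than the node's log file");
        }
    }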
@@ -1,7 +1,9 @@
package org.qora;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -11,10 +13,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAnyElement;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
import org.qora.api.ApiRequest;
@@ -28,11 +28,25 @@ import com.google.common.hash.HashCode;

public class AutoUpdate {

    static {
        // This static block will be called before others if using AutoUpdate.main()

        // Log into different files for auto-update
        System.setProperty("log4j2.filenameTemplate", "log-auto-update.txt");

        // This must go before any calls to LogManager/Logger
        System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
    }

    private static final Logger LOGGER = LogManager.getLogger(AutoUpdate.class);
    private static final Logger AU_LOGGER = LogManager.getLogger("auto-update");

    private static final String JAR_FILENAME = "qora-core.jar";
    private static final String NODE_EXE = "qora-core.exe";
    private static final String SERVICE_NAME = "qora-core auto-update.exe";

    private static final long CHECK_INTERVAL = 1 * 1000;
    private static final int MAX_ATTEMPTS = 10;
    private static final long CHECK_INTERVAL = 1 * 1000; // ms
    private static final int MAX_ATTEMPTS = 5;

    private static final Map<String, String> ARBITRARY_PARAMS = new HashMap<>();
    static {
@@ -43,19 +57,8 @@ public class AutoUpdate {
        ARBITRARY_PARAMS.put("reverse", "true");
    }

    static {
        // This must go before any calls to LogManager/Logger
        System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
    }

    @XmlAccessorType(XmlAccessType.FIELD)
    public static class Transactions {
        @XmlAnyElement(lax = true)
        public List<TransactionData> transactions;

        public Transactions() {
        }
    }
    private static volatile boolean stopRequested = false;
    private static String BASE_URI;

    public static void main(String[] args) {
        Security.insertProviderAt(new BouncyCastleProvider(), 0);
@@ -64,11 +67,13 @@ public class AutoUpdate {
        // Load/check settings, which potentially sets up blockchain config, etc.
        Settings.getInstance();

        final String BASE_URI = "http://localhost:" + Settings.getInstance().getApiPort() + "/";
        BASE_URI = "http://localhost:" + Settings.getInstance().getApiPort() + "/";
        AU_LOGGER.info(String.format("Starting auto-update service using API via %s", BASE_URI));

        Long buildTimestamp = null; // ms
        int failureCount = 0;

        while (true) {
        while (!stopRequested && failureCount < MAX_ATTEMPTS) {
            try {
                Thread.sleep(CHECK_INTERVAL);
            } catch (InterruptedException e) {
@@ -79,69 +84,112 @@ public class AutoUpdate {
            if (buildTimestamp == null) {
                // Grab node version and timestamp
                Object response = ApiRequest.perform(BASE_URI + "admin/info", NodeInfo.class, null);
                if (response == null || !(response instanceof NodeInfo))
                if (response == null || !(response instanceof NodeInfo)) {
                    ++failureCount;
                    continue;
                }

                NodeInfo nodeInfo = (NodeInfo) response;
                buildTimestamp = nodeInfo.buildTimestamp * 1000L;
                AU_LOGGER.info(String.format("Node's build info: version %s built %s", nodeInfo.buildVersion, nodeInfo.buildTimestamp));

                // API access success
                failureCount = 0;
            }

            // Look for "update" tx which is arbitrary tx with service 1 and timestamp later than buildTimestamp
            // http://localhost:9085/arbitrary/search?txGroupId=1&service=1&confirmationStatus=CONFIRMED&limit=1&reverse=true
            Object response = ApiRequest.perform(BASE_URI + "arbitrary/search", TransactionData.class, ARBITRARY_PARAMS);
            if (response == null || !(response instanceof List<?>))
            if (response == null || !(response instanceof List<?>)) {
                ++failureCount;
                continue;
            }

            List<?> listResponse = (List<?>) response;
            if (listResponse.isEmpty() || !(listResponse.get(0) instanceof TransactionData))
            if (listResponse.isEmpty())
                // Not a failure - just no matching transactions yet
                continue;

            if (!(listResponse.get(0) instanceof TransactionData)) {
                ++failureCount;
                continue;
            }

            @SuppressWarnings("unchecked")
            TransactionData transactionData = ((List<TransactionData>) listResponse).get(0);
            // API access success
            failureCount = 0;

            if (transactionData.getTimestamp() <= buildTimestamp)
                continue;

            ArbitraryTransactionData arbitraryTxData = (ArbitraryTransactionData) transactionData;
            AU_LOGGER.info(String.format("Found update ARBITRARY transaction %s", Base58.encode(arbitraryTxData.getSignature())));

            // Arbitrary transaction's data contains git commit hash needed to grab JAR:
            // https://github.com/catbref/qora-core/blob/cf86b5f3ce828f75cb18db1b685f2d9e29630d77/qora-core.jar
            InputStream in = ApiRequest.fetchStream(BASE_URI + "arbitrary/raw/" + Base58.encode(arbitraryTxData.getSignature()));
            if (in == null)
            if (in == null) {
                AU_LOGGER.warn(String.format("Failed to fetch raw ARBITRARY transaction %s", Base58.encode(arbitraryTxData.getSignature())));
                ++failureCount;
                continue;
            }

            byte[] commitHash = new byte[20];
            try {
                in.read(commitHash);
            } catch (IOException e) {
                AU_LOGGER.warn(String.format("Failed to fetch raw ARBITRARY transaction %s", Base58.encode(arbitraryTxData.getSignature())));
                ++failureCount;
                continue;
            }

            AU_LOGGER.info(String.format("Update's git commit hash: %s", HashCode.fromBytes(commitHash).toString()));

            String[] autoUpdateRepos = Settings.getInstance().getAutoUpdateRepos();
            for (String repo : autoUpdateRepos)
                if (attemptUpdate(commitHash, repo, BASE_URI))
                if (attemptUpdate(commitHash, repo))
                    break;

            // Reset cached node info in case we've updated
            buildTimestamp = null;
        }
            // API access success
            failureCount = 0;
        }

    private static boolean attemptUpdate(byte[] commitHash, String repoBaseUri, String BASE_URI) {
        if (failureCount >= MAX_ATTEMPTS)
            AU_LOGGER.warn("Stopping auto-update service due to API failures");
        else
            AU_LOGGER.info("Stopping auto-update service");
    }

    public void stop() {
        AU_LOGGER.info("Service STOP requested");
        stopRequested = true;
    }

    private static boolean attemptUpdate(byte[] commitHash, String repoBaseUri) {
        Path realJar = Paths.get(System.getProperty("user.dir"), JAR_FILENAME);
        Path oldJar = Paths.get(System.getProperty("user.dir"), "old-" + JAR_FILENAME);

        AU_LOGGER.info(String.format("Fetching update from %s", repoBaseUri));
        InputStream in = ApiRequest.fetchStream(repoBaseUri + "/raw/" + HashCode.fromBytes(commitHash).toString() + "/" + JAR_FILENAME);
        if (in == null) {
            AU_LOGGER.info(String.format("Failed to fetch update from %s", repoBaseUri));
            return false; // failed - try another repo
        }

        Path tmpJar = null;
        InputStream in = ApiRequest.fetchStream(repoBaseUri + "/raw/" + HashCode.fromBytes(commitHash).toString() + "/" + JAR_FILENAME);
        if (in == null)
            return false;

        try {
            // Save input stream into temporary file
            tmpJar = Files.createTempFile(JAR_FILENAME + "-", null);
            AU_LOGGER.debug(String.format("Saving update from %s into %s", repoBaseUri, tmpJar.toString()));
            Files.copy(in, tmpJar, StandardCopyOption.REPLACE_EXISTING);

            // Keep trying to shutdown node
            for (int i = 0; i < MAX_ATTEMPTS; ++i) {
            int attempt;
            for (attempt = 0; attempt < MAX_ATTEMPTS; ++attempt) {
                AU_LOGGER.info(String.format("Attempt #%d out of %d to shutdown node", attempt + 1, MAX_ATTEMPTS));
                String response = ApiRequest.perform(BASE_URI + "admin/stop", null);
                if (response == null || !response.equals("true"))
                    break;
@@ -153,11 +201,26 @@ public class AutoUpdate {
                    break;
                }
            }
            if (attempt == MAX_ATTEMPTS) {
                AU_LOGGER.warn("Failed to shut down node - giving up");
                return true; // repo worked, even if we couldn't shut down node
            }

            // Rename current JAR to 'old' name so we can keep running as Windows locks running JAR
            // Then move downloaded JAR into new position
            try {
                Files.deleteIfExists(oldJar);
                Files.move(realJar, oldJar);
                try {
                    Files.move(tmpJar, realJar, StandardCopyOption.REPLACE_EXISTING);
                } catch (IOException e) {
                    // Put old jar back for now
                    AU_LOGGER.warn(String.format("Failed to move downloaded JAR into position: %s", e.getMessage()));
                    Files.move(oldJar, realJar, StandardCopyOption.REPLACE_EXISTING);
                }
            } catch (IOException e) {
                // Failed to replace but we still need to restart node
                AU_LOGGER.warn(String.format("Failed to replace JAR: %s", e.getMessage()));
            }

            // Restart node!
@@ -173,6 +236,7 @@
                Files.deleteIfExists(tmpJar);
            } catch (IOException e) {
                // we tried...
                AU_LOGGER.warn(String.format("Failed to delete downloaded JAR: %s", e.getMessage()));
            }

    }
@@ -181,9 +245,108 @@
    private static void restartNode() {
        try {
            Path execPath = Paths.get(System.getProperty("user.dir"), NODE_EXE);
            AU_LOGGER.info(String.format("Restarting node via %s", execPath.toString()));
            new ProcessBuilder(execPath.toString()).start();

            // Check node is alive
            int attempt;
            for (attempt = 0; attempt < MAX_ATTEMPTS; ++attempt) {
                AU_LOGGER.debug(String.format("Attempt #%d out of %d to contact node", attempt + 1, MAX_ATTEMPTS));
                String response = ApiRequest.perform(BASE_URI + "admin/info", null);
                if (response != null)
                    break;

                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    // We still need to check...
                    break;
                }
            }

            if (attempt == MAX_ATTEMPTS) {
                AU_LOGGER.warn("Failed to restart node - giving up");
                stopRequested = true;
            }
        } catch (IOException e) {
        }
    }

    // Calls from Controller

    // Auto-update related
    public static void controllerStart() {
        if (!Settings.getInstance().isAutoUpdateEnabled())
            return;

        if (isAutoUpdateRunning()) {
            LOGGER.info("Stopping existing auto-update service");

            // Stop existing auto-update
            stopAutoUpdate();

            // Delete old JAR (if exists)
            Path oldJar = Paths.get(System.getProperty("user.dir"), "old-" + AutoUpdate.JAR_FILENAME);
            try {
                Files.deleteIfExists(oldJar);
            } catch (IOException e) {
                // We tried...
            }
        }

        // Start auto-update
        LOGGER.info("Starting auto-update service");
        startAutoUpdate();
    }

    private static boolean isWindows() {
        return System.getProperty("os.name").contains("Windows");
    }

    private static boolean isAutoUpdateRunning() {
        if (isWindows()) {
            try {
                Process process = new ProcessBuilder("cmd.exe", "/c", "sc", "query", SERVICE_NAME).start();
                try (InputStream stdout = process.getInputStream()) {
                    InputStreamReader inputStreamReader = new InputStreamReader(stdout);
                    BufferedReader bufferedReader = new BufferedReader(inputStreamReader);
                    return bufferedReader.lines().anyMatch(line -> line.contains("RUNNING"));
                }
            } catch (IOException e) {
                LOGGER.warn("Failed to query auto-update service", e);
                // Who knows...
                return false;
            }
        } else {
            // TODO: unix poll auto-update
            return false;
        }
    }

    private static void stopAutoUpdate() {
        if (isWindows()) {
            try {
                new ProcessBuilder("cmd.exe", "/c", "sc", "stop", SERVICE_NAME).start();
            } catch (IOException e) {
                LOGGER.warn("Failed to send STOP to auto-update service", e);
                // Carry on regardless?
            }
        } else {
            // TODO: unix stop auto-update
        }
    }

    private static void startAutoUpdate() {
        if (isWindows()) {
            try {
                new ProcessBuilder("cmd.exe", "/c", "sc", "start", SERVICE_NAME).start();
            } catch (IOException e) {
                LOGGER.warn("Failed to start auto-update service", e);
                // Carry on regardless?
            }
        } else {
            // TODO: unix start auto-update
        }
    }

}
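The three // TODO: unix ... branches above are left unimplemented in this commit. A hedged sketch of what they might look like on a systemd-based host, assuming the updater were registered as a systemd unit (the unit name qora-core-auto-update is hypothetical, and the same ProcessBuilder/IOException imports as above apply):

    private static boolean isAutoUpdateRunningUnix() {
        try {
            // "systemctl is-active --quiet <unit>" exits 0 only while the unit is active
            Process process = new ProcessBuilder("systemctl", "is-active", "--quiet", "qora-core-auto-update").start();
            return process.waitFor() == 0;
        } catch (IOException | InterruptedException e) {
            return false;
        }
    }

    private static void stopAutoUpdateUnix() throws IOException {
        new ProcessBuilder("systemctl", "stop", "qora-core-auto-update").start();
    }

    private static void startAutoUpdateUnix() throws IOException {
        new ProcessBuilder("systemctl", "start", "qora-core-auto-update").start();
    }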
@@ -23,7 +23,6 @@ import javax.xml.bind.UnmarshalException;
import javax.xml.bind.Unmarshaller;
import javax.xml.transform.stream.StreamSource;

import org.bouncycastle.jsse.util.CustomSSLSocketFactory;
import org.eclipse.persistence.exceptions.XMLMarshalException;
import org.eclipse.persistence.jaxb.JAXBContextFactory;
import org.eclipse.persistence.jaxb.UnmarshallerProperties;
@@ -162,7 +162,6 @@ public class ArbitraryResource {
    }

    @POST
    @Path("/")
    @Operation(
        summary = "Build raw, unsigned, ARBITRARY transaction",
        requestBody = @RequestBody(
@@ -1,73 +0,0 @@
package org.qora;
import java.security.SecureRandom;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qora.block.BlockChain;
import org.qora.block.BlockGenerator;
import org.qora.repository.DataException;
import org.qora.repository.RepositoryFactory;
import org.qora.repository.RepositoryManager;
import org.qora.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qora.utils.Base58;

public class blockgenerator {

    private static final Logger LOGGER = LogManager.getLogger(blockgenerator.class);
    public static final String connectionUrl = "jdbc:hsqldb:file:db/blockchain;create=true";

    public static void main(String[] args) {
        if (args.length != 1) {
            System.err.println("usage: blockgenerator private-key-base58 | 'RANDOM'");
            System.err.println("example: blockgenerator 7Vg53HrETZZuVySMPWJnVwQESS3dV8jCXPL5GDHMCeKS");
            System.exit(1);
        }

        byte[] privateKey;

        if (args[0].equalsIgnoreCase("RANDOM")) {
            privateKey = new byte[32];
            new SecureRandom().nextBytes(privateKey);
        } else {
            privateKey = Base58.decode(args[0]);
        }

        try {
            RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(connectionUrl);
            RepositoryManager.setRepositoryFactory(repositoryFactory);
        } catch (DataException e) {
            LOGGER.error("Couldn't connect to repository", e);
            System.exit(2);
        }

        try {
            BlockChain.validate();
        } catch (DataException e) {
            LOGGER.error("Couldn't validate repository", e);
            System.exit(2);
        }

        BlockGenerator blockGenerator = new BlockGenerator();
        blockGenerator.start();

        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                blockGenerator.shutdown();

                try {
                    blockGenerator.join();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }

                try {
                    RepositoryManager.closeRepositoryFactory();
                } catch (DataException e) {
                    e.printStackTrace();
                }
            }
        });
    }

}
@@ -16,6 +16,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
import org.qora.AutoUpdate;
import org.qora.api.ApiService;
import org.qora.block.Block;
import org.qora.block.BlockChain;
@@ -54,13 +55,14 @@ public class Controller extends Thread {
        System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
    }

    public static final String connectionUrl = "jdbc:hsqldb:file:db/blockchain;create=true";
    public static final long startTime = System.currentTimeMillis();
    public static final String VERSION_PREFIX = "qora-core-";

    private static final Logger LOGGER = LogManager.getLogger(Controller.class);
    private static final long MISBEHAVIOUR_COOLOFF = 24 * 60 * 60 * 1000; // ms
    private static final Object shutdownLock = new Object();
    private static final String repositoryUrlTemplate = "jdbc:hsqldb:file:%s/blockchain;create=true";

    private static boolean isStopping = false;
    private static BlockGenerator blockGenerator = null;
    private static Controller instance;
@@ -104,6 +106,10 @@ public class Controller extends Thread {

    // Getters / setters

    public static String getRepositoryUrl() {
        return String.format(repositoryUrlTemplate, Settings.getInstance().getRepositoryPath());
    }

    public byte[] getMessageMagic() {
        return new byte[] {
            0x12, 0x34, 0x56, 0x78
@@ -145,7 +151,7 @@ public class Controller extends Thread {

        LOGGER.info("Starting repository");
        try {
            RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(connectionUrl);
            RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
            RepositoryManager.setRepositoryFactory(repositoryFactory);
        } catch (DataException e) {
            LOGGER.error("Unable to start repository", e);
@@ -193,6 +199,9 @@ public class Controller extends Thread {

        LOGGER.info("Starting controller");
        Controller.getInstance().start();

        // Auto-update service
        AutoUpdate.controllerStart();
    }

    // Main thread
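With the default repositoryPath of "db" (added in the Settings change further down), getRepositoryUrl() yields exactly the JDBC URL that the removed connectionUrl constant hard-coded, so existing installs keep their database location. A quick illustration:

    // Illustrative only: mirrors repositoryUrlTemplate from the hunk above
    String repositoryUrlTemplate = "jdbc:hsqldb:file:%s/blockchain;create=true";
    String url = String.format(repositoryUrlTemplate, "db");
    // url is "jdbc:hsqldb:file:db/blockchain;create=true", matching the old connectionUrl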
@@ -29,7 +29,7 @@ public class orphan {
        Settings.getInstance();

        try {
            RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(Controller.connectionUrl);
            RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(Controller.getRepositoryUrl());
            RepositoryManager.setRepositoryFactory(repositoryFactory);
        } catch (DataException e) {
            System.err.println("Couldn't connect to repository: " + e.getMessage());
@@ -51,6 +51,8 @@ public class Settings {
    private int maxUnconfirmedPerAccount = 100;
    /** Max milliseconds into future for accepting new, unconfirmed transactions */
    private int maxTransactionTimestampFuture = 24 * 60 * 60 * 1000; // milliseconds
    // auto-update
    private boolean autoUpdateEnabled = true;

    // Peer-to-peer related
    private int listenPort = DEFAULT_LISTEN_PORT;
@@ -66,7 +68,7 @@ public class Settings {
    /** Queries that take longer than this are logged. (milliseconds) */
    private Long slowQueryThreshold = null;
    /** Repository storage path. */
    private String repositoryPath = null;
    private String repositoryPath = "db";

    // Auto-update sources
    private String[] autoUpdateRepos = new String[] {
@@ -117,7 +119,7 @@ public class Settings {
        LOGGER.info("Using settings file: " + path + filename);

        // Create the StreamSource by creating Reader to the JSON input
        try (Reader settingsReader = new FileReader(filename)) {
        try (Reader settingsReader = new FileReader(path + filename)) {
            StreamSource json = new StreamSource(settingsReader);

            // Attempt to unmarshal JSON stream to Settings
@@ -150,7 +152,6 @@ public class Settings {
                // Add trailing directory separator if needed
                if (!path.endsWith(File.separator))
                    path += File.separator;

            }
        } while (settings.userPath != null);

@@ -158,8 +159,7 @@ public class Settings {
        settings.validate();

        // Minor fix-up
        if (settings.userPath == null)
            settings.userPath = "";
        settings.userPath = path;

        // Successfully read settings now in effect
        instance = settings;
@@ -247,6 +247,10 @@ public class Settings {
        return this.repositoryPath;
    }

    public boolean isAutoUpdateEnabled() {
        return this.autoUpdateEnabled;
    }

    public String[] getAutoUpdateRepos() {
        return this.autoUpdateRepos;
    }
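The new settings are exposed through plain getters, so callers such as Controller and AutoUpdate read them as below (a minimal sketch that uses only getters visible in this diff):

    Settings settings = Settings.getInstance();

    if (settings.isAutoUpdateEnabled())
        for (String repo : settings.getAutoUpdateRepos())
            // each entry is a base URI; the updater appends "/raw/<commit-hash>/qora-core.jar"
            System.out.println("Auto-update source: " + repo);

    // repositoryPath defaults to "db" and feeds Controller.getRepositoryUrl()
    System.out.println("Repository path: " + settings.getRepositoryPath());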
@@ -1,56 +0,0 @@
package org.qora;
import org.qora.block.BlockChain;
import org.qora.controller.Controller;
import org.qora.data.transaction.TransactionData;
import org.qora.repository.DataException;
import org.qora.repository.Repository;
import org.qora.repository.RepositoryFactory;
import org.qora.repository.RepositoryManager;
import org.qora.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qora.transform.TransformationException;
import org.qora.transform.transaction.TransactionTransformer;
import org.qora.utils.Base58;

import com.google.common.hash.HashCode;

public class txhex {

    public static void main(String[] args) {
        if (args.length == 0) {
            System.err.println("usage: txhex <base58-tx-signature>");
            System.exit(1);
        }

        byte[] signature = Base58.decode(args[0]);

        try {
            RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(Controller.connectionUrl);
            RepositoryManager.setRepositoryFactory(repositoryFactory);
        } catch (DataException e) {
            System.err.println("Couldn't connect to repository: " + e.getMessage());
            System.exit(2);
        }

        try {
            BlockChain.validate();
        } catch (DataException e) {
            System.err.println("Couldn't validate repository: " + e.getMessage());
            System.exit(2);
        }

        try (final Repository repository = RepositoryManager.getRepository()) {
            TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
            byte[] bytes = TransactionTransformer.toBytes(transactionData);
            System.out.println(HashCode.fromBytes(bytes).toString());
        } catch (DataException | TransformationException e) {
            e.printStackTrace();
        }

        try {
            RepositoryManager.closeRepositoryFactory();
        } catch (DataException e) {
            e.printStackTrace();
        }
    }

}
@@ -1,580 +0,0 @@
|
||||
package org.qora;
|
||||
import java.io.BufferedReader;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.math.BigDecimal;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketException;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.nio.BufferUnderflowException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.charset.Charset;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.security.Security;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.bouncycastle.jce.provider.BouncyCastleProvider;
|
||||
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
|
||||
import org.json.simple.JSONArray;
|
||||
import org.json.simple.JSONObject;
|
||||
import org.json.simple.JSONValue;
|
||||
import org.json.simple.parser.ParseException;
|
||||
import org.qora.asset.Asset;
|
||||
import org.qora.block.Block;
|
||||
import org.qora.block.BlockChain;
|
||||
import org.qora.block.Block.ValidationResult;
|
||||
import org.qora.controller.Controller;
|
||||
import org.qora.crypto.Crypto;
|
||||
import org.qora.data.at.ATData;
|
||||
import org.qora.data.at.ATStateData;
|
||||
import org.qora.data.block.BlockData;
|
||||
import org.qora.data.transaction.ATTransactionData;
|
||||
import org.qora.data.transaction.TransactionData;
|
||||
import org.qora.group.Group;
|
||||
import org.qora.repository.DataException;
|
||||
import org.qora.repository.Repository;
|
||||
import org.qora.repository.RepositoryFactory;
|
||||
import org.qora.repository.RepositoryManager;
|
||||
import org.qora.repository.hsqldb.HSQLDBRepositoryFactory;
|
||||
import org.qora.settings.Settings;
|
||||
import org.qora.transform.TransformationException;
|
||||
import org.qora.transform.block.BlockTransformer;
|
||||
import org.qora.transform.transaction.AtTransactionTransformer;
|
||||
import org.qora.utils.Base58;
|
||||
import org.qora.utils.Pair;
|
||||
import org.qora.utils.Triple;
|
||||
|
||||
import com.google.common.hash.HashCode;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import com.google.common.primitives.Ints;
|
||||
|
||||
public class v1feeder extends Thread {
|
||||
|
||||
static {
|
||||
// This must go before any calls to LogManager/Logger
|
||||
System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
|
||||
}
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(v1feeder.class);
|
||||
|
||||
private static final int INACTIVITY_TIMEOUT = 60 * 1000; // milliseconds
|
||||
private static final int CONNECTION_TIMEOUT = 2 * 1000; // milliseconds
|
||||
private static final int PING_INTERVAL = 10 * 1000; // milliseconds
|
||||
private static final int DEFAULT_PORT = 9084;
|
||||
|
||||
private static final int MAGIC_LENGTH = 4;
|
||||
// private static final int TYPE_LENGTH = 4;
|
||||
private static final int HAS_ID_LENGTH = 1;
|
||||
// private static final int ID_LENGTH = 4;
|
||||
// private static final int DATA_SIZE_LENGTH = 4;
|
||||
private static final int CHECKSUM_LENGTH = 4;
|
||||
|
||||
private static final int SIGNATURE_LENGTH = 128;
|
||||
|
||||
private static final byte[] MAINNET_MAGIC = { 0x12, 0x34, 0x56, 0x78 };
|
||||
|
||||
// private static final int GET_PEERS_TYPE = 1;
|
||||
// private static final int PEERS_TYPE = 2;
|
||||
private static final int HEIGHT_TYPE = 3;
|
||||
private static final int GET_SIGNATURES_TYPE = 4;
|
||||
private static final int SIGNATURES_TYPE = 5;
|
||||
private static final int GET_BLOCK_TYPE = 6;
|
||||
private static final int BLOCK_TYPE = 7;
|
||||
// private static final int TRANSACTION_TYPE = 8;
|
||||
private static final int PING_TYPE = 9;
|
||||
private static final int VERSION_TYPE = 10;
|
||||
// private static final int FIND_MYSELF_TYPE = 11;
|
||||
|
||||
private Socket socket;
|
||||
private OutputStream out;
|
||||
|
||||
private static final int IDLE_STATE = 0;
|
||||
private static final int AWAITING_HEADERS_STATE = 1;
|
||||
private static final int HAVE_HEADERS_STATE = 2;
|
||||
private static final int AWAITING_BLOCK_STATE = 3;
|
||||
private static final int HAVE_BLOCK_STATE = 4;
|
||||
private int feederState = IDLE_STATE;
|
||||
private int messageId = -1;
|
||||
|
||||
private long lastPingTimestamp = System.currentTimeMillis();
|
||||
private List<byte[]> signatures = new ArrayList<byte[]>();
|
||||
|
||||
private static Map<Pair<String, Integer>, BigDecimal> legacyATFees;
|
||||
private static Map<Integer, List<TransactionData>> legacyATTransactions;
|
||||
|
||||
private v1feeder(String address, int port) throws InterruptedException {
|
||||
try {
|
||||
for (int i = 0; i < 10; ++i)
|
||||
try {
|
||||
// Create new socket for connection to peer
|
||||
this.socket = new Socket();
|
||||
|
||||
// Collate this.address and destination port
|
||||
InetSocketAddress socketAddress = new InetSocketAddress(address, port);
|
||||
|
||||
// Attempt to connect, with timeout from settings
|
||||
this.socket.connect(socketAddress, CONNECTION_TIMEOUT);
|
||||
break;
|
||||
} catch (SocketTimeoutException e) {
|
||||
LOGGER.info("Timed out trying to connect to " + address + " - retrying");
|
||||
Thread.sleep(1000);
|
||||
this.socket = null;
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to connect to " + address, e);
|
||||
return;
|
||||
}
|
||||
|
||||
// No connection after retries?
|
||||
if (this.socket == null)
|
||||
return;
|
||||
|
||||
// Enable TCP keep-alive packets
|
||||
this.socket.setKeepAlive(true);
|
||||
|
||||
// Inactivity timeout
|
||||
this.socket.setSoTimeout(INACTIVITY_TIMEOUT);
|
||||
|
||||
// Grab reference to output stream
|
||||
this.out = socket.getOutputStream();
|
||||
|
||||
// Start main communication thread
|
||||
this.start();
|
||||
} catch (SocketException e) {
|
||||
LOGGER.error("Failed to set socket timeout for address " + address, e);
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Failed to get output stream for address " + address, e);
|
||||
}
|
||||
}
|
||||
|
||||
private byte[] createMessage(int type, boolean hasId, Integer id, byte[] data) throws IOException {
|
||||
if (data == null)
|
||||
data = new byte[0];
|
||||
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
|
||||
|
||||
bytes.write(MAINNET_MAGIC);
|
||||
|
||||
bytes.write(Ints.toByteArray(type));
|
||||
|
||||
byte[] hasIdBytes = new byte[] { (byte) (hasId ? 1 : 0) };
|
||||
bytes.write(hasIdBytes);
|
||||
|
||||
if (hasId) {
|
||||
if (id == null)
|
||||
id = (int) ((Math.random() * 1000000) + 1);
|
||||
|
||||
bytes.write(Ints.toByteArray(id));
|
||||
}
|
||||
|
||||
bytes.write(Ints.toByteArray(data.length));
|
||||
|
||||
if (data.length > 0) {
|
||||
byte[] checksum = Crypto.digest(data);
|
||||
bytes.write(checksum, 0, CHECKSUM_LENGTH);
|
||||
|
||||
bytes.write(data);
|
||||
}
|
||||
|
||||
LOGGER.trace("Creating message type [" + type + "] with " + (hasId ? "id [" + id + "]" : "no id") + " and data length " + data.length);
|
||||
|
||||
return bytes.toByteArray();
|
||||
}
|
||||
|
||||
private void sendMessage(byte[] message) throws IOException {
|
||||
synchronized (this.out) {
|
||||
this.out.write(message);
|
||||
this.out.flush();
|
||||
}
|
||||
}
|
||||
|
||||
private void processMessage(int type, int id, byte[] data) throws IOException {
|
||||
LOGGER.trace("Received message type [" + type + "] with id [" + id + "] and data length " + data.length);
|
||||
|
||||
ByteBuffer byteBuffer = ByteBuffer.wrap(data);
|
||||
switch (type) {
|
||||
case HEIGHT_TYPE:
|
||||
int height = byteBuffer.getInt();
|
||||
|
||||
LOGGER.info("Peer height: " + height);
|
||||
break;
|
||||
|
||||
case SIGNATURES_TYPE:
|
||||
// shove into list
|
||||
int numSignatures = byteBuffer.getInt();
|
||||
|
||||
if (numSignatures == 0)
|
||||
throw new RuntimeException("No signatures from peer - are we up to date?");
|
||||
|
||||
while (numSignatures-- > 0) {
|
||||
byte[] signature = new byte[SIGNATURE_LENGTH];
|
||||
byteBuffer.get(signature);
|
||||
signatures.add(signature);
|
||||
}
|
||||
|
||||
LOGGER.trace("We now have " + signatures.size() + " signature(s) to process");
|
||||
|
||||
feederState = HAVE_HEADERS_STATE;
|
||||
break;
|
||||
|
||||
case BLOCK_TYPE:
|
||||
// If messageId doesn't match then discard
|
||||
if (id != this.messageId)
|
||||
break;
|
||||
|
||||
// read block and process
|
||||
int claimedHeight = byteBuffer.getInt();
|
||||
|
||||
LOGGER.info("Received block allegedly at height " + claimedHeight);
|
||||
|
||||
byte[] blockBytes = new byte[byteBuffer.remaining()];
|
||||
byteBuffer.get(blockBytes);
|
||||
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = null;
|
||||
|
||||
try {
|
||||
blockInfo = BlockTransformer.fromBytes(blockBytes);
|
||||
} catch (TransformationException e) {
|
||||
LOGGER.error("Couldn't parse block bytes from peer", e);
|
||||
throw new RuntimeException("Couldn't parse block bytes from peer", e);
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
BlockData blockData = blockInfo.getA();
|
||||
|
||||
// Adjust AT state data to include fees
|
||||
List<ATStateData> atStates = new ArrayList<ATStateData>();
|
||||
for (ATStateData atState : blockInfo.getC()) {
|
||||
BigDecimal fees = legacyATFees.get(new Pair<String, Integer>(atState.getATAddress(), claimedHeight));
|
||||
ATData atData = repository.getATRepository().fromATAddress(atState.getATAddress());
|
||||
|
||||
atStates.add(new ATStateData(atState.getATAddress(), claimedHeight, atData.getCreation(), null, atState.getStateHash(), fees));
|
||||
}
|
||||
|
||||
// AT-Transaction injection goes here!
|
||||
List<TransactionData> transactions = blockInfo.getB();
|
||||
List<TransactionData> atTransactions = legacyATTransactions.get(claimedHeight);
|
||||
if (atTransactions != null) {
|
||||
transactions.addAll(0, atTransactions);
|
||||
blockData.setTransactionCount(blockData.getTransactionCount() + atTransactions.size());
|
||||
}
|
||||
|
||||
Block block = new Block(repository, blockData, transactions, atStates);
|
||||
|
||||
if (!block.isSignatureValid()) {
|
||||
LOGGER.error("Invalid block signature");
|
||||
throw new RuntimeException("Invalid block signature");
|
||||
}
|
||||
|
||||
ValidationResult result = block.isValid();
|
||||
|
||||
if (result != ValidationResult.OK) {
|
||||
LOGGER.error("Invalid block, validation result: " + result.name());
|
||||
throw new RuntimeException("Invalid block, validation result: " + result.name());
|
||||
}
|
||||
|
||||
block.process();
|
||||
repository.saveChanges();
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Unable to process block", e);
|
||||
throw new RuntimeException("Unable to process block", e);
|
||||
}
|
||||
|
||||
feederState = HAVE_BLOCK_STATE;
|
||||
break;
|
||||
|
||||
case PING_TYPE:
|
||||
LOGGER.trace("Sending pong for ping [" + id + "]");
|
||||
byte[] pongMessage = createMessage(PING_TYPE, true, id, null);
|
||||
sendMessage(pongMessage);
|
||||
break;
|
||||
|
||||
case VERSION_TYPE:
|
||||
@SuppressWarnings("unused")
|
||||
long timestamp = byteBuffer.getLong();
|
||||
int versionLength = byteBuffer.getInt();
|
||||
byte[] versionBytes = new byte[versionLength];
|
||||
byteBuffer.get(versionBytes);
|
||||
String version = new String(versionBytes, Charset.forName("UTF-8"));
|
||||
|
||||
LOGGER.info("Peer version info: " + version);
|
||||
break;
|
||||
|
||||
default:
|
||||
LOGGER.trace("Discarding message type [" + type + "] with id [" + id + "] and data length " + data.length);
|
||||
}
|
||||
}
|
||||
|
||||
private int parseBuffer(byte[] buffer, int bufferEnd) throws IOException {
|
||||
int newBufferEnd = bufferEnd;
|
||||
|
||||
try {
|
||||
ByteBuffer byteBuffer = ByteBuffer.wrap(buffer, 0, bufferEnd);
|
||||
|
||||
// Check magic
|
||||
byte[] magic = new byte[MAGIC_LENGTH];
|
||||
byteBuffer.get(magic);
|
||||
if (!Arrays.equals(magic, MAINNET_MAGIC)) {
|
||||
// bad data - discard whole buffer
|
||||
return 0;
|
||||
}
|
||||
|
||||
int type = byteBuffer.getInt();
|
||||
|
||||
byte[] hasId = new byte[HAS_ID_LENGTH];
|
||||
byteBuffer.get(hasId);
|
||||
|
||||
int id = -1;
|
||||
if (hasId[0] == (byte) 1)
|
||||
id = byteBuffer.getInt();
|
||||
|
||||
int dataSize = byteBuffer.getInt();
|
||||
byte[] data = new byte[dataSize];
|
||||
if (dataSize > 0) {
|
||||
byte[] checksum = new byte[CHECKSUM_LENGTH];
|
||||
byteBuffer.get(checksum);
|
||||
|
||||
byteBuffer.get(data);
|
||||
}
|
||||
|
||||
// We have a full message - remove from buffer
|
||||
int nextMessageOffset = byteBuffer.position();
|
||||
newBufferEnd = bufferEnd - nextMessageOffset;
|
||||
byteBuffer = null;
|
||||
|
||||
System.arraycopy(buffer, nextMessageOffset, buffer, 0, newBufferEnd);
|
||||
|
||||
// Process message
|
||||
processMessage(type, id, data);
|
||||
} catch (BufferUnderflowException e) {
|
||||
// Not enough data
|
||||
}
|
||||
|
||||
return newBufferEnd;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
DataInputStream in = new DataInputStream(socket.getInputStream());
|
||||
byte[] buffer = new byte[2 * 1024 * 1024]; // 2MB
|
||||
int bufferEnd = 0;
|
||||
|
||||
// Send our height
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int height = repository.getBlockRepository().getBlockchainHeight();
|
||||
LOGGER.trace("Sending our height " + height + " to peer");
|
||||
byte[] heightMessage = createMessage(HEIGHT_TYPE, false, null, Ints.toByteArray(height));
|
||||
sendMessage(heightMessage);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
// Anything to read?
|
||||
if (in.available() > 0) {
|
||||
// read message
|
||||
int numRead = in.read(buffer, bufferEnd, in.available());
|
||||
if (numRead == -1) {
|
||||
// input EOF
|
||||
LOGGER.info("Socket EOF");
|
||||
return;
|
||||
}
|
||||
|
||||
bufferEnd += numRead;
|
||||
}
|
||||
|
||||
if (bufferEnd > 0) {
|
||||
// attempt to parse
|
||||
bufferEnd = parseBuffer(buffer, bufferEnd);
|
||||
}
|
||||
|
||||
// Do we need to send a ping message?
|
||||
if (System.currentTimeMillis() - lastPingTimestamp >= PING_INTERVAL) {
|
||||
byte[] pingMessage = createMessage(PING_TYPE, true, null, null);
|
||||
sendMessage(pingMessage);
|
||||
lastPingTimestamp = System.currentTimeMillis();
|
||||
}
|
||||
|
||||
byte[] signature = null;
|
||||
switch (feederState) {
|
||||
case IDLE_STATE:
|
||||
// Get signature from our highest block
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
BlockData blockData = repository.getBlockRepository().getLastBlock();
|
||||
|
||||
if (blockData != null)
|
||||
signature = blockData.getSignature();
|
||||
}
|
||||
|
||||
// done?
|
||||
if (signature == null) {
|
||||
LOGGER.warn("No last block in repository?");
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.trace("Requesting more signatures...");
|
||||
byte[] getSignaturesMessage = createMessage(GET_SIGNATURES_TYPE, true, null, signature);
|
||||
sendMessage(getSignaturesMessage);
|
||||
feederState = AWAITING_HEADERS_STATE;
|
||||
break;
|
||||
|
||||
case HAVE_HEADERS_STATE:
|
||||
case HAVE_BLOCK_STATE:
|
||||
// request next block?
|
||||
if (signatures.size() == 0) {
|
||||
feederState = IDLE_STATE;
|
||||
break;
|
||||
}
|
||||
|
||||
LOGGER.trace("Requesting next block...");
|
||||
signature = signatures.remove(0);
|
||||
this.messageId = (int) ((Math.random() * 1000000) + 1);
|
||||
byte[] getBlockMessage = createMessage(GET_BLOCK_TYPE, true, this.messageId, signature);
|
||||
sendMessage(getBlockMessage);
|
||||
feederState = AWAITING_BLOCK_STATE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (IOException | DataException | RuntimeException e) {
|
||||
// give up
|
||||
LOGGER.info("Exiting", e);
|
||||
}
|
||||
|
||||
try {
|
||||
this.socket.close();
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
private static void readLegacyATs(String legacyATPathname) {
|
||||
legacyATFees = new HashMap<Pair<String, Integer>, BigDecimal>();
|
||||
legacyATTransactions = new HashMap<Integer, List<TransactionData>>();
|
||||
|
||||
Path path = Paths.get(legacyATPathname);
|
||||
|
||||
JSONArray json = null;
|
||||
|
||||
try (BufferedReader in = Files.newBufferedReader(path)) {
|
||||
json = (JSONArray) JSONValue.parseWithException(in);
|
||||
} catch (IOException | ParseException e) {
|
||||
throw new RuntimeException("Couldn't read legacy AT JSON file");
|
||||
}
|
||||
|
||||
for (Object o : json) {
|
||||
JSONObject entry = (JSONObject) o;
|
||||
|
||||
int height = Integer.parseInt((String) entry.get("height"));
|
||||
long timestamp = (Long) entry.get("timestamp");
|
||||
|
||||
JSONArray transactionEntries = (JSONArray) entry.get("transactions");
|
||||
|
||||
List<TransactionData> transactions = new ArrayList<TransactionData>();
|
||||
|
||||
for (Object t : transactionEntries) {
|
||||
JSONObject transactionEntry = (JSONObject) t;
|
||||
|
||||
String recipient = (String) transactionEntry.get("recipient");
|
||||
String sender = (String) transactionEntry.get("sender");
|
||||
BigDecimal amount = new BigDecimal((String) transactionEntry.get("amount")).setScale(8);
|
||||
|
||||
if (recipient.equals("1111111111111111111111111")) {
|
||||
// fee
|
||||
legacyATFees.put(new Pair<String, Integer>(sender, height), amount);
|
||||
} else {
|
||||
// Actual AT Transaction
|
||||
String messageString = (String) transactionEntry.get("message");
|
||||
byte[] message = messageString.isEmpty() ? new byte[0] : HashCode.fromString(messageString).asBytes();
|
||||
int sequence = ((Long) transactionEntry.get("seq")).intValue();
|
||||
byte[] reference = Base58.decode((String) transactionEntry.get("reference"));
|
||||
|
||||
// reference is AT's deploy tx signature
|
||||
// sender's public key is genesis account
|
||||
// zero fee
|
||||
// timestamp is block's timestamp
|
||||
// signature = duplicated hash of transaction data
|
||||
|
||||
BigDecimal fee = BigDecimal.ZERO.setScale(8);
|
||||
|
||||
TransactionData transactionData = new ATTransactionData(timestamp, Group.NO_GROUP, reference, sender, recipient, amount, Asset.QORA, message, fee);
|
||||
byte[] digest;
|
||||
try {
|
||||
digest = Crypto.digest(AtTransactionTransformer.toBytes(transactionData));
|
||||
byte[] signature = Bytes.concat(digest, digest);
|
||||
|
||||
transactionData = new ATTransactionData(timestamp, Group.NO_GROUP, reference, sender, recipient, amount, Asset.QORA, message, fee, signature);
|
||||
} catch (TransformationException e) {
|
||||
throw new RuntimeException("Couldn't transform AT Transaction into bytes", e);
|
||||
}
|
||||
|
||||
if (sequence > transactions.size())
|
||||
transactions.add(transactionData);
|
||||
else
|
||||
transactions.add(sequence, transactionData);
|
||||
}
|
||||
}
|
||||
|
||||
if (!transactions.isEmpty())
|
||||
legacyATTransactions.put(height, transactions);
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
if (args.length < 2 || args.length > 3) {
|
||||
System.err.println("usage: v1feeder legacy-AT-json v1-node-address [port]");
|
||||
System.err.println("example: v1feeder legacy-ATs.json 10.0.0.100 9084");
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
String legacyATPathname = args[0];
|
||||
readLegacyATs(legacyATPathname);
|
||||
|
||||
Security.insertProviderAt(new BouncyCastleProvider(), 0);
|
||||
Security.insertProviderAt(new BouncyCastleJsseProvider(), 1);
|
||||
|
||||
Settings.getInstance();
|
||||
|
||||
try {
|
||||
RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(Controller.connectionUrl);
|
||||
RepositoryManager.setRepositoryFactory(repositoryFactory);
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't connect to repository", e);
|
||||
System.exit(2);
|
||||
}
|
||||
|
||||
try {
|
||||
BlockChain.validate();
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't validate repository", e);
|
||||
System.exit(2);
|
||||
}
|
||||
|
||||
// connect to v1 node
|
||||
String address = args[1];
|
||||
int port = args.length > 2 ? Integer.valueOf(args[2]) : DEFAULT_PORT;
|
||||
|
||||
try {
|
||||
new v1feeder(address, port).join();
|
||||
} catch (InterruptedException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
LOGGER.info("Exiting v1feeder");
|
||||
|
||||
try {
|
||||
RepositoryManager.closeRepositoryFactory();
|
||||
} catch (DataException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
}