diff --git a/.gitignore b/.gitignore index 55b4f8d5..e26d6244 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,6 @@ /WindowsInstaller/Install Files/qortal.jar /*.7z /tmp +/data* +/src/test/resources/arbitrary/*/.qortal/cache +apikey.txt diff --git a/README.md b/README.md index 9dd9ad60..e9001f9c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,10 @@ -# Qortal Project - Official Repo +# Qortal Data Node + +## Important + +This code is unfinished, and we haven't had the official genesis block for the data chain yet. +Therefore it is only possible to use this code if you first create your own test chain. I would +highly recommend waiting until the code is in a more complete state before trying to run this. ## Build / run diff --git a/log4j2.properties b/log4j2.properties index fdbf51dd..44e1b1e3 100644 --- a/log4j2.properties +++ b/log4j2.properties @@ -61,7 +61,7 @@ appender.rolling.type = RollingFile appender.rolling.name = FILE appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -appender.rolling.filePattern = ${dirname:-}${filename}.%i +appender.rolling.filePattern = ./${filename}.%i appender.rolling.policy.type = SizeBasedTriggeringPolicy appender.rolling.policy.size = 4MB # Set the immediate flush to true (default) diff --git a/pom.xml b/pom.xml index 01fc2b86..59f4e63b 100644 --- a/pom.xml +++ b/pom.xml @@ -16,18 +16,21 @@ 1.8 2.6 1.21 + 3.12.0 1.9 1.2.2 28.1-jre 2.5.1 2.29.1 9.4.29.v20200521 - 2.12.1 + 2.17.1 UTF-8 1.7.12 2.0.9 3.23.8 1.1.0 + 1.13.1 + 4.10 src/main/java @@ -462,6 +465,11 @@ commons-compress ${commons-compress.version} + + org.apache.commons + commons-lang3 + ${commons-lang3.version} + org.tukaani xz @@ -667,5 +675,15 @@ bctls-jdk15on ${bouncycastle.version} + + org.jsoup + jsoup + ${jsoup.version} + + + io.github.java-diff-utils + java-diff-utils + ${java-diff-utils.version} + diff --git a/src/main/java/org/qortal/api/ApiError.java b/src/main/java/org/qortal/api/ApiError.java index a5c10c1a..659104e7 100644 --- a/src/main/java/org/qortal/api/ApiError.java +++ b/src/main/java/org/qortal/api/ApiError.java @@ -132,7 +132,11 @@ public enum ApiError { FOREIGN_BLOCKCHAIN_TOO_SOON(1203, 408), // Trade portal - ORDER_SIZE_TOO_SMALL(1300, 402); + ORDER_SIZE_TOO_SMALL(1300, 402), + + // Data + FILE_NOT_FOUND(1401, 404), + NO_REPLY(1402, 404); private static final Map map = stream(ApiError.values()).collect(toMap(apiError -> apiError.code, apiError -> apiError)); diff --git a/src/main/java/org/qortal/api/ApiKey.java b/src/main/java/org/qortal/api/ApiKey.java new file mode 100644 index 00000000..6a79dd20 --- /dev/null +++ b/src/main/java/org/qortal/api/ApiKey.java @@ -0,0 +1,98 @@ +package org.qortal.api; + +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.SecureRandom; + +public class ApiKey { + + private String apiKey; + + public ApiKey() throws IOException { + this.load(); + } + + public void generate() throws IOException { + byte[] apiKey = new byte[16]; + new SecureRandom().nextBytes(apiKey); + this.apiKey = Base58.encode(apiKey); + + this.save(); + } + + + /* Filesystem */ + + private Path getFilePath() { + return Paths.get(Settings.getInstance().getApiKeyPath(), "apikey.txt"); + } + + private boolean load() throws IOException { + Path path = 
this.getFilePath(); + File apiKeyFile = new File(path.toString()); + if (!apiKeyFile.exists()) { + // Try settings - to allow legacy API keys to be supported + return this.loadLegacyApiKey(); + } + + try { + this.apiKey = new String(Files.readAllBytes(path)); + + } catch (IOException e) { + throw new IOException(String.format("Couldn't read contents from file %s", path.toString())); + } + + return true; + } + + private boolean loadLegacyApiKey() { + String legacyApiKey = Settings.getInstance().getApiKey(); + if (legacyApiKey != null && !legacyApiKey.isEmpty()) { + this.apiKey = Settings.getInstance().getApiKey(); + + try { + // Save it to the apikey file + this.save(); + } catch (IOException e) { + // Ignore failures as it will be reloaded from settings next time + } + return true; + } + return false; + } + + public void save() throws IOException { + if (this.apiKey == null || this.apiKey.isEmpty()) { + throw new IllegalStateException("Unable to save a blank API key"); + } + + Path filePath = this.getFilePath(); + + BufferedWriter writer = new BufferedWriter(new FileWriter(filePath.toString())); + writer.write(this.apiKey); + writer.close(); + } + + + public boolean generated() { + return (this.apiKey != null); + } + + public boolean exists() { + return this.getFilePath().toFile().exists(); + } + + @Override + public String toString() { + return this.apiKey; + } + +} diff --git a/src/main/java/org/qortal/api/ApiService.java b/src/main/java/org/qortal/api/ApiService.java index cafba4ae..697543c7 100644 --- a/src/main/java/org/qortal/api/ApiService.java +++ b/src/main/java/org/qortal/api/ApiService.java @@ -14,8 +14,7 @@ import java.security.SecureRandom; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.checkerframework.checker.units.qual.A; import org.eclipse.jetty.http.HttpVersion; import org.eclipse.jetty.rewrite.handler.RedirectPatternRule; import org.eclipse.jetty.rewrite.handler.RewriteHandler; @@ -52,12 +51,11 @@ import org.qortal.settings.Settings; public class ApiService { - private static final Logger LOGGER = LogManager.getLogger(ApiService.class); - private static ApiService instance; private final ResourceConfig config; private Server server; + private ApiKey apiKey; private ApiService() { this.config = new ResourceConfig(); @@ -78,6 +76,15 @@ public class ApiService { return this.config.getClasses(); } + public void setApiKey(ApiKey apiKey) { + this.apiKey = apiKey; + } + + public ApiKey getApiKey() { + return this.apiKey; + } + + public void start() { try { // Create API server @@ -207,9 +214,6 @@ public class ApiService { context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot"); context.addServlet(PresenceWebSocket.class, "/websockets/presence"); - // Warn about API security if needed - this.checkApiSecurity(); - // Start server this.server.start(); } catch (Exception e) { @@ -229,23 +233,4 @@ public class ApiService { this.server = null; } - private void checkApiSecurity() { - // Warn about API security if needed - boolean allConnectionsAllowed = false; - if (Settings.getInstance().isApiKeyDisabled()) { - for (String pattern : Settings.getInstance().getApiWhitelist()) { - if (pattern.startsWith("0.0.0.0/") || pattern.startsWith("::/") || pattern.endsWith("/0")) { - allConnectionsAllowed = true; - } - } - - if (allConnectionsAllowed) { - LOGGER.warn("Warning: API key validation is currently disabled, and the API whitelist " + - "is 
allowing all connections. This can be a security risk."); - LOGGER.warn("To fix, set the apiKeyDisabled setting to false, or allow only specific local " + - "IP addresses using the apiWhitelist setting."); - } - } - } - } diff --git a/src/main/java/org/qortal/api/DomainMapService.java b/src/main/java/org/qortal/api/DomainMapService.java new file mode 100644 index 00000000..ba0fa067 --- /dev/null +++ b/src/main/java/org/qortal/api/DomainMapService.java @@ -0,0 +1,171 @@ +package org.qortal.api; + +import io.swagger.v3.jaxrs2.integration.resources.OpenApiResource; +import org.eclipse.jetty.http.HttpVersion; +import org.eclipse.jetty.rewrite.handler.RewriteHandler; +import org.eclipse.jetty.rewrite.handler.RewritePatternRule; +import org.eclipse.jetty.server.*; +import org.eclipse.jetty.server.handler.ErrorHandler; +import org.eclipse.jetty.server.handler.InetAccessHandler; +import org.eclipse.jetty.servlet.FilterHolder; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHolder; +import org.eclipse.jetty.servlets.CrossOriginFilter; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.glassfish.jersey.server.ResourceConfig; +import org.glassfish.jersey.servlet.ServletContainer; +import org.qortal.api.resource.AnnotationPostProcessor; +import org.qortal.api.resource.ApiDefinition; +import org.qortal.settings.Settings; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import java.io.InputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.KeyStore; +import java.security.SecureRandom; + +public class DomainMapService { + + private static DomainMapService instance; + + private final ResourceConfig config; + private Server server; + + private DomainMapService() { + this.config = new ResourceConfig(); + this.config.packages("org.qortal.api.domainmap.resource"); + this.config.register(OpenApiResource.class); + this.config.register(ApiDefinition.class); + this.config.register(AnnotationPostProcessor.class); + } + + public static DomainMapService getInstance() { + if (instance == null) + instance = new DomainMapService(); + + return instance; + } + + public Iterable> getResources() { + return this.config.getClasses(); + } + + public void start() { + try { + // Create API server + + // SSL support if requested + String keystorePathname = Settings.getInstance().getSslKeystorePathname(); + String keystorePassword = Settings.getInstance().getSslKeystorePassword(); + + if (keystorePathname != null && keystorePassword != null) { + // SSL version + if (!Files.isReadable(Path.of(keystorePathname))) + throw new RuntimeException("Failed to start SSL API due to broken keystore"); + + // BouncyCastle-specific SSLContext build + SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE"); + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE"); + + KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC"); + + try (InputStream keystoreStream = Files.newInputStream(Paths.get(keystorePathname))) { + keyStore.load(keystoreStream, keystorePassword.toCharArray()); + } + + keyManagerFactory.init(keyStore, keystorePassword.toCharArray()); + sslContext.init(keyManagerFactory.getKeyManagers(), null, new SecureRandom()); + + SslContextFactory.Server sslContextFactory = new SslContextFactory.Server(); + sslContextFactory.setSslContext(sslContext); + + 
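
For context on the ApiService changes above: the server now carries an ApiKey object (loaded from apikey.txt, or migrated from the legacy "apiKey" setting) rather than comparing requests against the settings value directly. The wiring is not part of this hunk, but a minimal sketch of how the key is presumably attached at node startup, using only the classes introduced here, looks like this:

import java.io.IOException;

import org.qortal.api.ApiKey;
import org.qortal.api.ApiService;

public class ApiKeyWiringSketch {

    public static void main(String[] args) {
        try {
            // Reads apikey.txt from the configured path, falling back to the legacy "apiKey" setting;
            // if neither exists, the key simply remains un-generated for now.
            ApiService.getInstance().setApiKey(new ApiKey());
        } catch (IOException e) {
            // apikey.txt exists but could not be read; a key can be (re)created via POST /admin/apikey/generate
        }

        // Sensitive endpoints are then validated against this key by Security.checkApiCallAllowed()
        ApiService.getInstance().start();
    }

}
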
this.server = new Server(); + + HttpConfiguration httpConfig = new HttpConfiguration(); + httpConfig.setSecureScheme("https"); + httpConfig.setSecurePort(Settings.getInstance().getDomainMapPort()); + + SecureRequestCustomizer src = new SecureRequestCustomizer(); + httpConfig.addCustomizer(src); + + HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfig); + SslConnectionFactory sslConnectionFactory = new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString()); + + ServerConnector portUnifiedConnector = new ServerConnector(this.server, + new DetectorConnectionFactory(sslConnectionFactory), + httpConnectionFactory); + portUnifiedConnector.setHost(Settings.getInstance().getBindAddress()); + portUnifiedConnector.setPort(Settings.getInstance().getDomainMapPort()); + + this.server.addConnector(portUnifiedConnector); + } else { + // Non-SSL + InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress()); + InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getDomainMapPort()); + this.server = new Server(endpoint); + } + + // Error handler + ErrorHandler errorHandler = new ApiErrorHandler(); + this.server.setErrorHandler(errorHandler); + + // Request logging + if (Settings.getInstance().isDomainMapLoggingEnabled()) { + RequestLogWriter logWriter = new RequestLogWriter("domainmap-requests.log"); + logWriter.setAppend(true); + logWriter.setTimeZone("UTC"); + RequestLog requestLog = new CustomRequestLog(logWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT); + this.server.setRequestLog(requestLog); + } + + // Access handler (currently no whitelist is used) + InetAccessHandler accessHandler = new InetAccessHandler(); + this.server.setHandler(accessHandler); + + // URL rewriting + RewriteHandler rewriteHandler = new RewriteHandler(); + accessHandler.setHandler(rewriteHandler); + + // Context + ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SESSIONS); + context.setContextPath("/"); + rewriteHandler.setHandler(context); + + // Cross-origin resource sharing + FilterHolder corsFilterHolder = new FilterHolder(CrossOriginFilter.class); + corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, "*"); + corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, "GET, POST, DELETE"); + corsFilterHolder.setInitParameter(CrossOriginFilter.CHAIN_PREFLIGHT_PARAM, "false"); + context.addFilter(corsFilterHolder, "/*", null); + + // API servlet + ServletContainer container = new ServletContainer(this.config); + ServletHolder apiServlet = new ServletHolder(container); + apiServlet.setInitOrder(1); + context.addServlet(apiServlet, "/*"); + + // Start server + this.server.start(); + } catch (Exception e) { + // Failed to start + throw new RuntimeException("Failed to start API", e); + } + } + + public void stop() { + try { + // Stop server + this.server.stop(); + } catch (Exception e) { + // Failed to stop + } + + this.server = null; + } + +} diff --git a/src/main/java/org/qortal/api/GatewayService.java b/src/main/java/org/qortal/api/GatewayService.java new file mode 100644 index 00000000..030a0f2f --- /dev/null +++ b/src/main/java/org/qortal/api/GatewayService.java @@ -0,0 +1,170 @@ +package org.qortal.api; + +import io.swagger.v3.jaxrs2.integration.resources.OpenApiResource; +import org.eclipse.jetty.http.HttpVersion; +import org.eclipse.jetty.rewrite.handler.RewriteHandler; +import org.eclipse.jetty.server.*; +import 
org.eclipse.jetty.server.handler.ErrorHandler; +import org.eclipse.jetty.server.handler.InetAccessHandler; +import org.eclipse.jetty.servlet.FilterHolder; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHolder; +import org.eclipse.jetty.servlets.CrossOriginFilter; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.glassfish.jersey.server.ResourceConfig; +import org.glassfish.jersey.servlet.ServletContainer; +import org.qortal.api.resource.AnnotationPostProcessor; +import org.qortal.api.resource.ApiDefinition; +import org.qortal.settings.Settings; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import java.io.InputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.KeyStore; +import java.security.SecureRandom; + +public class GatewayService { + + private static GatewayService instance; + + private final ResourceConfig config; + private Server server; + + private GatewayService() { + this.config = new ResourceConfig(); + this.config.packages("org.qortal.api.gateway.resource"); + this.config.register(OpenApiResource.class); + this.config.register(ApiDefinition.class); + this.config.register(AnnotationPostProcessor.class); + } + + public static GatewayService getInstance() { + if (instance == null) + instance = new GatewayService(); + + return instance; + } + + public Iterable> getResources() { + return this.config.getClasses(); + } + + public void start() { + try { + // Create API server + + // SSL support if requested + String keystorePathname = Settings.getInstance().getSslKeystorePathname(); + String keystorePassword = Settings.getInstance().getSslKeystorePassword(); + + if (keystorePathname != null && keystorePassword != null) { + // SSL version + if (!Files.isReadable(Path.of(keystorePathname))) + throw new RuntimeException("Failed to start SSL API due to broken keystore"); + + // BouncyCastle-specific SSLContext build + SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE"); + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE"); + + KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC"); + + try (InputStream keystoreStream = Files.newInputStream(Paths.get(keystorePathname))) { + keyStore.load(keystoreStream, keystorePassword.toCharArray()); + } + + keyManagerFactory.init(keyStore, keystorePassword.toCharArray()); + sslContext.init(keyManagerFactory.getKeyManagers(), null, new SecureRandom()); + + SslContextFactory.Server sslContextFactory = new SslContextFactory.Server(); + sslContextFactory.setSslContext(sslContext); + + this.server = new Server(); + + HttpConfiguration httpConfig = new HttpConfiguration(); + httpConfig.setSecureScheme("https"); + httpConfig.setSecurePort(Settings.getInstance().getGatewayPort()); + + SecureRequestCustomizer src = new SecureRequestCustomizer(); + httpConfig.addCustomizer(src); + + HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfig); + SslConnectionFactory sslConnectionFactory = new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString()); + + ServerConnector portUnifiedConnector = new ServerConnector(this.server, + new DetectorConnectionFactory(sslConnectionFactory), + httpConnectionFactory); + portUnifiedConnector.setHost(Settings.getInstance().getBindAddress()); + 
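
// Note on the connector built here (same pattern as in DomainMapService above): wrapping the
// SslConnectionFactory in a DetectorConnectionFactory ahead of the plain HttpConnectionFactory
// gives a port-unified connector. Jetty inspects the first bytes of each new connection, hands
// TLS handshakes to the SSL factory and lets plaintext requests fall through to HTTP, so the
// gateway can serve both http:// and https:// on the single port configured for it.
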
portUnifiedConnector.setPort(Settings.getInstance().getGatewayPort()); + + this.server.addConnector(portUnifiedConnector); + } else { + // Non-SSL + InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress()); + InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getGatewayPort()); + this.server = new Server(endpoint); + } + + // Error handler + ErrorHandler errorHandler = new ApiErrorHandler(); + this.server.setErrorHandler(errorHandler); + + // Request logging + if (Settings.getInstance().isGatewayLoggingEnabled()) { + RequestLogWriter logWriter = new RequestLogWriter("gateway-requests.log"); + logWriter.setAppend(true); + logWriter.setTimeZone("UTC"); + RequestLog requestLog = new CustomRequestLog(logWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT); + this.server.setRequestLog(requestLog); + } + + // Access handler (currently no whitelist is used) + InetAccessHandler accessHandler = new InetAccessHandler(); + this.server.setHandler(accessHandler); + + // URL rewriting + RewriteHandler rewriteHandler = new RewriteHandler(); + accessHandler.setHandler(rewriteHandler); + + // Context + ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SESSIONS); + context.setContextPath("/"); + rewriteHandler.setHandler(context); + + // Cross-origin resource sharing + FilterHolder corsFilterHolder = new FilterHolder(CrossOriginFilter.class); + corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, "*"); + corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, "GET, POST, DELETE"); + corsFilterHolder.setInitParameter(CrossOriginFilter.CHAIN_PREFLIGHT_PARAM, "false"); + context.addFilter(corsFilterHolder, "/*", null); + + // API servlet + ServletContainer container = new ServletContainer(this.config); + ServletHolder apiServlet = new ServletHolder(container); + apiServlet.setInitOrder(1); + context.addServlet(apiServlet, "/*"); + + // Start server + this.server.start(); + } catch (Exception e) { + // Failed to start + throw new RuntimeException("Failed to start API", e); + } + } + + public void stop() { + try { + // Stop server + this.server.stop(); + } catch (Exception e) { + // Failed to stop + } + + this.server = null; + } + +} diff --git a/src/main/java/org/qortal/api/HTMLParser.java b/src/main/java/org/qortal/api/HTMLParser.java new file mode 100644 index 00000000..51e0854e --- /dev/null +++ b/src/main/java/org/qortal/api/HTMLParser.java @@ -0,0 +1,45 @@ +package org.qortal.api; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jsoup.Jsoup; +import org.jsoup.nodes.Document; +import org.jsoup.select.Elements; + +public class HTMLParser { + + private static final Logger LOGGER = LogManager.getLogger(HTMLParser.class); + + private String linkPrefix; + private byte[] data; + + public HTMLParser(String resourceId, String inPath, String prefix, boolean usePrefix, byte[] data) { + String inPathWithoutFilename = inPath.substring(0, inPath.lastIndexOf('/')); + this.linkPrefix = usePrefix ? 
String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : ""; + this.data = data; + } + + public void setDocumentBaseUrl() { + String fileContents = new String(data); + Document document = Jsoup.parse(fileContents); + String baseUrl = this.linkPrefix + "/"; + Elements head = document.getElementsByTag("head"); + if (!head.isEmpty()) { + String baseElement = String.format("", baseUrl); + head.get(0).prepend(baseElement); + } + String html = document.html(); + this.data = html.getBytes(); + } + + public static boolean isHtmlFile(String path) { + if (path.endsWith(".html") || path.endsWith(".htm")) { + return true; + } + return false; + } + + public byte[] getData() { + return this.data; + } +} diff --git a/src/main/java/org/qortal/api/Security.java b/src/main/java/org/qortal/api/Security.java index 4e25b03b..09c98988 100644 --- a/src/main/java/org/qortal/api/Security.java +++ b/src/main/java/org/qortal/api/Security.java @@ -1,38 +1,98 @@ package org.qortal.api; +import org.qortal.arbitrary.ArbitraryDataResource; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataRenderManager; +import org.qortal.settings.Settings; + +import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import javax.servlet.http.HttpServletRequest; -import org.qortal.settings.Settings; - public abstract class Security { public static final String API_KEY_HEADER = "X-API-KEY"; public static void checkApiCallAllowed(HttpServletRequest request) { - // If API key checking has been disabled, we will allow the request in all cases - boolean isApiKeyDisabled = Settings.getInstance().isApiKeyDisabled(); - if (isApiKeyDisabled) - return; + // We may want to allow automatic authentication for local requests, if enabled in settings + boolean localAuthBypassEnabled = Settings.getInstance().isLocalAuthBypassEnabled(); + if (localAuthBypassEnabled) { + try { + InetAddress remoteAddr = InetAddress.getByName(request.getRemoteAddr()); + if (remoteAddr.isLoopbackAddress()) { + // Request originates from loopback address, so allow it + return; + } + } catch (UnknownHostException e) { + // Ignore failure, and fallback to API key authentication + } + } - String expectedApiKey = Settings.getInstance().getApiKey(); + // Retrieve the API key + ApiKey apiKey = Security.getApiKey(request); + if (!apiKey.generated()) { + // Not generated an API key yet, so disallow sensitive API calls + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "API key not generated"); + } + + // We require an API key to be passed String passedApiKey = request.getHeader(API_KEY_HEADER); + if (passedApiKey == null) { + // Try query string - this is needed to avoid a CORS preflight. 
See: https://stackoverflow.com/a/43881141 + passedApiKey = request.getParameter("apiKey"); + } + if (passedApiKey == null) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Missing 'X-API-KEY' header"); + } - if ((expectedApiKey != null && !expectedApiKey.equals(passedApiKey)) || - (passedApiKey != null && !passedApiKey.equals(expectedApiKey))) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED); + // The API keys must match + if (!apiKey.toString().equals(passedApiKey)) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "API key invalid"); + } + } - InetAddress remoteAddr; + public static void disallowLoopbackRequests(HttpServletRequest request) { try { - remoteAddr = InetAddress.getByName(request.getRemoteAddr()); + InetAddress remoteAddr = InetAddress.getByName(request.getRemoteAddr()); + if (remoteAddr.isLoopbackAddress()) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Local requests not allowed"); + } } catch (UnknownHostException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED); } + } - if (!remoteAddr.isLoopbackAddress()) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED); + public static void requirePriorAuthorization(HttpServletRequest request, String resourceId, Service service, String identifier) { + ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, service, identifier); + if (!ArbitraryDataRenderManager.getInstance().isAuthorized(resource)) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Call /render/authorize first"); + } + } + + public static void requirePriorAuthorizationOrApiKey(HttpServletRequest request, String resourceId, Service service, String identifier) { + try { + Security.checkApiCallAllowed(request); + + } catch (ApiException e) { + // API call wasn't allowed, but maybe it was pre-authorized + Security.requirePriorAuthorization(request, resourceId, service, identifier); + } + } + + public static ApiKey getApiKey(HttpServletRequest request) { + ApiKey apiKey = ApiService.getInstance().getApiKey(); + if (apiKey == null) { + try { + apiKey = new ApiKey(); + } catch (IOException e) { + // Couldn't load API key - so we need to treat it as not generated, and therefore unauthorized + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED); + } + ApiService.getInstance().setApiKey(apiKey); + } + return apiKey; } } diff --git a/src/main/java/org/qortal/api/domainmap/resource/DomainMapResource.java b/src/main/java/org/qortal/api/domainmap/resource/DomainMapResource.java new file mode 100644 index 00000000..27770449 --- /dev/null +++ b/src/main/java/org/qortal/api/domainmap/resource/DomainMapResource.java @@ -0,0 +1,58 @@ +package org.qortal.api.domainmap.resource; + +import io.swagger.v3.oas.annotations.tags.Tag; +import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType; +import org.qortal.arbitrary.ArbitraryDataRenderer; +import org.qortal.arbitrary.misc.Service; +import org.qortal.settings.Settings; + +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.core.Context; +import java.util.Map; + + +@Path("/") +@Tag(name = "Gateway") +public class 
DomainMapResource { + + @Context HttpServletRequest request; + @Context HttpServletResponse response; + @Context ServletContext context; + + + @GET + public HttpServletResponse getIndexByDomainMap() { + return this.getDomainMap("/"); + } + + @GET + @Path("{path:.*}") + public HttpServletResponse getPathByDomainMap(@PathParam("path") String inPath) { + return this.getDomainMap(inPath); + } + + private HttpServletResponse getDomainMap(String inPath) { + Map domainMap = Settings.getInstance().getSimpleDomainMap(); + if (domainMap != null && domainMap.containsKey(request.getServerName())) { + // Build synchronously, so that we don't need to make the summary API endpoints available over + // the domain map server. This means that there will be no loading screen, but this is potentially + // preferred in this situation anyway (e.g. to avoid confusing search engine robots). + return this.get(domainMap.get(request.getServerName()), ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", false, false); + } + return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found"); + } + + private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath, + String secret58, String prefix, boolean usePrefix, boolean async) { + + ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath, + secret58, prefix, usePrefix, async, request, response, context); + return renderer.render(); + } + +} diff --git a/src/main/java/org/qortal/api/gateway/resource/GatewayResource.java b/src/main/java/org/qortal/api/gateway/resource/GatewayResource.java new file mode 100644 index 00000000..cee1613f --- /dev/null +++ b/src/main/java/org/qortal/api/gateway/resource/GatewayResource.java @@ -0,0 +1,126 @@ +package org.qortal.api.gateway.resource; + +import io.swagger.v3.oas.annotations.security.SecurityRequirement; +import io.swagger.v3.oas.annotations.tags.Tag; +import org.qortal.api.Security; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType; +import org.qortal.arbitrary.ArbitraryDataReader; +import org.qortal.arbitrary.ArbitraryDataRenderer; +import org.qortal.arbitrary.ArbitraryDataResource; +import org.qortal.arbitrary.misc.Service; +import org.qortal.data.arbitrary.ArbitraryResourceStatus; + +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.*; +import javax.ws.rs.core.Context; + + +@Path("/") +@Tag(name = "Gateway") +public class GatewayResource { + + @Context HttpServletRequest request; + @Context HttpServletResponse response; + @Context ServletContext context; + + /** + * We need to allow resource status checking (and building) via the gateway, as the node's API port + * may not be forwarded and will almost certainly not be authenticated. Since gateways allow for + * all resources to be loaded except those that are blocked, there is no need for authentication. 
+ */ + @GET + @Path("/arbitrary/resource/status/{service}/{name}") + public ArbitraryResourceStatus getDefaultResourceStatus(@PathParam("service") Service service, + @PathParam("name") String name, + @QueryParam("build") Boolean build) { + + return this.getStatus(service, name, null, build); + } + + @GET + @Path("/arbitrary/resource/status/{service}/{name}/{identifier}") + public ArbitraryResourceStatus getResourceStatus(@PathParam("service") Service service, + @PathParam("name") String name, + @PathParam("identifier") String identifier, + @QueryParam("build") Boolean build) { + + return this.getStatus(service, name, identifier, build); + } + + private ArbitraryResourceStatus getStatus(Service service, String name, String identifier, Boolean build) { + + // If "build=true" has been specified in the query string, build the resource before returning its status + if (build != null && build == true) { + ArbitraryDataReader reader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, null); + try { + if (!reader.isBuilding()) { + reader.loadSynchronously(false); + } + } catch (Exception e) { + // No need to handle exception, as it will be reflected in the status + } + } + + ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier); + return resource.getStatus(); + } + + + @GET + public HttpServletResponse getRoot() { + return ArbitraryDataRenderer.getResponse(response, 200, ""); + } + + + @GET + @Path("{name}/{path:.*}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getPathByName(@PathParam("name") String name, + @PathParam("path") String inPath) { + // Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data + Security.disallowLoopbackRequests(request); + return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", true, true); + } + + @GET + @Path("{name}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getIndexByName(@PathParam("name") String name) { + // Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data + Security.disallowLoopbackRequests(request); + return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "", true, true); + } + + + // Optional /site alternative for backwards support + + @GET + @Path("/site/{name}/{path:.*}") + public HttpServletResponse getSitePathByName(@PathParam("name") String name, + @PathParam("path") String inPath) { + // Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data + Security.disallowLoopbackRequests(request); + return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "/site", true, true); + } + + @GET + @Path("/site/{name}") + public HttpServletResponse getSiteIndexByName(@PathParam("name") String name) { + // Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data + Security.disallowLoopbackRequests(request); + return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "/site", true, true); + } + + + private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath, + String secret58, String prefix, boolean usePrefix, boolean async) { + + ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath, + secret58, prefix, usePrefix, async, request, response, context); + return 
renderer.render(); + } + +} diff --git a/src/main/java/org/qortal/api/model/PeersSummary.java b/src/main/java/org/qortal/api/model/PeersSummary.java new file mode 100644 index 00000000..28788550 --- /dev/null +++ b/src/main/java/org/qortal/api/model/PeersSummary.java @@ -0,0 +1,15 @@ +package org.qortal.api.model; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; + +@XmlAccessorType(XmlAccessType.FIELD) +public class PeersSummary { + + public int inboundConnections; + public int outboundConnections; + + public PeersSummary() { + } + +} diff --git a/src/main/java/org/qortal/api/resource/AdminResource.java b/src/main/java/org/qortal/api/resource/AdminResource.java index 1d2c8bde..1d127b93 100644 --- a/src/main/java/org/qortal/api/resource/AdminResource.java +++ b/src/main/java/org/qortal/api/resource/AdminResource.java @@ -39,12 +39,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.core.appender.RollingFileAppender; +import org.checkerframework.checker.units.qual.A; import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; -import org.qortal.api.ApiError; -import org.qortal.api.ApiErrors; -import org.qortal.api.ApiExceptionFactory; -import org.qortal.api.Security; +import org.qortal.api.*; import org.qortal.api.model.ActivitySummary; import org.qortal.api.model.NodeInfo; import org.qortal.api.model.NodeStatus; @@ -80,7 +78,8 @@ public class AdminResource { @Path("/unused") @Parameter(in = ParameterIn.PATH, name = "assetid", description = "Asset ID, 0 is native coin", schema = @Schema(type = "integer")) @Parameter(in = ParameterIn.PATH, name = "otherassetid", description = "Asset ID, 0 is native coin", schema = @Schema(type = "integer")) - @Parameter(in = ParameterIn.PATH, name = "address", description = "an account address", example = "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v") + @Parameter(in = ParameterIn.PATH, name = "address", description = "An account address", example = "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v") + @Parameter(in = ParameterIn.PATH, name = "path", description = "Local path to folder containing the files", schema = @Schema(type = "String", defaultValue = "/Users/user/Documents/MyStaticWebsite")) @Parameter(in = ParameterIn.QUERY, name = "count", description = "Maximum number of entries to return, 0 means none", schema = @Schema(type = "integer", defaultValue = "20")) @Parameter(in = ParameterIn.QUERY, name = "limit", description = "Maximum number of entries to return, 0 means unlimited", schema = @Schema(type = "integer", defaultValue = "20")) @Parameter(in = ParameterIn.QUERY, name = "offset", description = "Starting entry in results, 0 is first entry", schema = @Schema(type = "integer")) @@ -716,4 +715,40 @@ public class AdminResource { } } + + @POST + @Path("/apikey/generate") + @Operation( + summary = "Generate an API key", + description = "This request is unauthenticated if no API key has been generated yet. 
" + + "If an API key already exists, it needs to be passed as a header and this endpoint " + + "will then generate a new key which replaces the existing one.", + responses = { + @ApiResponse( + description = "API key string", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string")) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String generateApiKey() { + ApiKey apiKey = Security.getApiKey(request); + + // If the API key is already generated, we need to authenticate this request + if (apiKey.generated() && apiKey.exists()) { + Security.checkApiCallAllowed(request); + } + + // Not generated yet - so we are safe to generate one + // FUTURE: we may want to restrict this to local/loopback only? + + try { + apiKey.generate(); + } catch (IOException e) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Unable to generate API key"); + } + + return apiKey.toString(); + } + } diff --git a/src/main/java/org/qortal/api/resource/ArbitraryResource.java b/src/main/java/org/qortal/api/resource/ArbitraryResource.java index 26604318..57d39867 100644 --- a/src/main/java/org/qortal/api/resource/ArbitraryResource.java +++ b/src/main/java/org/qortal/api/resource/ArbitraryResource.java @@ -1,5 +1,6 @@ package org.qortal.api.resource; +import com.google.common.primitives.Bytes; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.media.ArraySchema; @@ -7,28 +8,41 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; +import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; -import javax.ws.rs.GET; -import javax.ws.rs.POST; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.QueryParam; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.*; import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; -import org.qortal.api.ApiError; -import org.qortal.api.ApiErrors; -import org.qortal.api.ApiException; -import org.qortal.api.ApiExceptionFactory; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.bouncycastle.util.encoders.Base64; +import org.qortal.api.*; import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.arbitrary.*; +import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.Controller; +import org.qortal.controller.arbitrary.ArbitraryDataStorageManager; +import org.qortal.data.account.AccountData; +import org.qortal.data.arbitrary.ArbitraryResourceInfo; +import org.qortal.data.arbitrary.ArbitraryResourceNameInfo; +import org.qortal.data.arbitrary.ArbitraryResourceStatus; +import org.qortal.data.naming.NameData; import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.TransactionData; -import org.qortal.data.transaction.ArbitraryTransactionData.DataType; import 
org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; @@ -39,15 +53,172 @@ import org.qortal.transaction.Transaction.TransactionType; import org.qortal.transaction.Transaction.ValidationResult; import org.qortal.transform.TransformationException; import org.qortal.transform.transaction.ArbitraryTransactionTransformer; +import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.Base58; +import org.qortal.utils.ZipUtils; @Path("/arbitrary") @Tag(name = "Arbitrary") public class ArbitraryResource { - @Context - HttpServletRequest request; - + private static final Logger LOGGER = LogManager.getLogger(ArbitraryResource.class); + + @Context HttpServletRequest request; + @Context HttpServletResponse response; + @Context ServletContext context; + + @GET + @Path("/resources") + @Operation( + summary = "List arbitrary resources available on chain, optionally filtered by service and identifier", + description = "- If the identifier parameter is missing or empty, it will return an unfiltered list of all possible identifiers.\n" + + "- If an identifier is specified, only resources with a matching identifier will be returned.\n" + + "- If default is set to true, only resources without identifiers will be returned.", + responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceInfo.class)) + ) + } + ) + @ApiErrors({ApiError.REPOSITORY_ISSUE}) + public List getResources( + @QueryParam("service") Service service, + @QueryParam("identifier") String identifier, + @Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource, + @Parameter(ref = "limit") @QueryParam("limit") Integer limit, + @Parameter(ref = "offset") @QueryParam("offset") Integer offset, + @Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse, + @Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus) { + + try (final Repository repository = RepositoryManager.getRepository()) { + + // Treat empty identifier as null + if (identifier != null && identifier.isEmpty()) { + identifier = null; + } + + // Ensure that "default" and "identifier" parameters cannot coexist + boolean defaultRes = Boolean.TRUE.equals(defaultResource); + if (defaultRes == true && identifier != null) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "identifier cannot be specified when requesting a default resource"); + } + + List resources = repository.getArbitraryRepository() + .getArbitraryResources(service, identifier, null, defaultRes, limit, offset, reverse); + + if (resources == null) { + return new ArrayList<>(); + } + + if (includeStatus != null && includeStatus == true) { + resources = this.addStatusToResources(resources); + } + + return resources; + + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + + @GET + @Path("/resources/names") + @Operation( + summary = "List arbitrary resources available on chain, grouped by creator's name", + responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceInfo.class)) + ) + } + ) + @ApiErrors({ApiError.REPOSITORY_ISSUE}) + public List getResourcesGroupedByName( + @QueryParam("service") Service service, + @QueryParam("identifier") 
String identifier, + @Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource, + @Parameter(ref = "limit") @QueryParam("limit") Integer limit, + @Parameter(ref = "offset") @QueryParam("offset") Integer offset, + @Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse, + @Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus) { + + try (final Repository repository = RepositoryManager.getRepository()) { + + // Treat empty identifier as null + if (identifier != null && identifier.isEmpty()) { + identifier = null; + } + + // Ensure that "default" and "identifier" parameters cannot coexist + boolean defaultRes = Boolean.TRUE.equals(defaultResource); + if (defaultRes == true && identifier != null) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "identifier cannot be specified when requesting a default resource"); + } + + List creatorNames = repository.getArbitraryRepository() + .getArbitraryResourceCreatorNames(service, identifier, defaultRes, limit, offset, reverse); + + for (ArbitraryResourceNameInfo creatorName : creatorNames) { + String name = creatorName.name; + if (name != null) { + List resources = repository.getArbitraryRepository() + .getArbitraryResources(service, identifier, name, defaultRes, null, null, reverse); + + if (includeStatus != null && includeStatus == true) { + resources = this.addStatusToResources(resources); + } + creatorName.resources = resources; + } + } + + return creatorNames; + + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + + @GET + @Path("/resource/status/{service}/{name}") + @Operation( + summary = "Get status of arbitrary resource with supplied service and name", + description = "If build is set to true, the resource will be built synchronously before returning the status.", + responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceStatus.class)) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public ArbitraryResourceStatus getDefaultResourceStatus(@PathParam("service") Service service, + @PathParam("name") String name, + @QueryParam("build") Boolean build) { + + Security.requirePriorAuthorizationOrApiKey(request, name, service, null); + return this.getStatus(service, name, null, build); + } + + @GET + @Path("/resource/status/{service}/{name}/{identifier}") + @Operation( + summary = "Get status of arbitrary resource with supplied service, name and identifier", + description = "If build is set to true, the resource will be built synchronously before returning the status.", + responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceStatus.class)) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public ArbitraryResourceStatus getResourceStatus(@PathParam("service") Service service, + @PathParam("name") String name, + @PathParam("identifier") String identifier, + @QueryParam("build") Boolean build) { + + Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier); + return this.getStatus(service, name, identifier, build); + } + + @GET @Path("/search") @Operation( @@ -71,7 +242,9 @@ public class ArbitraryResource { }) public List searchTransactions(@QueryParam("startBlock") Integer startBlock, @QueryParam("blockLimit") Integer 
blockLimit, @QueryParam("txGroupId") Integer txGroupId, - @QueryParam("service") Integer service, @QueryParam("address") String address, @Parameter( + @QueryParam("service") Service service, + @QueryParam("name") String name, + @QueryParam("address") String address, @Parameter( description = "whether to include confirmed, unconfirmed or both", required = true ) @QueryParam("confirmationStatus") ConfirmationStatus confirmationStatus, @Parameter( @@ -93,69 +266,15 @@ public class ArbitraryResource { txTypes.add(TransactionType.ARBITRARY); try (final Repository repository = RepositoryManager.getRepository()) { - List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(startBlock, blockLimit, txGroupId, txTypes, - service, address, confirmationStatus, limit, offset, reverse); + List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(startBlock, blockLimit, txGroupId, txTypes, + service, name, address, confirmationStatus, limit, offset, reverse); // Expand signatures to transactions - List transactions = new ArrayList(signatures.size()); + List transactions = new ArrayList<>(signatures.size()); for (byte[] signature : signatures) transactions.add(repository.getTransactionRepository().fromSignature(signature)); return transactions; - } catch (ApiException e) { - throw e; - } catch (DataException e) { - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); - } - } - - @GET - @Path("/raw/{signature}") - @Operation( - summary = "Fetch raw data associated with passed transaction signature", - responses = { - @ApiResponse( - description = "raw data", - content = @Content( - schema = @Schema(type = "string", format = "byte"), - mediaType = MediaType.APPLICATION_OCTET_STREAM - ) - ) - } - ) - @ApiErrors({ - ApiError.INVALID_SIGNATURE, ApiError.REPOSITORY_ISSUE, ApiError.TRANSACTION_INVALID - }) - public byte[] fetchRawData(@PathParam("signature") String signature58) { - // Decode signature - byte[] signature; - try { - signature = Base58.decode(signature58); - } catch (NumberFormatException e) { - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_SIGNATURE, e); - } - - try (final Repository repository = RepositoryManager.getRepository()) { - TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); - - if (transactionData == null || transactionData.getType() != TransactionType.ARBITRARY) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_SIGNATURE); - - ArbitraryTransactionData arbitraryTxData = (ArbitraryTransactionData) transactionData; - - // We're really expecting to only fetch the data's hash from repository - if (arbitraryTxData.getDataType() != DataType.DATA_HASH) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSACTION_INVALID); - - ArbitraryTransaction arbitraryTx = new ArbitraryTransaction(repository, arbitraryTxData); - - // For now, we only allow locally stored data - if (!arbitraryTx.isDataLocal()) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSACTION_INVALID); - - return arbitraryTx.fetchData(); - } catch (ApiException e) { - throw e; } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } @@ -209,4 +328,726 @@ public class ArbitraryResource { } } -} \ No newline at end of file + @GET + @Path("/relaymode") + @Operation( + summary = "Returns whether relay mode is enabled or not", + 
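
The resource status endpoints added above (/arbitrary/resource/status/{service}/{name}[/{identifier}]) are the same ones the gateway exposes for loading screens. A minimal client-side sketch of polling one of them, assuming the node API is on the default localhost:12391 port and using a hypothetical registered name and placeholder key:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ResourceStatusSketch {

    public static void main(String[] args) throws Exception {
        String apiKey = "..."; // placeholder: contents of apikey.txt, or the value returned by POST /admin/apikey/generate
        String name = "ExampleName"; // hypothetical registered name

        // build=true asks the node to build the resource synchronously before reporting its status
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/arbitrary/resource/status/WEBSITE/" + name + "?build=true"))
                .header("X-API-KEY", apiKey) // or append &apiKey=... to the query string to avoid a CORS preflight
                .GET()
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        System.out.println(response.body()); // JSON-serialised ArbitraryResourceStatus
    }

}
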
responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) + ) + } + ) + @ApiErrors({ApiError.REPOSITORY_ISSUE}) + public boolean getRelayMode() { + Security.checkApiCallAllowed(request); + + return Settings.getInstance().isRelayModeEnabled(); + } + + @GET + @Path("/hosted/transactions") + @Operation( + summary = "List arbitrary transactions hosted by this node", + responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryTransactionData.class)) + ) + } + ) + @ApiErrors({ApiError.REPOSITORY_ISSUE}) + public List getHostedTransactions() { + Security.checkApiCallAllowed(request); + + try (final Repository repository = RepositoryManager.getRepository()) { + + List hostedTransactions = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository); + + return hostedTransactions; + + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + + @GET + @Path("/hosted/resources") + @Operation( + summary = "List arbitrary resources hosted by this node", + responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceInfo.class)) + ) + } + ) + @ApiErrors({ApiError.REPOSITORY_ISSUE}) + public List getHostedResources( + @Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus) { + Security.checkApiCallAllowed(request); + + List resources = new ArrayList<>(); + + try (final Repository repository = RepositoryManager.getRepository()) { + + List transactionDataList = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository); + for (ArbitraryTransactionData transactionData : transactionDataList) { + ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); + if (transaction.isDataLocal()) { + String name = transactionData.getName(); + Service service = transactionData.getService(); + String identifier = transactionData.getIdentifier(); + + if (transactionData.getName() != null) { + List transactionResources = repository.getArbitraryRepository() + .getArbitraryResources(service, identifier, name, (identifier == null), null, null, false); + if (transactionResources != null) { + resources.addAll(transactionResources); + } + } + } + } + + if (includeStatus != null && includeStatus == true) { + resources = this.addStatusToResources(resources); + } + + return resources; + + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + + @DELETE + @Path("/resource/{service}/{name}/{identifier}") + @Operation( + summary = "Delete arbitrary resource with supplied service, name and identifier", + responses = { + @ApiResponse( + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string")) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public boolean deleteResource(@PathParam("service") Service service, + @PathParam("name") String name, + @PathParam("identifier") String identifier) { + + Security.checkApiCallAllowed(request); + ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier); + return resource.delete(); + } + + @POST + @Path("/compute") + @Operation( + summary = "Compute nonce for raw, unsigned ARBITRARY transaction", + requestBody = @RequestBody( + required = true, + 
content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string", + description = "raw, unsigned ARBITRARY transaction in base58 encoding", + example = "raw transaction base58" + ) + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @ApiErrors({ApiError.TRANSACTION_INVALID, ApiError.INVALID_DATA, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") + public String computeNonce(String rawBytes58) { + Security.checkApiCallAllowed(request); + + try (final Repository repository = RepositoryManager.getRepository()) { + byte[] rawBytes = Base58.decode(rawBytes58); + // We're expecting unsigned transaction, so append empty signature prior to decoding + rawBytes = Bytes.concat(rawBytes, new byte[TransactionTransformer.SIGNATURE_LENGTH]); + + TransactionData transactionData = TransactionTransformer.fromBytes(rawBytes); + if (transactionData == null) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA); + + if (transactionData.getType() != TransactionType.ARBITRARY) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA); + + ArbitraryTransaction arbitraryTransaction = (ArbitraryTransaction) Transaction.fromData(repository, transactionData); + + // Quicker validity check first before we compute nonce + ValidationResult result = arbitraryTransaction.isValid(); + if (result != ValidationResult.OK) + throw TransactionsResource.createTransactionInvalidException(request, result); + + LOGGER.info("Computing nonce..."); + arbitraryTransaction.computeNonce(); + + // Re-check, but ignores signature + result = arbitraryTransaction.isValidUnconfirmed(); + if (result != ValidationResult.OK) + throw TransactionsResource.createTransactionInvalidException(request, result); + + // Strip zeroed signature + transactionData.setSignature(null); + + byte[] bytes = ArbitraryTransactionTransformer.toBytes(transactionData); + return Base58.encode(bytes); + } catch (TransformationException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e); + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + + + @GET + @Path("/{service}/{name}") + @Operation( + summary = "Fetch raw data from file with supplied service, name, and relative path", + description = "An optional rebuild boolean can be supplied. If true, any existing cached data will be invalidated.", + responses = { + @ApiResponse( + description = "Path to file structure containing requested data", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public HttpServletResponse get(@PathParam("service") Service service, + @PathParam("name") String name, + @QueryParam("filepath") String filepath, + @QueryParam("rebuild") boolean rebuild) { + Security.checkApiCallAllowed(request); + + return this.download(service, name, null, filepath, rebuild); + } + + @GET + @Path("/{service}/{name}/{identifier}") + @Operation( + summary = "Fetch raw data from file with supplied service, name, identifier, and relative path", + description = "An optional rebuild boolean can be supplied. 
If true, any existing cached data will be invalidated.", + responses = { + @ApiResponse( + description = "Path to file structure containing requested data", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public HttpServletResponse get(@PathParam("service") Service service, + @PathParam("name") String name, + @PathParam("identifier") String identifier, + @QueryParam("filepath") String filepath, + @QueryParam("rebuild") boolean rebuild) { + Security.checkApiCallAllowed(request); + + return this.download(service, name, identifier, filepath, rebuild); + } + + + + // Upload data at supplied path + + @POST + @Path("/{service}/{name}") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on a user-supplied path", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string", example = "/Users/user/Documents/MyDirectoryOrFile" + ) + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String post(@PathParam("service") String serviceString, + @PathParam("name") String name, + String path) { + Security.checkApiCallAllowed(request); + + if (path == null || path.isEmpty()) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Path not supplied"); + } + + return this.upload(Service.valueOf(serviceString), name, null, path, null, null, false); + } + + @POST + @Path("/{service}/{name}/{identifier}") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on a user-supplied path", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string", example = "/Users/user/Documents/MyDirectoryOrFile" + ) + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String post(@PathParam("service") String serviceString, + @PathParam("name") String name, + @PathParam("identifier") String identifier, + String path) { + Security.checkApiCallAllowed(request); + + if (path == null || path.isEmpty()) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Path not supplied"); + } + + return this.upload(Service.valueOf(serviceString), name, identifier, path, null, null, false); + } + + + + // Upload base64-encoded data + + @POST + @Path("/{service}/{name}/base64") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on user-supplied base64 encoded data", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.APPLICATION_OCTET_STREAM, + schema = @Schema(type = "string", format = "byte") + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String postBase64EncodedData(@PathParam("service") String 
serviceString, + @PathParam("name") String name, + String base64) { + Security.checkApiCallAllowed(request); + + if (base64 == null) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied"); + } + + return this.upload(Service.valueOf(serviceString), name, null, null, null, base64, false); + } + + @POST + @Path("/{service}/{name}/{identifier}/base64") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on user supplied base64 encoded data", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.APPLICATION_OCTET_STREAM, + schema = @Schema(type = "string", format = "byte") + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String postBase64EncodedData(@PathParam("service") String serviceString, + @PathParam("name") String name, + @PathParam("identifier") String identifier, + String base64) { + Security.checkApiCallAllowed(request); + + if (base64 == null) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied"); + } + + return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64, false); + } + + + // Upload zipped data + + @POST + @Path("/{service}/{name}/zip") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on user-supplied zip file, encoded as base64", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.APPLICATION_OCTET_STREAM, + schema = @Schema(type = "string", format = "byte") + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String postZippedData(@PathParam("service") String serviceString, + @PathParam("name") String name, + String base64Zip) { + Security.checkApiCallAllowed(request); + + if (base64Zip == null) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied"); + } + + return this.upload(Service.valueOf(serviceString), name, null, null, null, base64Zip, true); + } + + @POST + @Path("/{service}/{name}/{identifier}/zip") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on user supplied zip file, encoded as base64", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.APPLICATION_OCTET_STREAM, + schema = @Schema(type = "string", format = "byte") + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String postZippedData(@PathParam("service") String serviceString, + @PathParam("name") String name, + @PathParam("identifier") String identifier, + String base64Zip) { + Security.checkApiCallAllowed(request); + + if (base64Zip == null) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied"); + } + + return 
this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64Zip, true); + } + + + + // Upload plain-text data in string form + + @POST + @Path("/{service}/{name}/string") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on a user-supplied string", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string", example = "{\"title\":\"\", \"description\":\"\", \"tags\":[]}" + ) + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String postString(@PathParam("service") String serviceString, + @PathParam("name") String name, + String string) { + Security.checkApiCallAllowed(request); + + if (string == null || string.isEmpty()) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data string not supplied"); + } + + return this.upload(Service.valueOf(serviceString), name, null, null, string, null, false); + } + + @POST + @Path("/{service}/{name}/{identifier}/string") + @Operation( + summary = "Build raw, unsigned, ARBITRARY transaction, based on user supplied string", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string", example = "{\"title\":\"\", \"description\":\"\", \"tags\":[]}" + ) + ) + ), + responses = { + @ApiResponse( + description = "raw, unsigned, ARBITRARY transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String postString(@PathParam("service") String serviceString, + @PathParam("name") String name, + @PathParam("identifier") String identifier, + String string) { + Security.checkApiCallAllowed(request); + + if (string == null || string.isEmpty()) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data string not supplied"); + } + + return this.upload(Service.valueOf(serviceString), name, identifier, null, string, null, false); + } + + + // Shared methods + + private String upload(Service service, String name, String identifier, String path, String string, String base64, boolean zipped) { + // Fetch public key from registered name + try (final Repository repository = RepositoryManager.getRepository()) { + NameData nameData = repository.getNameRepository().fromName(name); + if (nameData == null) { + String error = String.format("Name not registered: %s", name); + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, error); + } + + if (!Controller.getInstance().isUpToDate()) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC); + } + + AccountData accountData = repository.getAccountRepository().getAccount(nameData.getOwner()); + if (accountData == null) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN); + } + byte[] publicKey = accountData.getPublicKey(); + String publicKey58 = Base58.encode(publicKey); + + if (path == null) { + // See if we have a string instead + if (string != null) { + File tempFile = File.createTempFile("qortal-", ".tmp"); + tempFile.deleteOnExit(); + BufferedWriter 
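For small text payloads the string variant above avoids staging a file on the client: the request body itself becomes the published data, and the node writes it to a temporary file internally before building the transaction. A sketch of posting a small JSON document, with the service, name and payload purely illustrative (the name must already be registered, as enforced by the shared upload helper):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Scanner;

    public class StringUploadClientSketch {
        public static void main(String[] args) throws Exception {
            String json = "{\"title\":\"Example\", \"description\":\"Illustrative only\", \"tags\":[]}";
            // Service value and name are placeholders for illustration only
            URL url = new URL("http://localhost:12393/arbitrary/DOCUMENT/example-name/string");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "text/plain");
            conn.setRequestProperty("X-API-KEY", "API-KEY-FROM-apikey.txt"); // assumed header name
            try (OutputStream out = conn.getOutputStream()) {
                out.write(json.getBytes(StandardCharsets.UTF_8));
            }
            try (Scanner scanner = new Scanner(conn.getInputStream(), StandardCharsets.UTF_8.name())) {
                // Raw, unsigned ARBITRARY transaction in base58; pass to /arbitrary/compute next
                System.out.println(scanner.useDelimiter("\\A").next());
            }
        }
    }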
writer = new BufferedWriter(new FileWriter(tempFile.toPath().toString())); + writer.write(string); + writer.newLine(); + writer.close(); + path = tempFile.toPath().toString(); + } + // ... or base64 encoded raw data + else if (base64 != null) { + File tempFile = File.createTempFile("qortal-", ".tmp"); + tempFile.deleteOnExit(); + Files.write(tempFile.toPath(), Base64.decode(base64)); + path = tempFile.toPath().toString(); + } + else { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Missing path or data string"); + } + } + + if (zipped) { + // Unzip the file + java.nio.file.Path tempDirectory = Files.createTempDirectory("qortal-"); + tempDirectory.toFile().deleteOnExit(); + LOGGER.info("Unzipping..."); + ZipUtils.unzip(path, tempDirectory.toString()); + path = tempDirectory.toString(); + + // Handle directories slightly differently to files + if (tempDirectory.toFile().isDirectory()) { + // The actual data will be in a randomly-named subfolder of tempDirectory + // Remove hidden folders, i.e. starting with "_", as some systems can add them, e.g. "__MACOSX" + String[] files = tempDirectory.toFile().list((parent, child) -> !child.startsWith("_")); + if (files.length == 1) { // Single directory or file only + path = Paths.get(tempDirectory.toString(), files[0]).toString(); + } + } + } + + try { + ArbitraryDataTransactionBuilder transactionBuilder = new ArbitraryDataTransactionBuilder( + repository, publicKey58, Paths.get(path), name, null, service, identifier + ); + + transactionBuilder.build(); + // Don't compute nonce - this is done by the client (or via POST /arbitrary/compute) + ArbitraryTransactionData transactionData = transactionBuilder.getArbitraryTransactionData(); + return Base58.encode(ArbitraryTransactionTransformer.toBytes(transactionData)); + + } catch (DataException | TransformationException | IllegalStateException e) { + LOGGER.info("Unable to upload data: {}", e.getMessage()); + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_DATA, e.getMessage()); + } + + } catch (DataException | IOException e) { + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage()); + } + } + + private HttpServletResponse download(Service service, String name, String identifier, String filepath, boolean rebuild) { + + ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier); + try { + + int attempts = 0; + + // Loop until we have data + while (!Controller.isStopping()) { + attempts++; + if (!arbitraryDataReader.isBuilding()) { + try { + arbitraryDataReader.loadSynchronously(rebuild); + break; + } catch (MissingDataException e) { + if (attempts > 5) { + // Give up after 5 attempts + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data unavailable. 
Please try again later."); + } + } + } + Thread.sleep(3000L); + } + java.nio.file.Path outputPath = arbitraryDataReader.getFilePath(); + + if (filepath == null || filepath.isEmpty()) { + // No file path supplied - so check if this is a single file resource + String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal"); + if (files.length == 1) { + // This is a single file resource + filepath = files[0]; + } + } + + // TODO: limit file size that can be read into memory + java.nio.file.Path path = Paths.get(outputPath.toString(), filepath); + if (!Files.exists(path)) { + String message = String.format("No file exists at filepath: %s", filepath); + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, message); + } + byte[] data = Files.readAllBytes(path); + response.setContentType(context.getMimeType(path.toString())); + response.setContentLength(data.length); + response.getOutputStream().write(data); + + return response; + } catch (Exception e) { + LOGGER.info(String.format("Unable to load %s %s: %s", service, name, e.getMessage())); + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage()); + } + } + + + private ArbitraryResourceStatus getStatus(Service service, String name, String identifier, Boolean build) { + + // If "build=true" has been specified in the query string, build the resource before returning its status + if (build != null && build == true) { + ArbitraryDataReader reader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, null); + try { + if (!reader.isBuilding()) { + reader.loadSynchronously(false); + } + } catch (Exception e) { + // No need to handle exception, as it will be reflected in the status + } + } + + ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier); + return resource.getStatus(); + } + + private List addStatusToResources(List resources) { + // Determine and add the status of each resource + List updatedResources = new ArrayList<>(); + for (ArbitraryResourceInfo resourceInfo : resources) { + ArbitraryDataResource resource = new ArbitraryDataResource(resourceInfo.name, ResourceIdType.NAME, + resourceInfo.service, resourceInfo.identifier); + ArbitraryResourceStatus status = resource.getStatus(); + if (status != null) { + resourceInfo.status = status; + } + updatedResources.add(resourceInfo); + } + return updatedResources; + } +} diff --git a/src/main/java/org/qortal/api/resource/BootstrapResource.java b/src/main/java/org/qortal/api/resource/BootstrapResource.java index 9b9b7f2a..2832f8bb 100644 --- a/src/main/java/org/qortal/api/resource/BootstrapResource.java +++ b/src/main/java/org/qortal/api/resource/BootstrapResource.java @@ -4,6 +4,7 @@ import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -43,6 +44,7 @@ public class BootstrapResource { ) } ) + @SecurityRequirement(name = "apiKey") public String createBootstrap() { Security.checkApiCallAllowed(request); @@ -77,6 +79,7 @@ public class BootstrapResource { ) } ) + @SecurityRequirement(name = "apiKey") public boolean validateBootstrap() { 
Security.checkApiCallAllowed(request); diff --git a/src/main/java/org/qortal/api/resource/CrossChainBitcoinACCTv1Resource.java b/src/main/java/org/qortal/api/resource/CrossChainBitcoinACCTv1Resource.java index 20a27241..df368970 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainBitcoinACCTv1Resource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainBitcoinACCTv1Resource.java @@ -5,6 +5,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import java.util.Arrays; @@ -79,6 +80,7 @@ public class CrossChainBitcoinACCTv1Resource { } ) @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_DATA, ApiError.INVALID_REFERENCE, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") public String buildTrade(CrossChainBuildRequest tradeRequest) { Security.checkApiCallAllowed(request); @@ -174,6 +176,7 @@ public class CrossChainBitcoinACCTv1Resource { } ) @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") public String buildTradeMessage(CrossChainTradeRequest tradeRequest) { Security.checkApiCallAllowed(request); @@ -257,6 +260,7 @@ public class CrossChainBitcoinACCTv1Resource { } ) @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_DATA, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") public String buildRedeemMessage(CrossChainDualSecretRequest secretRequest) { Security.checkApiCallAllowed(request); @@ -360,4 +364,4 @@ public class CrossChainBitcoinACCTv1Resource { } } -} \ No newline at end of file +} diff --git a/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java b/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java index 2c1c6991..ecbaf840 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java @@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import java.util.List; @@ -56,6 +57,7 @@ public class CrossChainBitcoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public String getBitcoinWalletBalance(String key58) { Security.checkApiCallAllowed(request); @@ -94,6 +96,7 @@ public class CrossChainBitcoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public List getBitcoinWalletTransactions(String key58) { Security.checkApiCallAllowed(request); @@ -130,6 +133,7 @@ public class CrossChainBitcoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public String 
sendBitcoin(BitcoinSendRequest bitcoinSendRequest) { Security.checkApiCallAllowed(request); @@ -164,4 +168,4 @@ public class CrossChainBitcoinResource { return spendTransaction.getTxId().toString(); } -} \ No newline at end of file +} diff --git a/src/main/java/org/qortal/api/resource/CrossChainDogecoinACCTv1Resource.java b/src/main/java/org/qortal/api/resource/CrossChainDogecoinACCTv1Resource.java index 1645f89b..b13c6644 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainDogecoinACCTv1Resource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainDogecoinACCTv1Resource.java @@ -5,6 +5,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import org.qortal.account.PrivateKeyAccount; import org.qortal.api.ApiError; @@ -67,6 +68,7 @@ public class CrossChainDogecoinACCTv1Resource { } ) @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_DATA, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") public boolean buildRedeemMessage(CrossChainSecretRequest secretRequest) { Security.checkApiCallAllowed(request); diff --git a/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java b/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java index bceda7e9..d6b186d0 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java @@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import org.bitcoinj.core.Transaction; import org.qortal.api.ApiError; @@ -54,6 +55,7 @@ public class CrossChainDogecoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public String getDogecoinWalletBalance(String key58) { Security.checkApiCallAllowed(request); @@ -92,6 +94,7 @@ public class CrossChainDogecoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public List getDogecoinWalletTransactions(String key58) { Security.checkApiCallAllowed(request); @@ -128,6 +131,7 @@ public class CrossChainDogecoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public String sendBitcoin(DogecoinSendRequest dogecoinSendRequest) { Security.checkApiCallAllowed(request); diff --git a/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java b/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java index 46d7ebc6..e0bca8d3 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java @@ -4,6 +4,7 @@ import io.swagger.v3.oas.annotations.Operation; import 
io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import java.math.BigDecimal; @@ -105,6 +106,7 @@ public class CrossChainHtlcResource { } ) @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN}) + @SecurityRequirement(name = "apiKey") public CrossChainBitcoinyHTLCStatus checkHtlcStatus(@PathParam("blockchain") String blockchainName, @PathParam("refundPKH") String refundPKH, @PathParam("locktime") int lockTime, @@ -188,6 +190,7 @@ public class CrossChainHtlcResource { } ) @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN}) + @SecurityRequirement(name = "apiKey") public boolean redeemHtlc(@PathParam("ataddress") String atAddress) { Security.checkApiCallAllowed(request); @@ -246,6 +249,7 @@ public class CrossChainHtlcResource { } ) @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN}) + @SecurityRequirement(name = "apiKey") public boolean redeemAllHtlc() { Security.checkApiCallAllowed(request); boolean success = false; @@ -430,6 +434,7 @@ public class CrossChainHtlcResource { } ) @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN}) + @SecurityRequirement(name = "apiKey") public boolean refundHtlc(@PathParam("ataddress") String atAddress) { Security.checkApiCallAllowed(request); @@ -478,6 +483,7 @@ public class CrossChainHtlcResource { } ) @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN}) + @SecurityRequirement(name = "apiKey") public boolean refundAllHtlc() { Security.checkApiCallAllowed(request); boolean success = false; diff --git a/src/main/java/org/qortal/api/resource/CrossChainLitecoinACCTv1Resource.java b/src/main/java/org/qortal/api/resource/CrossChainLitecoinACCTv1Resource.java index 04923133..38cb763e 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainLitecoinACCTv1Resource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainLitecoinACCTv1Resource.java @@ -5,6 +5,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import org.qortal.account.PrivateKeyAccount; import org.qortal.api.ApiError; @@ -72,6 +73,7 @@ public class CrossChainLitecoinACCTv1Resource { } ) @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_DATA, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") public boolean buildRedeemMessage(CrossChainSecretRequest secretRequest) { Security.checkApiCallAllowed(request); diff --git a/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java b/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java index 8883f964..6055942a 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java @@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import 
io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import java.util.List; @@ -56,6 +57,7 @@ public class CrossChainLitecoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public String getLitecoinWalletBalance(String key58) { Security.checkApiCallAllowed(request); @@ -94,6 +96,7 @@ public class CrossChainLitecoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public List getLitecoinWalletTransactions(String key58) { Security.checkApiCallAllowed(request); @@ -130,6 +133,7 @@ public class CrossChainLitecoinResource { } ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE}) + @SecurityRequirement(name = "apiKey") public String sendBitcoin(LitecoinSendRequest litecoinSendRequest) { Security.checkApiCallAllowed(request); @@ -164,4 +168,4 @@ public class CrossChainLitecoinResource { return spendTransaction.getTxId().toString(); } -} \ No newline at end of file +} diff --git a/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java b/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java index 73becab9..1a098d5e 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java @@ -7,6 +7,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import java.util.List; @@ -69,6 +70,7 @@ public class CrossChainTradeBotResource { } ) @ApiErrors({ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") public List getTradeBotStates( @Parameter( description = "Limit to specific blockchain", @@ -110,6 +112,7 @@ public class CrossChainTradeBotResource { ) @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_CRITERIA, ApiError.INSUFFICIENT_BALANCE, ApiError.REPOSITORY_ISSUE, ApiError.ORDER_SIZE_TOO_SMALL}) @SuppressWarnings("deprecation") + @SecurityRequirement(name = "apiKey") public String tradeBotCreator(TradeBotCreateRequest tradeBotCreateRequest) { Security.checkApiCallAllowed(request); @@ -179,6 +182,7 @@ public class CrossChainTradeBotResource { ) @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_CRITERIA, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, ApiError.REPOSITORY_ISSUE}) @SuppressWarnings("deprecation") + @SecurityRequirement(name = "apiKey") public String tradeBotResponder(TradeBotRespondRequest tradeBotRespondRequest) { Security.checkApiCallAllowed(request); @@ -260,6 +264,7 @@ public class CrossChainTradeBotResource { } ) @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") public String tradeBotDelete(String tradePrivateKey58) { Security.checkApiCallAllowed(request); diff --git a/src/main/java/org/qortal/api/resource/ListsResource.java b/src/main/java/org/qortal/api/resource/ListsResource.java 
index 66088ba0..485dbb84 100644 --- a/src/main/java/org/qortal/api/resource/ListsResource.java +++ b/src/main/java/org/qortal/api/resource/ListsResource.java @@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.parameters.RequestBody; import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.tags.Tag; import org.qortal.api.*; @@ -31,12 +32,10 @@ public class ListsResource { HttpServletRequest request; - /* Address blacklist */ - @POST - @Path("/blacklist/addresses") + @Path("/{listName}") @Operation( - summary = "Add one or more QORT addresses to the local blacklist", + summary = "Add items to a new or existing list", requestBody = @RequestBody( required = true, content = @Content( @@ -48,17 +47,23 @@ public class ListsResource { ), responses = { @ApiResponse( - description = "Returns true if all addresses were processed, false if any couldn't be " + + description = "Returns true if all items were processed, false if any couldn't be " + "processed, or an exception on failure. If false or an exception is returned, " + "the list will not be updated, and the request will need to be re-issued.", content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) ) } ) - @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE}) - public String addAddressesToBlacklist(ListRequest listRequest) { + @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") + public String addItemstoList(@PathParam("listName") String listName, + ListRequest listRequest) { Security.checkApiCallAllowed(request); + if (listName == null) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); + } + if (listRequest == null || listRequest.items == null) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); } @@ -66,51 +71,33 @@ public class ListsResource { int successCount = 0; int errorCount = 0; - try (final Repository repository = RepositoryManager.getRepository()) { + for (String item : listRequest.items) { - for (String address : listRequest.items) { - - if (!Crypto.isValidAddress(address)) { - errorCount++; - continue; - } - - AccountData accountData = repository.getAccountRepository().getAccount(address); - // Not found? 
- if (accountData == null) { - errorCount++; - continue; - } - - // Valid address, so go ahead and blacklist it - boolean success = ResourceListManager.getInstance().addToList("blacklist", "addresses", address, false); - if (success) { - successCount++; - } - else { - errorCount++; - } + boolean success = ResourceListManager.getInstance().addToList(listName, item, false); + if (success) { + successCount++; + } + else { + errorCount++; } - } catch (DataException e) { - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } if (successCount > 0 && errorCount == 0) { - // All were successful, so save the blacklist - ResourceListManager.getInstance().saveList("blacklist", "addresses"); + // All were successful, so save the list + ResourceListManager.getInstance().saveList(listName); return "true"; } else { // Something went wrong, so revert - ResourceListManager.getInstance().revertList("blacklist", "addresses"); + ResourceListManager.getInstance().revertList(listName); return "false"; } } @DELETE - @Path("/blacklist/addresses") + @Path("/{listName}") @Operation( - summary = "Remove one or more QORT addresses from the local blacklist", + summary = "Remove one or more items from a list", requestBody = @RequestBody( required = true, content = @Content( @@ -122,15 +109,17 @@ public class ListsResource { ), responses = { @ApiResponse( - description = "Returns true if all addresses were processed, false if any couldn't be " + + description = "Returns true if all items were processed, false if any couldn't be " + "processed, or an exception on failure. If false or an exception is returned, " + "the list will not be updated, and the request will need to be re-issued.", content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) ) } ) - @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE}) - public String removeAddressesFromBlacklist(ListRequest listRequest) { + @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") + public String removeItemsFromList(@PathParam("listName") String listName, + ListRequest listRequest) { Security.checkApiCallAllowed(request); if (listRequest == null || listRequest.items == null) { @@ -140,62 +129,46 @@ public class ListsResource { int successCount = 0; int errorCount = 0; - try (final Repository repository = RepositoryManager.getRepository()) { + for (String address : listRequest.items) { - for (String address : listRequest.items) { - - if (!Crypto.isValidAddress(address)) { - errorCount++; - continue; - } - - AccountData accountData = repository.getAccountRepository().getAccount(address); - // Not found? 
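The generalised list endpoints replace the blacklist-specific ones, so the same call now works for any list name. A sketch of adding an address to a hypothetical "blockedAddresses" list, assuming the resource is mounted under /lists and that the ListRequest body is sent as JSON with an items array (both inferred rather than shown in this hunk), plus the usual localhost:12393 and assumed "X-API-KEY" header:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Scanner;

    public class ListsClientSketch {
        public static void main(String[] args) throws Exception {
            // {"items": [...]} mirrors the ListRequest.items field read by the endpoint above
            String body = "{\"items\": [\"QExampleAddress111111111111111111\"]}"; // placeholder address
            URL url = new URL("http://localhost:12393/lists/blockedAddresses"); // list name is arbitrary
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "application/json");
            conn.setRequestProperty("X-API-KEY", "API-KEY-FROM-apikey.txt"); // assumed header name
            try (OutputStream out = conn.getOutputStream()) {
                out.write(body.getBytes(StandardCharsets.UTF_8));
            }
            try (Scanner scanner = new Scanner(conn.getInputStream(), StandardCharsets.UTF_8.name())) {
                // "true" if every item was added and the list saved; "false" means the list was reverted
                System.out.println(scanner.useDelimiter("\\A").next());
            }
        }
    }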
- if (accountData == null) { - errorCount++; - continue; - } - - // Valid address, so go ahead and blacklist it - // Don't save as we will do this at the end of the process - boolean success = ResourceListManager.getInstance().removeFromList("blacklist", "addresses", address, false); - if (success) { - successCount++; - } - else { - errorCount++; - } + // Attempt to remove the item + // Don't save as we will do this at the end of the process + boolean success = ResourceListManager.getInstance().removeFromList(listName, address, false); + if (success) { + successCount++; + } + else { + errorCount++; } - } catch (DataException e) { - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } if (successCount > 0 && errorCount == 0) { - // All were successful, so save the blacklist - ResourceListManager.getInstance().saveList("blacklist", "addresses"); + // All were successful, so save the list + ResourceListManager.getInstance().saveList(listName); return "true"; } else { // Something went wrong, so revert - ResourceListManager.getInstance().revertList("blacklist", "addresses"); + ResourceListManager.getInstance().revertList(listName); return "false"; } } @GET - @Path("/blacklist/addresses") + @Path("/{listName}") @Operation( - summary = "Fetch the list of blacklisted addresses", + summary = "Fetch all items in a list", responses = { @ApiResponse( - description = "A JSON array of addresses", + description = "A JSON array of items", content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = String.class))) ) } ) - public String getAddressBlacklist() { + @SecurityRequirement(name = "apiKey") + public String getItemsInList(@PathParam("listName") String listName) { Security.checkApiCallAllowed(request); - return ResourceListManager.getInstance().getJSONStringForList("blacklist", "addresses"); + return ResourceListManager.getInstance().getJSONStringForList(listName); } } diff --git a/src/main/java/org/qortal/api/resource/PeersResource.java b/src/main/java/org/qortal/api/resource/PeersResource.java index 244a1569..77c11b99 100644 --- a/src/main/java/org/qortal/api/resource/PeersResource.java +++ b/src/main/java/org/qortal/api/resource/PeersResource.java @@ -23,12 +23,9 @@ import javax.ws.rs.Path; import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; -import org.qortal.api.ApiError; -import org.qortal.api.ApiErrors; -import org.qortal.api.ApiException; -import org.qortal.api.ApiExceptionFactory; -import org.qortal.api.Security; +import org.qortal.api.*; import org.qortal.api.model.ConnectedPeer; +import org.qortal.api.model.PeersSummary; import org.qortal.controller.Controller; import org.qortal.controller.Synchronizer; import org.qortal.controller.Synchronizer.SynchronizationResult; @@ -338,4 +335,39 @@ public class PeersResource { } } + @GET + @Path("/summary") + @Operation( + summary = "Returns total inbound and outbound connections for connected peers", + responses = { + @ApiResponse( + content = @Content( + mediaType = MediaType.APPLICATION_JSON, + array = @ArraySchema( + schema = @Schema( + implementation = PeersSummary.class + ) + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public PeersSummary peersSummary() { + Security.checkApiCallAllowed(request); + + PeersSummary peersSummary = new PeersSummary(); + + List connectedPeers = Network.getInstance().getConnectedPeers().stream().collect(Collectors.toList()); + for (Peer peer : connectedPeers) { + if (peer.isOutbound()) { + 
peersSummary.outboundConnections++; + } + else { + peersSummary.inboundConnections++; + } + } + return peersSummary; + } + } diff --git a/src/main/java/org/qortal/api/resource/RenderResource.java b/src/main/java/org/qortal/api/resource/RenderResource.java new file mode 100644 index 00000000..3a543ee4 --- /dev/null +++ b/src/main/java/org/qortal/api/resource/RenderResource.java @@ -0,0 +1,195 @@ +package org.qortal.api.resource; + +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.*; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import java.io.*; +import java.nio.file.Paths; +import java.util.Map; + +import io.swagger.v3.oas.annotations.Operation; +import io.swagger.v3.oas.annotations.media.Content; +import io.swagger.v3.oas.annotations.media.Schema; +import io.swagger.v3.oas.annotations.parameters.RequestBody; +import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; +import io.swagger.v3.oas.annotations.tags.Tag; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.api.ApiError; +import org.qortal.api.ApiExceptionFactory; +import org.qortal.api.Security; +import org.qortal.arbitrary.misc.Service; +import org.qortal.arbitrary.*; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.controller.arbitrary.ArbitraryDataRenderManager; +import org.qortal.data.transaction.ArbitraryTransactionData.*; +import org.qortal.repository.DataException; +import org.qortal.settings.Settings; +import org.qortal.arbitrary.ArbitraryDataFile.*; +import org.qortal.utils.Base58; + + +@Path("/render") +@Tag(name = "Render") +public class RenderResource { + + private static final Logger LOGGER = LogManager.getLogger(RenderResource.class); + + @Context HttpServletRequest request; + @Context HttpServletResponse response; + @Context ServletContext context; + + @POST + @Path("/preview") + @Operation( + summary = "Generate preview URL based on a user-supplied path and service", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string", example = "/Users/user/Documents/MyStaticWebsite" + ) + ) + ), + responses = { + @ApiResponse( + description = "a temporary URL to preview the website", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @SecurityRequirement(name = "apiKey") + public String preview(String directoryPath) { + Security.checkApiCallAllowed(request); + Method method = Method.PUT; + Compression compression = Compression.ZIP; + + ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(Paths.get(directoryPath), null, Service.WEBSITE, null, method, compression); + try { + arbitraryDataWriter.save(); + } catch (IOException | DataException | InterruptedException | MissingDataException e) { + LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage()); + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE); + } catch (RuntimeException e) { + LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage()); + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA); + } + + ArbitraryDataFile arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile(); + if (arbitraryDataFile != null) { + String digest58 = 
arbitraryDataFile.digest58(); + if (digest58 != null) { + return "http://localhost:12393/render/hash/" + digest58 + "?secret=" + Base58.encode(arbitraryDataFile.getSecret()); + } + } + return "Unable to generate preview URL"; + } + + @POST + @Path("/authorize/{resourceId}") + @SecurityRequirement(name = "apiKey") + public boolean authorizeResource(@PathParam("resourceId") String resourceId) { + Security.checkApiCallAllowed(request); + ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, null, null); + ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource); + return true; + } + + @POST + @Path("authorize/{service}/{resourceId}") + @SecurityRequirement(name = "apiKey") + public boolean authorizeResource(@PathParam("service") Service service, + @PathParam("resourceId") String resourceId) { + Security.checkApiCallAllowed(request); + ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, service, null); + ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource); + return true; + } + + @POST + @Path("authorize/{service}/{resourceId}/{identifier}") + @SecurityRequirement(name = "apiKey") + public boolean authorizeResource(@PathParam("service") Service service, + @PathParam("resourceId") String resourceId, + @PathParam("identifier") String identifier) { + Security.checkApiCallAllowed(request); + ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, service, identifier); + ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource); + return true; + } + + @GET + @Path("/signature/{signature}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature) { + Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null); + return this.get(signature, ResourceIdType.SIGNATURE, null, "/", null, "/render/signature", true, true); + } + + @GET + @Path("/signature/{signature}/{path:.*}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath) { + Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null); + return this.get(signature, ResourceIdType.SIGNATURE, null, inPath,null, "/render/signature", true, true); + } + + @GET + @Path("/hash/{hash}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58) { + Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null); + return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, "/", secret58, "/render/hash", true, false); + } + + @GET + @Path("/hash/{hash}/{path:.*}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getPathByHash(@PathParam("hash") String hash58, @PathParam("path") String inPath, + @QueryParam("secret") String secret58) { + Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null); + return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, inPath, secret58, "/render/hash", true, false); + } + + @GET + @Path("{service}/{name}/{path:.*}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getPathByName(@PathParam("service") Service service, + @PathParam("name") String name, + @PathParam("path") String inPath) { + Security.requirePriorAuthorization(request, name, service, null); + String prefix = String.format("/render/%s", 
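A sketch of the preview workflow above: the client posts a local directory path to /render/preview, the node stages the directory as an arbitrary data file, and it returns a temporary localhost URL (including the decryption secret) that can be opened in a browser. Again, the "X-API-KEY" header name is an assumption about local configuration.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Scanner;

    public class PreviewClientSketch {
        public static void main(String[] args) throws Exception {
            String directoryPath = "/Users/user/Documents/MyStaticWebsite"; // example path from the API docs above
            HttpURLConnection conn = (HttpURLConnection)
                    new URL("http://localhost:12393/render/preview").openConnection();
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "text/plain");
            conn.setRequestProperty("X-API-KEY", "API-KEY-FROM-apikey.txt"); // assumed header name
            try (OutputStream out = conn.getOutputStream()) {
                out.write(directoryPath.getBytes(StandardCharsets.UTF_8));
            }
            try (Scanner scanner = new Scanner(conn.getInputStream(), StandardCharsets.UTF_8.name())) {
                // Prints the temporary preview URL built by preview() above, secret query param included
                System.out.println(scanner.useDelimiter("\\A").next());
            }
        }
    }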
service); + return this.get(name, ResourceIdType.NAME, service, inPath, null, prefix, true, true); + } + + @GET + @Path("{service}/{name}") + @SecurityRequirement(name = "apiKey") + public HttpServletResponse getIndexByName(@PathParam("service") Service service, + @PathParam("name") String name) { + Security.requirePriorAuthorization(request, name, service, null); + String prefix = String.format("/render/%s", service); + return this.get(name, ResourceIdType.NAME, service, "/", null, prefix, true, true); + } + + + + private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath, + String secret58, String prefix, boolean usePrefix, boolean async) { + + ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath, + secret58, prefix, usePrefix, async, request, response, context); + return renderer.render(); + } + +} diff --git a/src/main/java/org/qortal/api/resource/TransactionsResource.java b/src/main/java/org/qortal/api/resource/TransactionsResource.java index 585dac0b..30f242c4 100644 --- a/src/main/java/org/qortal/api/resource/TransactionsResource.java +++ b/src/main/java/org/qortal/api/resource/TransactionsResource.java @@ -348,7 +348,7 @@ public class TransactionsResource { try (final Repository repository = RepositoryManager.getRepository()) { List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(startBlock, blockLimit, txGroupId, - txTypes, null, address, confirmationStatus, limit, offset, reverse); + txTypes, null, null, address, confirmationStatus, limit, offset, reverse); // Expand signatures to transactions List transactions = new ArrayList<>(signatures.size()); @@ -418,32 +418,83 @@ public class TransactionsResource { } @POST - @Path("/sign") + @Path("/convert") @Operation( - summary = "Sign a raw, unsigned transaction", - requestBody = @RequestBody( - required = true, - content = @Content( - mediaType = MediaType.APPLICATION_JSON, - schema = @Schema( - implementation = SimpleTransactionSignRequest.class - ) - ) - ), - responses = { - @ApiResponse( - description = "raw, signed transaction encoded in Base58", - content = @Content( - mediaType = MediaType.TEXT_PLAIN, - schema = @Schema( - type = "string" + summary = "Convert transaction bytes into bytes for signing", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string", + description = "raw, unsigned transaction in base58 encoding", + example = "raw transaction base58" + ) ) - ) - ) - } + ), + responses = { + @ApiResponse( + description = "raw, unsigned transaction encoded in Base58, ready for signing", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } ) @ApiErrors({ - ApiError.NON_PRODUCTION, ApiError.INVALID_PRIVATE_KEY, ApiError.TRANSFORMATION_ERROR + ApiError.NON_PRODUCTION, ApiError.TRANSFORMATION_ERROR + }) + public String convertTransactionForSigning(String rawInputBytes58) { + byte[] rawInputBytes = Base58.decode(rawInputBytes58); + if (rawInputBytes.length == 0) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.JSON); + + try { + // Append null signature on the end before transformation + byte[] rawBytes = Bytes.concat(rawInputBytes, new byte[TransactionTransformer.SIGNATURE_LENGTH]); + + TransactionData transactionData = TransactionTransformer.fromBytes(rawBytes); + if (transactionData == null) + throw 
ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA); + + byte[] convertedBytes = TransactionTransformer.toBytesForSigning(transactionData); + + return Base58.encode(convertedBytes); + } catch (TransformationException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e); + } + } + + @POST + @Path("/sign") + @Operation( + summary = "Sign a raw, unsigned transaction", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.APPLICATION_JSON, + schema = @Schema( + implementation = SimpleTransactionSignRequest.class + ) + ) + ), + responses = { + @ApiResponse( + description = "raw, signed transaction encoded in Base58", + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "string" + ) + ) + ) + } + ) + @ApiErrors({ + ApiError.NON_PRODUCTION, ApiError.INVALID_PRIVATE_KEY, ApiError.TRANSFORMATION_ERROR }) public String signTransaction(SimpleTransactionSignRequest signRequest) { if (Settings.getInstance().isApiRestricted()) diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataBuildQueueItem.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataBuildQueueItem.java new file mode 100644 index 00000000..ffbf8fe3 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataBuildQueueItem.java @@ -0,0 +1,76 @@ +package org.qortal.arbitrary; + +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.ArbitraryDataFile.*; +import org.qortal.arbitrary.misc.Service; +import org.qortal.repository.DataException; +import org.qortal.utils.NTP; + +import java.io.IOException; + +public class ArbitraryDataBuildQueueItem extends ArbitraryDataResource { + + private final Long creationTimestamp; + private Long buildStartTimestamp = null; + private Long buildEndTimestamp = null; + private boolean failed = false; + + /* The maximum amount of time to spend on a single build */ + // TODO: interrupt an in-progress build + public static long BUILD_TIMEOUT = 60*1000L; // 60 seconds + /* The amount of time to remember that a build has failed, to avoid retries */ + public static long FAILURE_TIMEOUT = 5*60*1000L; // 5 minutes + + public ArbitraryDataBuildQueueItem(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) { + super(resourceId, resourceIdType, service, identifier); + + this.creationTimestamp = NTP.getTime(); + } + + public void build() throws IOException, DataException, MissingDataException { + Long now = NTP.getTime(); + if (now == null) { + throw new DataException("NTP time hasn't synced yet"); + } + + this.buildStartTimestamp = now; + ArbitraryDataReader arbitraryDataReader = + new ArbitraryDataReader(this.resourceId, this.resourceIdType, this.service, this.identifier); + + try { + arbitraryDataReader.loadSynchronously(true); + } finally { + this.buildEndTimestamp = NTP.getTime(); + } + } + + public boolean isBuilding() { + return this.buildStartTimestamp != null; + } + + public boolean isQueued() { + return this.buildStartTimestamp == null; + } + + public boolean hasReachedBuildTimeout(Long now) { + if (now == null || this.creationTimestamp == null) { + return true; + } + return now - this.creationTimestamp > BUILD_TIMEOUT; + } + + public boolean hasReachedFailureTimeout(Long now) { + if (now == null || this.buildStartTimestamp == null) { + return true; + } + return now - this.buildStartTimestamp > FAILURE_TIMEOUT; + } + + public Long getBuildStartTimestamp() { + return 
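The queue item's timestamps drive two separate windows: BUILD_TIMEOUT is measured from creation and bounds how long a queued or in-progress build is trusted, while FAILURE_TIMEOUT is measured from the build start and decides when a previously attempted build may be retried. A rough illustration of how a hypothetical queue processor (not part of this change set) might use those checks:

    import org.qortal.arbitrary.ArbitraryDataBuildQueueItem;
    import org.qortal.utils.NTP;

    public class BuildQueueSketch {
        // Hypothetical helper: decide whether a queue item is still worth keeping around
        public static boolean shouldDiscard(ArbitraryDataBuildQueueItem item) {
            Long now = NTP.getTime();
            if (item.isQueued()) {
                // Never started: drop it once the overall build window has elapsed
                return item.hasReachedBuildTimeout(now);
            }
            // Started (and possibly failed): forget it once the failure window has elapsed, allowing a retry
            return item.hasReachedFailureTimeout(now);
        }
    }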
this.buildStartTimestamp; + } + + public void setFailed(boolean failed) { + this.failed = failed; + } +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataBuilder.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataBuilder.java new file mode 100644 index 00000000..4f0e3835 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataBuilder.java @@ -0,0 +1,280 @@ +package org.qortal.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.metadata.ArbitraryDataMetadataCache; +import org.qortal.arbitrary.misc.Service; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.ArbitraryTransactionData.Method; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType; +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class ArbitraryDataBuilder { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataBuilder.class); + + private final String name; + private final Service service; + private final String identifier; + + private boolean canRequestMissingFiles; + + private List transactions; + private ArbitraryTransactionData latestPutTransaction; + private final List paths; + private byte[] latestSignature; + private Path finalPath; + private int layerCount; + + public ArbitraryDataBuilder(String name, Service service, String identifier) { + this.name = name; + this.service = service; + this.identifier = identifier; + this.paths = new ArrayList<>(); + + // By default we can request missing files + // Callers can use setCanRequestMissingFiles(false) to prevent it + this.canRequestMissingFiles = true; + } + + /** + * Process transactions, but do not build anything + * This is useful for checking the status of a given resource + * + * @throws DataException + * @throws IOException + * @throws MissingDataException + */ + public void process() throws DataException, IOException, MissingDataException { + this.fetchTransactions(); + this.validateTransactions(); + this.processTransactions(); + this.validatePaths(); + this.findLatestSignature(); + } + + /** + * Build the latest state of a given resource + * + * @throws DataException + * @throws IOException + * @throws MissingDataException + */ + public void build() throws DataException, IOException, MissingDataException { + this.process(); + this.buildLatestState(); + this.cacheLatestSignature(); + } + + private void fetchTransactions() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Get the most recent PUT + ArbitraryTransactionData latestPut = repository.getArbitraryRepository() + .getLatestTransaction(this.name, this.service, Method.PUT, this.identifier); + if (latestPut == null) { + String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s", + this.name, this.service, this.identifierString()); + throw new DataException(message); + } + this.latestPutTransaction = latestPut; + + // Load all transactions since the latest PUT + List transactionDataList = repository.getArbitraryRepository() 
+ .getArbitraryTransactions(this.name, this.service, this.identifier, latestPut.getTimestamp()); + + this.transactions = transactionDataList; + this.layerCount = transactionDataList.size(); + } + } + + private void validateTransactions() throws DataException { + List transactionDataList = new ArrayList<>(this.transactions); + ArbitraryTransactionData latestPut = this.latestPutTransaction; + + if (latestPut == null) { + throw new DataException("Cannot PATCH without existing PUT. Deploy using PUT first."); + } + if (latestPut.getMethod() != Method.PUT) { + throw new DataException("Expected PUT but received PATCH"); + } + if (transactionDataList.size() == 0) { + throw new DataException(String.format("No transactions found for name %s, service %s, " + + "identifier: %s, since %d", name, service, this.identifierString(), latestPut.getTimestamp())); + } + + // Verify that the signature of the first transaction matches the latest PUT + ArbitraryTransactionData firstTransaction = transactionDataList.get(0); + if (!Arrays.equals(firstTransaction.getSignature(), latestPut.getSignature())) { + throw new DataException("First transaction did not match latest PUT transaction"); + } + + // Remove the first transaction, as it should be the only PUT + transactionDataList.remove(0); + + for (ArbitraryTransactionData transactionData : transactionDataList) { + if (transactionData == null) { + throw new DataException("Transaction not found"); + } + if (transactionData.getMethod() != Method.PATCH) { + throw new DataException("Expected PATCH but received PUT"); + } + } + } + + private void processTransactions() throws IOException, DataException, MissingDataException { + List transactionDataList = new ArrayList<>(this.transactions); + + int count = 0; + for (ArbitraryTransactionData transactionData : transactionDataList) { + LOGGER.trace("Found arbitrary transaction {}", Base58.encode(transactionData.getSignature())); + count++; + + // Build the data file, overwriting anything that was previously there + String sig58 = Base58.encode(transactionData.getSignature()); + ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(sig58, ResourceIdType.TRANSACTION_DATA, + this.service, this.identifier); + arbitraryDataReader.setTransactionData(transactionData); + arbitraryDataReader.setCanRequestMissingFiles(this.canRequestMissingFiles); + boolean hasMissingData = false; + try { + arbitraryDataReader.loadSynchronously(true); + } + catch (MissingDataException e) { + hasMissingData = true; + } + + // Handle missing data + if (hasMissingData) { + if (!this.canRequestMissingFiles) { + throw new MissingDataException("Files are missing but were not requested."); + } + if (count == transactionDataList.size()) { + // This is the final transaction in the list, so we need to fail + throw new MissingDataException("Requesting missing files. 
Please wait and try again."); + } + // There are more transactions, so we should process them to give them the opportunity to request data + continue; + } + + // By this point we should have all data needed to build the layers + Path path = arbitraryDataReader.getFilePath(); + if (path == null) { + throw new DataException(String.format("Null path when building data from transaction %s", sig58)); + } + if (!Files.exists(path)) { + throw new DataException(String.format("Path doesn't exist when building data from transaction %s", sig58)); + } + paths.add(path); + } + } + + private void findLatestSignature() throws DataException { + if (this.transactions.size() == 0) { + throw new DataException("Unable to find latest signature from empty transaction list"); + } + + // Find the latest signature + ArbitraryTransactionData latestTransaction = this.transactions.get(this.transactions.size() - 1); + if (latestTransaction == null) { + throw new DataException("Unable to find latest signature from null transaction"); + } + + this.latestSignature = latestTransaction.getSignature(); + } + + private void validatePaths() throws DataException { + if (this.paths.isEmpty()) { + throw new DataException("No paths available from which to build latest state"); + } + } + + private void buildLatestState() throws IOException, DataException { + if (this.paths.size() == 1) { + // No patching needed + this.finalPath = this.paths.get(0); + return; + } + + Path pathBefore = this.paths.get(0); + boolean validateAllLayers = Settings.getInstance().shouldValidateAllDataLayers(); + + // Loop from the second path onwards + for (int i=1; i addedPaths; + private final List modifiedPaths; + private final List removedPaths; + + private int totalFileCount; + private ArbitraryDataMetadataPatch metadata; + + public ArbitraryDataDiff(Path pathBefore, Path pathAfter, byte[] previousSignature) throws DataException { + this.pathBefore = pathBefore; + this.pathAfter = pathAfter; + this.previousSignature = previousSignature; + + this.addedPaths = new ArrayList<>(); + this.modifiedPaths = new ArrayList<>(); + this.removedPaths = new ArrayList<>(); + + this.createRandomIdentifier(); + this.createOutputDirectory(); + } + + public void compute() throws IOException, DataException { + try { + this.preExecute(); + this.hashPreviousState(); + this.findAddedOrModifiedFiles(); + this.findRemovedFiles(); + this.validate(); + this.hashCurrentState(); + this.writeMetadata(); + + } finally { + this.postExecute(); + } + } + + private void preExecute() { + LOGGER.debug("Generating diff..."); + } + + private void postExecute() { + + } + + private void createRandomIdentifier() { + this.identifier = UUID.randomUUID().toString(); + } + + private void createOutputDirectory() throws DataException { + // Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware + String baseDir = Settings.getInstance().getTempDataPath(); + Path tempDir = Paths.get(baseDir, "diff", this.identifier); + try { + Files.createDirectories(tempDir); + } catch (IOException e) { + throw new DataException("Unable to create temp directory"); + } + this.diffPath = tempDir; + } + + private void hashPreviousState() throws IOException, DataException { + ArbitraryDataDigest digest = new ArbitraryDataDigest(this.pathBefore); + digest.compute(); + this.previousHash = digest.getHash(); + } + + private void findAddedOrModifiedFiles() throws IOException { + try { + final Path pathBeforeAbsolute = this.pathBefore.toAbsolutePath(); + final 
Path pathAfterAbsolute = this.pathAfter.toAbsolutePath(); + final Path diffPathAbsolute = this.diffPath.toAbsolutePath(); + final ArbitraryDataDiff diff = this; + + // Check for additions or modifications + Files.walkFileTree(this.pathAfter, new FileVisitor<>() { + + @Override + public FileVisitResult preVisitDirectory(Path after, BasicFileAttributes attrs) { + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFile(Path afterPathAbsolute, BasicFileAttributes attrs) throws IOException { + Path afterPathRelative = pathAfterAbsolute.relativize(afterPathAbsolute.toAbsolutePath()); + Path beforePathAbsolute = pathBeforeAbsolute.resolve(afterPathRelative); + + if (afterPathRelative.startsWith(".qortal")) { + // Ignore the .qortal metadata folder + return FileVisitResult.CONTINUE; + } + + boolean wasAdded = false; + boolean wasModified = false; + + if (!Files.exists(beforePathAbsolute)) { + LOGGER.trace("File was added: {}", afterPathRelative.toString()); + diff.addedPaths.add(afterPathRelative); + wasAdded = true; + } + else if (Files.size(afterPathAbsolute) != Files.size(beforePathAbsolute)) { + // Check file size first because it's quicker + LOGGER.trace("File size was modified: {}", afterPathRelative.toString()); + wasModified = true; + } + else if (!Arrays.equals(ArbitraryDataDiff.digestFromPath(afterPathAbsolute), ArbitraryDataDiff.digestFromPath(beforePathAbsolute))) { + // Check hashes as a last resort + LOGGER.trace("File contents were modified: {}", afterPathRelative.toString()); + wasModified = true; + } + + if (wasAdded) { + diff.copyFilePathToBaseDir(afterPathAbsolute, diffPathAbsolute, afterPathRelative); + } + if (wasModified) { + try { + diff.pathModified(beforePathAbsolute, afterPathAbsolute, afterPathRelative, diffPathAbsolute); + } catch (DataException e) { + // We can only throw IOExceptions because we are overriding FileVisitor.visitFile() + throw new IOException(e); + } + } + + // Keep a tally of the total number of files to help with decision making + diff.totalFileCount++; + + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFileFailed(Path file, IOException e){ + LOGGER.info("File visit failed: {}, error: {}", file.toString(), e.getMessage()); + // TODO: throw exception? 
+ return FileVisitResult.TERMINATE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException e) { + return FileVisitResult.CONTINUE; + } + + }); + } catch (IOException e) { + LOGGER.info("IOException when walking through file tree: {}", e.getMessage()); + throw(e); + } + } + + private void findRemovedFiles() throws IOException { + try { + final Path pathBeforeAbsolute = this.pathBefore.toAbsolutePath(); + final Path pathAfterAbsolute = this.pathAfter.toAbsolutePath(); + final ArbitraryDataDiff diff = this; + + // Check for removals + Files.walkFileTree(this.pathBefore, new FileVisitor<>() { + + @Override + public FileVisitResult preVisitDirectory(Path before, BasicFileAttributes attrs) { + Path directoryPathBefore = pathBeforeAbsolute.relativize(before.toAbsolutePath()); + Path directoryPathAfter = pathAfterAbsolute.resolve(directoryPathBefore); + + if (directoryPathBefore.startsWith(".qortal")) { + // Ignore the .qortal metadata folder + return FileVisitResult.CONTINUE; + } + + if (!Files.exists(directoryPathAfter)) { + LOGGER.trace("Directory was removed: {}", directoryPathAfter.toString()); + diff.removedPaths.add(directoryPathBefore); + // TODO: we might need to mark directories differently to files + } + + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFile(Path before, BasicFileAttributes attrs) { + Path filePathBefore = pathBeforeAbsolute.relativize(before.toAbsolutePath()); + Path filePathAfter = pathAfterAbsolute.resolve(filePathBefore); + + if (filePathBefore.startsWith(".qortal")) { + // Ignore the .qortal metadata folder + return FileVisitResult.CONTINUE; + } + + if (!Files.exists(filePathAfter)) { + LOGGER.trace("File was removed: {}", filePathBefore.toString()); + diff.removedPaths.add(filePathBefore); + } + + // Keep a tally of the total number of files to help with decision making + diff.totalFileCount++; + + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFileFailed(Path file, IOException e){ + LOGGER.info("File visit failed: {}, error: {}", file.toString(), e.getMessage()); + // TODO: throw exception? + return FileVisitResult.TERMINATE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException e) { + return FileVisitResult.CONTINUE; + } + + }); + } catch (IOException e) { + throw new IOException(String.format("IOException when walking through file tree: %s", e.getMessage())); + } + } + + private void validate() throws DataException { + if (this.addedPaths.isEmpty() && this.modifiedPaths.isEmpty() && this.removedPaths.isEmpty()) { + throw new DataException("Current state matches previous state. 
Nothing to do."); + } + } + + private void hashCurrentState() throws IOException, DataException { + ArbitraryDataDigest digest = new ArbitraryDataDigest(this.pathAfter); + digest.compute(); + this.currentHash = digest.getHash(); + } + + private void writeMetadata() throws IOException, DataException { + ArbitraryDataMetadataPatch metadata = new ArbitraryDataMetadataPatch(this.diffPath); + metadata.setAddedPaths(this.addedPaths); + metadata.setModifiedPaths(this.modifiedPaths); + metadata.setRemovedPaths(this.removedPaths); + metadata.setPreviousSignature(this.previousSignature); + metadata.setPreviousHash(this.previousHash); + metadata.setCurrentHash(this.currentHash); + metadata.write(); + this.metadata = metadata; + } + + + private void pathModified(Path beforePathAbsolute, Path afterPathAbsolute, Path afterPathRelative, + Path destinationBasePathAbsolute) throws IOException, DataException { + + Path destination = Paths.get(destinationBasePathAbsolute.toString(), afterPathRelative.toString()); + long beforeSize = Files.size(beforePathAbsolute); + long afterSize = Files.size(afterPathAbsolute); + DiffType diffType; + + if (beforeSize > MAX_DIFF_FILE_SIZE || afterSize > MAX_DIFF_FILE_SIZE) { + // Files are large, so don't attempt a diff + this.copyFilePathToBaseDir(afterPathAbsolute, destinationBasePathAbsolute, afterPathRelative); + diffType = DiffType.COMPLETE_FILE; + } + else { + // Attempt to create patch using java-diff-utils + UnifiedDiffPatch unifiedDiffPatch = new UnifiedDiffPatch(beforePathAbsolute, afterPathAbsolute, destination); + unifiedDiffPatch.create(); + if (unifiedDiffPatch.isValid()) { + diffType = DiffType.UNIFIED_DIFF; + } + else { + // Diff failed validation, so copy the whole file instead + this.copyFilePathToBaseDir(afterPathAbsolute, destinationBasePathAbsolute, afterPathRelative); + diffType = DiffType.COMPLETE_FILE; + } + } + + ModifiedPath modifiedPath = new ModifiedPath(afterPathRelative, diffType); + this.modifiedPaths.add(modifiedPath); + } + + private void copyFilePathToBaseDir(Path source, Path base, Path relativePath) throws IOException { + if (!Files.exists(source)) { + throw new IOException(String.format("File not found: %s", source.toString())); + } + + // Ensure parent folders exist in the destination + Path dest = Paths.get(base.toString(), relativePath.toString()); + File file = new File(dest.toString()); + File parent = file.getParentFile(); + if (parent != null) { + parent.mkdirs(); + } + + LOGGER.trace("Copying {} to {}", source, dest); + Files.copy(source, dest, StandardCopyOption.REPLACE_EXISTING); + } + + + public Path getDiffPath() { + return this.diffPath; + } + + public int getTotalFileCount() { + return this.totalFileCount; + } + + public ArbitraryDataMetadataPatch getMetadata() { + return this.metadata; + } + + + // Utils + + private static byte[] digestFromPath(Path path) { + try { + return Crypto.digest(path.toFile()); + } catch (IOException e) { + return null; + } + } + +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataDigest.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataDigest.java new file mode 100644 index 00000000..9703b231 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataDigest.java @@ -0,0 +1,73 @@ +package org.qortal.arbitrary; + +import org.qortal.repository.DataException; +import org.qortal.utils.Base58; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import 
java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +public class ArbitraryDataDigest { + + private final Path path; + private byte[] hash; + + public ArbitraryDataDigest(Path path) { + this.path = path; + } + + public void compute() throws IOException, DataException { + List allPaths = Files.walk(path).filter(Files::isRegularFile).sorted().collect(Collectors.toList()); + Path basePathAbsolute = this.path.toAbsolutePath(); + + MessageDigest sha256; + try { + sha256 = MessageDigest.getInstance("SHA-256"); + } catch (NoSuchAlgorithmException e) { + throw new DataException("SHA-256 hashing algorithm unavailable"); + } + + for (Path path : allPaths) { + // We need to work with paths relative to the base path, to ensure the same hash + // is generated on different systems + Path relativePath = basePathAbsolute.relativize(path.toAbsolutePath()); + + // Exclude Qortal folder since it can be different each time + // We only care about hashing the actual user data + if (relativePath.startsWith(".qortal/")) { + continue; + } + + // Hash path + byte[] filePathBytes = relativePath.toString().getBytes(StandardCharsets.UTF_8); + sha256.update(filePathBytes); + + // Hash contents + byte[] fileContent = Files.readAllBytes(path); + sha256.update(fileContent); + } + this.hash = sha256.digest(); + } + + public boolean isHashValid(byte[] hash) { + return Arrays.equals(hash, this.hash); + } + + public byte[] getHash() { + return this.hash; + } + + public String getHash58() { + if (this.hash == null) { + return null; + } + return Base58.encode(this.hash); + } + +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java new file mode 100644 index 00000000..1eaeda3c --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java @@ -0,0 +1,735 @@ +package org.qortal.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata; +import org.qortal.crypto.Crypto; +import org.qortal.repository.DataException; +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; +import org.qortal.utils.FilesystemUtils; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.*; +import java.util.stream.Stream; + +import static java.util.Arrays.stream; +import static java.util.stream.Collectors.toMap; + + +public class ArbitraryDataFile { + + // Validation results + public enum ValidationResult { + OK(1), + FILE_TOO_LARGE(10), + FILE_NOT_FOUND(11); + + public final int value; + + private static final Map map = stream(ArbitraryDataFile.ValidationResult.values()).collect(toMap(result -> result.value, result -> result)); + + ValidationResult(int value) { + this.value = value; + } + + public static ArbitraryDataFile.ValidationResult valueOf(int value) { + return map.get(value); + } + } + + // Resource ID types + public enum ResourceIdType { + SIGNATURE, + FILE_HASH, + TRANSACTION_DATA, + NAME + } + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFile.class); + + public static final long MAX_FILE_SIZE = 500 * 1024 * 1024; // 500MiB + public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1MiB + public static int SHORT_DIGEST_LENGTH = 8; + + protected Path filePath; + protected String hash58; + protected byte[] 
signature; + private ArrayList chunks; + private byte[] secret; + + // Metadata + private byte[] metadataHash; + private ArbitraryDataFile metadataFile; + private ArbitraryDataTransactionMetadata metadata; + + + public ArbitraryDataFile() { + } + + public ArbitraryDataFile(String hash58, byte[] signature) throws DataException { + this.createDataDirectory(); + this.filePath = ArbitraryDataFile.getOutputFilePath(hash58, signature, false); + this.chunks = new ArrayList<>(); + this.hash58 = hash58; + this.signature = signature; + } + + public ArbitraryDataFile(byte[] fileContent, byte[] signature) throws DataException { + if (fileContent == null) { + LOGGER.error("fileContent is null"); + return; + } + + this.hash58 = Base58.encode(Crypto.digest(fileContent)); + this.signature = signature; + LOGGER.trace(String.format("File digest: %s, size: %d bytes", this.hash58, fileContent.length)); + + Path outputFilePath = getOutputFilePath(this.hash58, signature, true); + File outputFile = outputFilePath.toFile(); + try (FileOutputStream outputStream = new FileOutputStream(outputFile)) { + outputStream.write(fileContent); + this.filePath = outputFilePath; + // Verify hash + if (!this.hash58.equals(this.digest58())) { + LOGGER.error("Hash {} does not match file digest {}", this.hash58, this.digest58()); + this.delete(); + throw new DataException("Data file digest validation failed"); + } + } catch (IOException e) { + throw new DataException("Unable to write data to file"); + } + } + + public static ArbitraryDataFile fromHash58(String hash58, byte[] signature) throws DataException { + return new ArbitraryDataFile(hash58, signature); + } + + public static ArbitraryDataFile fromHash(byte[] hash, byte[] signature) throws DataException { + return ArbitraryDataFile.fromHash58(Base58.encode(hash), signature); + } + + public static ArbitraryDataFile fromPath(Path path, byte[] signature) { + if (path == null) { + return null; + } + File file = path.toFile(); + if (file.exists()) { + try { + byte[] digest = Crypto.digest(file); + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature); + + // Copy file to data directory if needed + if (Files.exists(path) && !arbitraryDataFile.isInBaseDirectory(path)) { + arbitraryDataFile.copyToDataDirectory(path, signature); + } + // Or, if it's already in the data directory, we may need to move it + else if (!path.equals(arbitraryDataFile.getFilePath())) { + // Wrong path, so relocate (but don't cleanup, as the source folder may still be needed by the caller) + Path dest = arbitraryDataFile.getFilePath(); + FilesystemUtils.moveFile(path, dest, false); + } + return arbitraryDataFile; + + } catch (IOException | DataException e) { + LOGGER.error("Couldn't compute digest for ArbitraryDataFile"); + } + } + return null; + } + + public static ArbitraryDataFile fromFile(File file, byte[] signature) { + return ArbitraryDataFile.fromPath(Paths.get(file.getPath()), signature); + } + + private boolean createDataDirectory() { + // Create the data directory if it doesn't exist + String dataPath = Settings.getInstance().getDataPath(); + Path dataDirectory = Paths.get(dataPath); + try { + Files.createDirectories(dataDirectory); + } catch (IOException e) { + LOGGER.error("Unable to create data directory"); + return false; + } + return true; + } + + private Path copyToDataDirectory(Path sourcePath, byte[] signature) throws DataException { + if (this.hash58 == null || this.filePath == null) { + return null; + } + Path outputFilePath = getOutputFilePath(this.hash58, 
signature, true); + sourcePath = sourcePath.toAbsolutePath(); + Path destPath = outputFilePath.toAbsolutePath(); + try { + return Files.copy(sourcePath, destPath, StandardCopyOption.REPLACE_EXISTING); + } catch (IOException e) { + throw new DataException(String.format("Unable to copy file %s to data directory %s", sourcePath, destPath)); + } + } + + public static Path getOutputFilePath(String hash58, byte[] signature, boolean createDirectories) throws DataException { + Path directory; + + if (hash58 == null) { + return null; + } + if (signature != null) { + // Key by signature + String signature58 = Base58.encode(signature); + String sig58First2Chars = signature58.substring(0, 2).toLowerCase(); + String sig58Next2Chars = signature58.substring(2, 4).toLowerCase(); + directory = Paths.get(Settings.getInstance().getDataPath(), sig58First2Chars, sig58Next2Chars, signature58); + } + else { + // Put files without signatures in a "_misc" directory, and the files will be relocated later + String hash58First2Chars = hash58.substring(0, 2).toLowerCase(); + String hash58Next2Chars = hash58.substring(2, 4).toLowerCase(); + directory = Paths.get(Settings.getInstance().getDataPath(), "_misc", hash58First2Chars, hash58Next2Chars); + } + + if (createDirectories) { + try { + Files.createDirectories(directory); + } catch (IOException e) { + throw new DataException("Unable to create data subdirectory"); + } + } + return Paths.get(directory.toString(), hash58); + } + + public ValidationResult isValid() { + try { + // Ensure the file exists on disk + if (!Files.exists(this.filePath)) { + LOGGER.error("File doesn't exist at path {}", this.filePath); + return ValidationResult.FILE_NOT_FOUND; + } + + // Validate the file size + long fileSize = Files.size(this.filePath); + if (fileSize > MAX_FILE_SIZE) { + LOGGER.error(String.format("ArbitraryDataFile is too large: %d bytes (max size: %d bytes)", fileSize, MAX_FILE_SIZE)); + return ArbitraryDataFile.ValidationResult.FILE_TOO_LARGE; + } + + } catch (IOException e) { + return ValidationResult.FILE_NOT_FOUND; + } + + return ValidationResult.OK; + } + + public void validateFileSize(long expectedSize) throws DataException { + // Verify that we can determine the file's size + long fileSize = 0; + try { + fileSize = Files.size(this.getFilePath()); + } catch (IOException e) { + throw new DataException(String.format("Couldn't get file size for transaction %s", Base58.encode(signature))); + } + + // Ensure the file's size matches the size reported by the transaction + if (fileSize != expectedSize) { + throw new DataException(String.format("File size mismatch for transaction %s", Base58.encode(signature))); + } + } + + private void addChunk(ArbitraryDataFileChunk chunk) { + this.chunks.add(chunk); + } + + private void addChunkHashes(List chunkHashes) throws DataException { + if (chunkHashes == null || chunkHashes.isEmpty()) { + return; + } + for (byte[] chunkHash : chunkHashes) { + ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature); + this.addChunk(chunk); + } + } + + public List getChunkHashes() { + List hashes = new ArrayList<>(); + if (this.chunks == null || this.chunks.isEmpty()) { + return hashes; + } + + for (ArbitraryDataFileChunk chunkData : this.chunks) { + hashes.add(chunkData.getHash()); + } + + return hashes; + } + + public int split(int chunkSize) throws DataException { + try { + + File file = this.getFile(); + byte[] buffer = new byte[chunkSize]; + this.chunks = new ArrayList<>(); + + if (file != null) { + try 
(FileInputStream fileInputStream = new FileInputStream(file); + BufferedInputStream bis = new BufferedInputStream(fileInputStream)) { + + int numberOfBytes; + while ((numberOfBytes = bis.read(buffer)) > 0) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + out.write(buffer, 0, numberOfBytes); + out.flush(); + + ArbitraryDataFileChunk chunk = new ArbitraryDataFileChunk(out.toByteArray(), this.signature); + ValidationResult validationResult = chunk.isValid(); + if (validationResult == ValidationResult.OK) { + this.chunks.add(chunk); + } else { + throw new DataException(String.format("Chunk %s is invalid", chunk)); + } + } + } + } + } + } catch (Exception e) { + throw new DataException("Unable to split file into chunks"); + } + + return this.chunks.size(); + } + + public boolean join() { + // Ensure we have chunks + if (this.chunks != null && this.chunks.size() > 0) { + + // Create temporary path for joined file + // Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware + String baseDir = Settings.getInstance().getTempDataPath(); + Path tempDir = Paths.get(baseDir, "join"); + try { + Files.createDirectories(tempDir); + } catch (IOException e) { + return false; + } + + // Join the chunks + Path outputPath = Paths.get(tempDir.toString(), this.chunks.get(0).digest58()); + File outputFile = new File(outputPath.toString()); + try (BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(outputFile))) { + for (ArbitraryDataFileChunk chunk : this.chunks) { + File sourceFile = chunk.filePath.toFile(); + BufferedInputStream in = new BufferedInputStream(new FileInputStream(sourceFile)); + byte[] buffer = new byte[2048]; + int inSize; + while ((inSize = in.read(buffer)) != -1) { + out.write(buffer, 0, inSize); + } + in.close(); + } + out.close(); + + // Copy temporary file to data directory + this.filePath = this.copyToDataDirectory(outputPath, this.signature); + if (FilesystemUtils.pathInsideDataOrTempPath(outputPath)) { + Files.delete(outputPath); + } + + return true; + } catch (FileNotFoundException e) { + return false; + } catch (IOException | DataException e) { + return false; + } + } + return false; + } + + public boolean delete() { + // Delete the complete file + // ... 
but only if it's inside the Qortal data or temp directory + if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) { + if (Files.exists(this.filePath)) { + try { + Files.delete(this.filePath); + this.cleanupFilesystem(); + LOGGER.debug("Deleted file {}", this.filePath); + return true; + } catch (IOException e) { + LOGGER.warn("Couldn't delete file at path {}", this.filePath); + } + } + } + return false; + } + + public boolean deleteAllChunks() { + boolean success = false; + + // Delete the individual chunks + if (this.chunks != null && this.chunks.size() > 0) { + Iterator iterator = this.chunks.iterator(); + while (iterator.hasNext()) { + ArbitraryDataFileChunk chunk = (ArbitraryDataFileChunk) iterator.next(); + success = chunk.delete(); + iterator.remove(); + } + } + return success; + } + + public boolean deleteMetadata() { + if (this.metadataFile != null && this.metadataFile.exists()) { + return this.metadataFile.delete(); + } + return false; + } + + public boolean deleteAll() { + // Delete the complete file + boolean fileDeleted = this.delete(); + + // Delete the metadata file + boolean metadataDeleted = this.deleteMetadata(); + + // Delete the individual chunks + boolean chunksDeleted = this.deleteAllChunks(); + + return fileDeleted || metadataDeleted || chunksDeleted; + } + + protected void cleanupFilesystem() throws IOException { + // It is essential that we use a separate path reference in this method, + // as we don't want to modify this.filePath + Path path = this.filePath; + + FilesystemUtils.safeDeleteEmptyParentDirectories(path); + } + + public byte[] getBytes() { + try { + return Files.readAllBytes(this.filePath); + } catch (IOException e) { + LOGGER.error("Unable to read bytes for file"); + return null; + } + } + + + /* Helper methods */ + + private boolean isInBaseDirectory(Path filePath) { + Path path = filePath.toAbsolutePath(); + String dataPath = Settings.getInstance().getDataPath(); + String basePath = Paths.get(dataPath).toAbsolutePath().toString(); + return path.startsWith(basePath); + } + + public boolean exists() { + File file = this.filePath.toFile(); + return file.exists(); + } + + public boolean chunkExists(byte[] hash) { + for (ArbitraryDataFileChunk chunk : this.chunks) { + if (Arrays.equals(hash, chunk.getHash())) { + return chunk.exists(); + } + } + if (Arrays.equals(this.getHash(), hash)) { + return this.exists(); + } + return false; + } + + public boolean allChunksExist() { + try { + if (this.metadataHash == null) { + // We don't have any metadata so can't check if we have the chunks + // Even if this transaction has no chunks, we don't have the file either (already checked above) + return false; + } + + if (this.metadataFile == null) { + this.metadataFile = ArbitraryDataFile.fromHash(this.metadataHash, this.signature); + if (!metadataFile.exists()) { + return false; + } + } + + // If the metadata file doesn't exist, we can't check if we have the chunks + if (!metadataFile.getFilePath().toFile().exists()) { + return false; + } + + if (this.metadata == null) { + this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath())); + } + + // Read the metadata + List<byte[]> chunks = metadata.getChunks(); + for (byte[] chunkHash : chunks) { + ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature); + if (!chunk.exists()) { + return false; + } + } + + return true; + + } catch (DataException e) { + // Something went wrong, so assume we don't have all the chunks + return false; + } + } + + public boolean 
anyChunksExist() throws DataException { + try { + if (this.metadataHash == null) { + // We don't have any metadata so can't check if we have the chunks + // Even if this transaction has no chunks, we don't have the file either (already checked above) + return false; + } + + if (this.metadataFile == null) { + this.metadataFile = ArbitraryDataFile.fromHash(this.metadataHash, this.signature); + if (!metadataFile.exists()) { + return false; + } + } + + // If the metadata file doesn't exist, we can't check if we have any chunks + if (!metadataFile.getFilePath().toFile().exists()) { + return false; + } + + if (this.metadata == null) { + this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath())); + } + + // Read the metadata + List<byte[]> chunks = metadata.getChunks(); + for (byte[] chunkHash : chunks) { + ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature); + if (chunk.exists()) { + return true; + } + } + + return false; + + } catch (DataException e) { + // Something went wrong, so assume we don't have any of the chunks + return false; + } + } + + public boolean allFilesExist() { + if (this.exists()) { + return true; + } + + // Complete file doesn't exist, so check the chunks + if (this.allChunksExist()) { + return true; + } + + return false; + } + + public boolean containsChunk(byte[] hash) { + for (ArbitraryDataFileChunk chunk : this.chunks) { + if (Arrays.equals(hash, chunk.getHash())) { + return true; + } + } + return false; + } + + public long size() { + try { + return Files.size(this.filePath); + } catch (IOException e) { + return 0; + } + } + + public int chunkCount() { + return this.chunks.size(); + } + + public List<ArbitraryDataFileChunk> getChunks() { + return this.chunks; + } + + public byte[] chunkHashes() throws DataException { + if (this.chunks != null && this.chunks.size() > 0) { + // Return null if we only have one chunk, with the same hash as the parent + if (Arrays.equals(this.digest(), this.chunks.get(0).digest())) { + return null; + } + + try { + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + for (ArbitraryDataFileChunk chunk : this.chunks) { + byte[] chunkHash = chunk.digest(); + if (chunkHash.length != 32) { + LOGGER.info("Invalid chunk hash length: {}", chunkHash.length); + throw new DataException("Invalid chunk hash length"); + } + outputStream.write(chunk.digest()); + } + return outputStream.toByteArray(); + } catch (IOException e) { + return null; + } + } + return null; + } + + public List<byte[]> chunkHashList() { + List<byte[]> chunks = new ArrayList<>(); + + if (this.chunks != null && this.chunks.size() > 0) { + // Return null if we only have one chunk, with the same hash as the parent + if (Arrays.equals(this.digest(), this.chunks.get(0).digest())) { + return null; + } + + try { + for (ArbitraryDataFileChunk chunk : this.chunks) { + byte[] chunkHash = chunk.digest(); + if (chunkHash.length != 32) { + LOGGER.info("Invalid chunk hash length: {}", chunkHash.length); + throw new DataException("Invalid chunk hash length"); + } + chunks.add(chunkHash); + } + return chunks; + + } catch (DataException e) { + return null; + } + } + return null; + } + + private void loadMetadata() throws DataException { + try { + this.metadata.read(); + + } catch (DataException | IOException e) { + throw new DataException(e); + } + } + + private File getFile() { + File file = this.filePath.toFile(); + if (file.exists()) { + return file; + } + return null; + } + + public Path getFilePath() { + return this.filePath; + } + + public byte[] digest() { + File 
file = this.getFile(); + if (file != null && file.exists()) { + try { + return Crypto.digest(file); + + } catch (IOException e) { + LOGGER.error("Couldn't compute digest for ArbitraryDataFile"); + } + } + return null; + } + + public String digest58() { + if (this.digest() != null) { + return Base58.encode(this.digest()); + } + return null; + } + + public String shortHash58() { + if (this.hash58 == null) { + return null; + } + return this.hash58.substring(0, Math.min(this.hash58.length(), SHORT_DIGEST_LENGTH)); + } + + public String getHash58() { + return this.hash58; + } + + public byte[] getHash() { + return Base58.decode(this.hash58); + } + + public String printChunks() { + String outputString = ""; + if (this.chunkCount() > 0) { + for (ArbitraryDataFileChunk chunk : this.chunks) { + if (outputString.length() > 0) { + outputString = outputString.concat(","); + } + outputString = outputString.concat(chunk.digest58()); + } + } + return outputString; + } + + public void setSecret(byte[] secret) { + this.secret = secret; + } + + public byte[] getSecret() { + return this.secret; + } + + public byte[] getSignature() { + return this.signature; + } + + public void setMetadataFile(ArbitraryDataFile metadataFile) { + this.metadataFile = metadataFile; + } + + public ArbitraryDataFile getMetadataFile() { + return this.metadataFile; + } + + public void setMetadataHash(byte[] hash) throws DataException { + this.metadataHash = hash; + + if (hash == null) { + return; + } + this.metadataFile = ArbitraryDataFile.fromHash(hash, this.signature); + if (metadataFile.exists()) { + this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath())); + this.addChunkHashes(this.metadata.getChunks()); + } + } + + public byte[] getMetadataHash() { + return this.metadataHash; + } + + public void setMetadata(ArbitraryDataTransactionMetadata metadata) throws DataException { + this.metadata = metadata; + this.loadMetadata(); + } + + @Override + public String toString() { + return this.shortHash58(); + } +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataFileChunk.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataFileChunk.java new file mode 100644 index 00000000..b113fbba --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataFileChunk.java @@ -0,0 +1,54 @@ +package org.qortal.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.repository.DataException; +import org.qortal.utils.Base58; + +import java.io.IOException; +import java.nio.file.Files; + + +public class ArbitraryDataFileChunk extends ArbitraryDataFile { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileChunk.class); + + public ArbitraryDataFileChunk(String hash58, byte[] signature) throws DataException { + super(hash58, signature); + } + + public ArbitraryDataFileChunk(byte[] fileContent, byte[] signature) throws DataException { + super(fileContent, signature); + } + + public static ArbitraryDataFileChunk fromHash58(String hash58, byte[] signature) throws DataException { + return new ArbitraryDataFileChunk(hash58, signature); + } + + public static ArbitraryDataFileChunk fromHash(byte[] hash, byte[] signature) throws DataException { + return ArbitraryDataFileChunk.fromHash58(Base58.encode(hash), signature); + } + + @Override + public ValidationResult isValid() { + // DataChunk validation applies here too + ValidationResult superclassValidationResult = super.isValid(); + if (superclassValidationResult != 
ValidationResult.OK) { + return superclassValidationResult; + } + + try { + // Validate the file size (chunks have stricter limits) + long fileSize = Files.size(this.filePath); + if (fileSize > CHUNK_SIZE) { + LOGGER.error(String.format("DataFileChunk is too large: %d bytes (max chunk size: %d bytes)", fileSize, CHUNK_SIZE)); + return ValidationResult.FILE_TOO_LARGE; + } + + } catch (IOException e) { + return ValidationResult.FILE_NOT_FOUND; + } + + return ValidationResult.OK; + } +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataMerge.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataMerge.java new file mode 100644 index 00000000..eab5c828 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataMerge.java @@ -0,0 +1,176 @@ +package org.qortal.arbitrary; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataDiff.*; +import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch; +import org.qortal.arbitrary.patch.UnifiedDiffPatch; +import org.qortal.repository.DataException; +import org.qortal.settings.Settings; +import org.qortal.utils.FilesystemUtils; + +import java.io.File; +import java.io.IOException; +import java.nio.file.*; +import java.util.List; +import java.util.UUID; + +public class ArbitraryDataMerge { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMerge.class); + + private final Path pathBefore; + private final Path pathAfter; + private Path mergePath; + private String identifier; + private ArbitraryDataMetadataPatch metadata; + + public ArbitraryDataMerge(Path pathBefore, Path pathAfter) { + this.pathBefore = pathBefore; + this.pathAfter = pathAfter; + } + + public void compute() throws IOException, DataException { + try { + this.preExecute(); + this.copyPreviousStateToMergePath(); + this.loadMetadata(); + this.applyDifferences(); + this.copyMetadata(); + + } finally { + this.postExecute(); + } + } + + private void preExecute() throws DataException { + this.createRandomIdentifier(); + this.createOutputDirectory(); + } + + private void postExecute() { + + } + + private void createRandomIdentifier() { + this.identifier = UUID.randomUUID().toString(); + } + + private void createOutputDirectory() throws DataException { + // Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware + String baseDir = Settings.getInstance().getTempDataPath(); + Path tempDir = Paths.get(baseDir, "merge", this.identifier); + try { + Files.createDirectories(tempDir); + } catch (IOException e) { + throw new DataException("Unable to create temp directory"); + } + this.mergePath = tempDir; + } + + private void copyPreviousStateToMergePath() throws IOException { + ArbitraryDataMerge.copyDirPathToBaseDir(this.pathBefore, this.mergePath, Paths.get("")); + } + + private void loadMetadata() throws IOException, DataException { + this.metadata = new ArbitraryDataMetadataPatch(this.pathAfter); + this.metadata.read(); + } + + private void applyDifferences() throws IOException, DataException { + + List addedPaths = this.metadata.getAddedPaths(); + for (Path path : addedPaths) { + LOGGER.trace("File was added: {}", path.toString()); + Path filePath = Paths.get(this.pathAfter.toString(), path.toString()); + ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, path); + } + + List modifiedPaths = this.metadata.getModifiedPaths(); + for (ModifiedPath modifiedPath : 
modifiedPaths) { + LOGGER.trace("File was modified: {}", modifiedPath.toString()); + this.applyPatch(modifiedPath); + } + + List removedPaths = this.metadata.getRemovedPaths(); + for (Path path : removedPaths) { + LOGGER.trace("File was removed: {}", path.toString()); + ArbitraryDataMerge.deletePathInBaseDir(this.mergePath, path); + } + } + + private void applyPatch(ModifiedPath modifiedPath) throws IOException, DataException { + if (modifiedPath.getDiffType() == DiffType.UNIFIED_DIFF) { + // Create destination file from patch + UnifiedDiffPatch unifiedDiffPatch = new UnifiedDiffPatch(pathBefore, pathAfter, mergePath); + unifiedDiffPatch.apply(modifiedPath.getPath()); + } + else if (modifiedPath.getDiffType() == DiffType.COMPLETE_FILE) { + // Copy complete file + Path filePath = Paths.get(this.pathAfter.toString(), modifiedPath.getPath().toString()); + ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, modifiedPath.getPath()); + } + else { + throw new DataException(String.format("Unrecognized patch diff type: %s", modifiedPath.getDiffType())); + } + } + + private void copyMetadata() throws IOException { + Path filePath = Paths.get(this.pathAfter.toString(), ".qortal"); + ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, Paths.get(".qortal")); + } + + + private static void copyPathToBaseDir(Path source, Path base, Path relativePath) throws IOException { + if (!Files.exists(source)) { + throw new IOException(String.format("File not found: %s", source.toString())); + } + + File sourceFile = source.toFile(); + Path dest = Paths.get(base.toString(), relativePath.toString()); + LOGGER.trace("Copying {} to {}", source, dest); + + if (sourceFile.isFile()) { + Files.copy(source, dest, StandardCopyOption.REPLACE_EXISTING); + } + else if (sourceFile.isDirectory()) { + FilesystemUtils.copyAndReplaceDirectory(source.toString(), dest.toString()); + } + else { + throw new IOException(String.format("Invalid file: %s", source.toString())); + } + } + + private static void copyDirPathToBaseDir(Path source, Path base, Path relativePath) throws IOException { + if (!Files.exists(source)) { + throw new IOException(String.format("File not found: %s", source.toString())); + } + + Path dest = Paths.get(base.toString(), relativePath.toString()); + LOGGER.trace("Copying {} to {}", source, dest); + FilesystemUtils.copyAndReplaceDirectory(source.toString(), dest.toString()); + } + + private static void deletePathInBaseDir(Path base, Path relativePath) throws IOException { + Path dest = Paths.get(base.toString(), relativePath.toString()); + File file = new File(dest.toString()); + if (file.exists() && file.isFile()) { + if (FilesystemUtils.pathInsideDataOrTempPath(dest)) { + LOGGER.trace("Deleting file {}", dest); + Files.delete(dest); + } + } + if (file.exists() && file.isDirectory()) { + if (FilesystemUtils.pathInsideDataOrTempPath(dest)) { + LOGGER.trace("Deleting directory {}", dest); + FileUtils.deleteDirectory(file); + } + } + } + + public Path getMergePath() { + return this.mergePath; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java new file mode 100644 index 00000000..a5bbf557 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java @@ -0,0 +1,536 @@ +package org.qortal.arbitrary; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import org.qortal.arbitrary.exception.MissingDataException; 
+import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataBuildManager; +import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.controller.arbitrary.ArbitraryDataStorageManager; +import org.qortal.crypto.AES; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.ArbitraryTransactionData.*; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.arbitrary.ArbitraryDataFile.*; +import org.qortal.settings.Settings; +import org.qortal.transform.Transformer; +import org.qortal.utils.ArbitraryTransactionUtils; +import org.qortal.utils.Base58; +import org.qortal.utils.FilesystemUtils; +import org.qortal.utils.ZipUtils; + +import javax.crypto.BadPaddingException; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import java.io.File; +import java.io.IOException; +import java.io.InvalidObjectException; +import java.nio.file.*; +import java.nio.file.attribute.BasicFileAttributes; +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; + +public class ArbitraryDataReader { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataReader.class); + + private final String resourceId; + private final ResourceIdType resourceIdType; + private final Service service; + private final String identifier; + private ArbitraryTransactionData transactionData; + private String secret58; + private Path filePath; + private boolean canRequestMissingFiles; + + // Intermediate paths + private final Path workingPath; + private final Path uncompressedPath; + + // Stats (available for synchronous builds only) + private int layerCount; + private byte[] latestSignature; + + public ArbitraryDataReader(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) { + // Ensure names are always lowercase + if (resourceIdType == ResourceIdType.NAME) { + resourceId = resourceId.toLowerCase(); + } + + // If identifier is a blank string, or reserved keyword "default", treat it as null + if (identifier == null || identifier.equals("") || identifier.equals("default")) { + identifier = null; + } + + this.resourceId = resourceId; + this.resourceIdType = resourceIdType; + this.service = service; + this.identifier = identifier; + + this.workingPath = this.buildWorkingPath(); + this.uncompressedPath = Paths.get(this.workingPath.toString(), "data"); + + // By default we can request missing files + // Callers can use setCanRequestMissingFiles(false) to prevent it + this.canRequestMissingFiles = true; + } + + private Path buildWorkingPath() { + // Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware + String baseDir = Settings.getInstance().getTempDataPath(); + String identifier = this.identifier != null ? 
this.identifier : "default"; + return Paths.get(baseDir, "reader", this.resourceIdType.toString(), this.resourceId, this.service.toString(), identifier); + } + + public boolean isCachedDataAvailable() { + // If this resource is in the build queue then we shouldn't attempt to serve + // cached data, as it may not be fully built + if (ArbitraryDataBuildManager.getInstance().isInBuildQueue(this.createQueueItem())) { + return false; + } + + // Not in the build queue - so check the cache itself + ArbitraryDataCache cache = new ArbitraryDataCache(this.uncompressedPath, false, + this.resourceId, this.resourceIdType, this.service, this.identifier); + if (cache.isCachedDataAvailable()) { + this.filePath = this.uncompressedPath; + return true; + } + return false; + } + + public boolean isBuilding() { + return ArbitraryDataBuildManager.getInstance().isInBuildQueue(this.createQueueItem()); + } + + private ArbitraryDataBuildQueueItem createQueueItem() { + return new ArbitraryDataBuildQueueItem(this.resourceId, this.resourceIdType, this.service, this.identifier); + } + + /** + * loadAsynchronously + * + * Attempts to load the resource asynchronously + * This adds the build task to a queue, and the result will be cached when complete + * To check the status of the build, periodically call isCachedDataAvailable() + * Once it returns true, you can then use getFilePath() to access the data itself. + * @return true if added or already present in queue; false if not + */ + public boolean loadAsynchronously() { + return ArbitraryDataBuildManager.getInstance().addToBuildQueue(this.createQueueItem()); + } + + /** + * loadSynchronously + * + * Attempts to load the resource synchronously + * Warning: this can block for a long time when building or fetching complex data + * If no exception is thrown, you can then use getFilePath() to access the data immediately after returning + * + * @param overwrite - set to true to force rebuild an existing cache + * @throws IOException + * @throws DataException + * @throws MissingDataException + */ + public void loadSynchronously(boolean overwrite) throws DataException, IOException, MissingDataException { + try { + ArbitraryDataCache cache = new ArbitraryDataCache(this.uncompressedPath, overwrite, + this.resourceId, this.resourceIdType, this.service, this.identifier); + if (cache.isCachedDataAvailable()) { + // Use cached data + this.filePath = this.uncompressedPath; + return; + } + + this.preExecute(); + this.deleteExistingFiles(); + this.fetch(); + this.decrypt(); + this.uncompress(); + this.validate(); + + } catch (DataException e) { + this.deleteWorkingDirectory(); + throw new DataException(e.getMessage()); + + } finally { + this.postExecute(); + } + } + + private void preExecute() throws DataException { + ArbitraryDataBuildManager.getInstance().setBuildInProgress(true); + this.checkEnabled(); + this.createWorkingDirectory(); + this.createUncompressedDirectory(); + } + + private void postExecute() { + ArbitraryDataBuildManager.getInstance().setBuildInProgress(false); + } + + private void checkEnabled() throws DataException { + if (!Settings.getInstance().isQdnEnabled()) { + throw new DataException("QDN is disabled in settings"); + } + } + + private void createWorkingDirectory() throws DataException { + try { + Files.createDirectories(this.workingPath); + } catch (IOException e) { + throw new DataException("Unable to create temp directory"); + } + } + + /** + * Working directory should only be deleted on failure, since it is currently used to + * serve a cached version of 
the resource for subsequent requests. + * @throws IOException + */ + private void deleteWorkingDirectory() throws IOException { + FilesystemUtils.safeDeleteDirectory(this.workingPath, true); + } + + private void createUncompressedDirectory() throws DataException { + try { + // Create parent directory + Files.createDirectories(this.uncompressedPath.getParent()); + // Ensure child directory doesn't already exist + FileUtils.deleteDirectory(this.uncompressedPath.toFile()); + + } catch (IOException e) { + throw new DataException("Unable to create uncompressed directory"); + } + } + + private void deleteExistingFiles() { + final Path uncompressedPath = this.uncompressedPath; + if (FilesystemUtils.pathInsideDataOrTempPath(uncompressedPath)) { + if (Files.exists(uncompressedPath)) { + LOGGER.trace("Attempting to delete path {}", this.uncompressedPath); + try { + Files.walkFileTree(uncompressedPath, new SimpleFileVisitor<>() { + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException { + // Don't delete the parent directory, as we want to leave an empty folder + if (dir.compareTo(uncompressedPath) == 0) { + return FileVisitResult.CONTINUE; + } + + if (e == null) { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } else { + throw e; + } + } + + }); + } catch (IOException e) { + LOGGER.debug("Unable to delete file or directory: {}", e.getMessage()); + } + } + } + } + + private void fetch() throws DataException, IOException, MissingDataException { + switch (resourceIdType) { + + case FILE_HASH: + this.fetchFromFileHash(); + break; + + case NAME: + this.fetchFromName(); + break; + + case SIGNATURE: + this.fetchFromSignature(); + break; + + case TRANSACTION_DATA: + this.fetchFromTransactionData(this.transactionData); + break; + + default: + throw new DataException(String.format("Unknown resource ID type specified: %s", resourceIdType.toString())); + } + } + + private void fetchFromFileHash() throws DataException { + // Load data file directly from the hash (without a signature) + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash58(resourceId, null); + // Set filePath to the location of the ArbitraryDataFile + this.filePath = arbitraryDataFile.getFilePath(); + } + + private void fetchFromName() throws DataException, IOException, MissingDataException { + try { + + // Build the existing state using past transactions + ArbitraryDataBuilder builder = new ArbitraryDataBuilder(this.resourceId, this.service, this.identifier); + builder.build(); + Path builtPath = builder.getFinalPath(); + if (builtPath == null) { + throw new DataException("Unable to build path"); + } + + // Update stats + this.layerCount = builder.getLayerCount(); + this.latestSignature = builder.getLatestSignature(); + + // Set filePath to the builtPath + this.filePath = builtPath; + + } catch (InvalidObjectException e) { + // Hash validation failed. 
Invalidate the cache for this name, so it can be rebuilt + LOGGER.info("Deleting {}", this.workingPath.toString()); + FilesystemUtils.safeDeleteDirectory(this.workingPath, false); + throw(e); + } + } + + private void fetchFromSignature() throws DataException, IOException, MissingDataException { + + // Load the full transaction data from the database so we can access the file hashes + ArbitraryTransactionData transactionData; + try (final Repository repository = RepositoryManager.getRepository()) { + transactionData = (ArbitraryTransactionData) repository.getTransactionRepository().fromSignature(Base58.decode(resourceId)); + } + if (transactionData == null) { + throw new DataException(String.format("Transaction data not found for signature %s", this.resourceId)); + } + + this.fetchFromTransactionData(transactionData); + } + + private void fetchFromTransactionData(ArbitraryTransactionData transactionData) throws DataException, IOException, MissingDataException { + if (transactionData == null) { + throw new DataException(String.format("Transaction data not found for signature %s", this.resourceId)); + } + + // Load hashes + byte[] digest = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); + byte[] signature = transactionData.getSignature(); + + // Load secret + byte[] secret = transactionData.getSecret(); + if (secret != null) { + this.secret58 = Base58.encode(secret); + } + + // Load data file(s) + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature); + ArbitraryTransactionUtils.checkAndRelocateMiscFiles(transactionData); + arbitraryDataFile.setMetadataHash(metadataHash); + + if (!arbitraryDataFile.allFilesExist()) { + if (ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName())) { + throw new DataException( + String.format("Unable to request missing data for file %s because the name is blocked", arbitraryDataFile)); + } + else { + // Ask the arbitrary data manager to fetch data for this transaction + String message; + if (this.canRequestMissingFiles) { + boolean requested = ArbitraryDataManager.getInstance().fetchData(transactionData); + + if (requested) { + message = String.format("Requested missing data for file %s", arbitraryDataFile); + } else { + message = String.format("Unable to reissue request for missing file %s for signature %s due to rate limit. 
Please try again later.", arbitraryDataFile, Base58.encode(transactionData.getSignature())); + } + } + else { + message = String.format("Missing data for file %s", arbitraryDataFile); + } + + // Throw a missing data exception, which allows subsequent layers to fetch data + LOGGER.info(message); + throw new MissingDataException(message); + } + } + + if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) { + // We have all the chunks but not the complete file, so join them + arbitraryDataFile.join(); + } + + // If the complete file still doesn't exist then something went wrong + if (!arbitraryDataFile.exists()) { + throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile)); + } + // Ensure the complete hash matches the joined chunks + if (!Arrays.equals(arbitraryDataFile.digest(), digest)) { + // Delete the invalid file + arbitraryDataFile.delete(); + throw new DataException("Unable to validate complete file hash"); + } + // Ensure the file's size matches the size reported by the transaction (throws a DataException if not) + arbitraryDataFile.validateFileSize(transactionData.getSize()); + + // Set filePath to the location of the ArbitraryDataFile + this.filePath = arbitraryDataFile.getFilePath(); + } + + private void decrypt() throws DataException { + // Decrypt if we have the secret key. + byte[] secret = this.secret58 != null ? Base58.decode(this.secret58) : null; + if (secret != null && secret.length == Transformer.AES256_LENGTH) { + try { + Path unencryptedPath = Paths.get(this.workingPath.toString(), "zipped.zip"); + SecretKey aesKey = new SecretKeySpec(secret, 0, secret.length, "AES"); + AES.decryptFile("AES", aesKey, this.filePath.toString(), unencryptedPath.toString()); + + // Replace filePath pointer with the encrypted file path + // Don't delete the original ArbitraryDataFile, as this is handled in the cleanup phase + this.filePath = unencryptedPath; + + } catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException | NoSuchPaddingException + | BadPaddingException | IllegalBlockSizeException | IOException | InvalidKeyException e) { + // TODO: delete files and block this resource if privateDataEnabled is false + throw new DataException(String.format("Unable to decrypt file at path %s: %s", this.filePath, e.getMessage())); + } + } else { + // Assume it is unencrypted. This will be the case when we have built a custom path by combining + // multiple decrypted archives into a single state. + } + } + + private void uncompress() throws IOException, DataException { + if (this.filePath == null || !Files.exists(this.filePath)) { + throw new DataException("Can't uncompress non-existent file path"); + } + File file = new File(this.filePath.toString()); + if (file.isDirectory()) { + // Already a directory - nothing to uncompress + // We still need to copy the directory to its final destination if it's not already there + this.moveFilePathToFinalDestination(); + return; + } + + try { + // Default to ZIP compression - this is needed for previews + Compression compression = transactionData != null ? 
transactionData.getCompression() : Compression.ZIP; + + // Handle each type of compression + if (compression == Compression.ZIP) { + ZipUtils.unzip(this.filePath.toString(), this.uncompressedPath.getParent().toString()); + } + else if (compression == Compression.NONE) { + Files.createDirectories(this.uncompressedPath); + Path finalPath = Paths.get(this.uncompressedPath.toString(), "data"); + this.filePath.toFile().renameTo(finalPath.toFile()); + } + else { + throw new DataException(String.format("Unrecognized compression type: %s", transactionData.getCompression())); + } + } catch (IOException e) { + throw new DataException(String.format("Unable to unzip file: %s", e.getMessage())); + } + + // Replace filePath pointer with the uncompressed file path + if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) { + if (Files.exists(this.filePath)) { + Files.delete(this.filePath); + } + } + this.filePath = this.uncompressedPath; + } + + private void validate() throws IOException, DataException { + if (this.service.isValidationRequired()) { + Service.ValidationResult result = this.service.validate(this.filePath); + if (result != Service.ValidationResult.OK) { + throw new DataException(String.format("Validation of %s failed: %s", this.service, result.toString())); + } + } + } + + + private void moveFilePathToFinalDestination() throws IOException, DataException { + if (this.filePath.compareTo(this.uncompressedPath) != 0) { + File source = new File(this.filePath.toString()); + File dest = new File(this.uncompressedPath.toString()); + if (!source.exists()) { + throw new DataException("Source directory doesn't exist"); + } + // Ensure destination directory doesn't exist + FileUtils.deleteDirectory(dest); + // Move files to destination + FilesystemUtils.copyAndReplaceDirectory(source.toString(), dest.toString()); + + try { + // Delete existing + if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) { + File directory = new File(this.filePath.toString()); + FileUtils.deleteDirectory(directory); + } + + // ... 
and its parent directory if empty + Path parentDirectory = this.filePath.getParent(); + if (FilesystemUtils.pathInsideDataOrTempPath(parentDirectory)) { + Files.deleteIfExists(parentDirectory); + } + + } catch (DirectoryNotEmptyException e) { + // No need to log anything + } catch (IOException e) { + // This will eventually be cleaned up by a maintenance process, so log the error and continue + LOGGER.debug("Unable to cleanup directories: {}", e.getMessage()); + } + + // Finally, update filePath to point to uncompressedPath + this.filePath = this.uncompressedPath; + } + } + + + public void setTransactionData(ArbitraryTransactionData transactionData) { + this.transactionData = transactionData; + } + + public void setSecret58(String secret58) { + this.secret58 = secret58; + } + + public Path getFilePath() { + return this.filePath; + } + + public int getLayerCount() { + return this.layerCount; + } + + public byte[] getLatestSignature() { + return this.latestSignature; + } + + /** + * Use the below setter to ensure that we only read existing + * data without requesting any missing files, + * + * @param canRequestMissingFiles - whether or not fetching missing files is allowed + */ + public void setCanRequestMissingFiles(boolean canRequestMissingFiles) { + this.canRequestMissingFiles = canRequestMissingFiles; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java new file mode 100644 index 00000000..67b4c42b --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java @@ -0,0 +1,212 @@ +package org.qortal.arbitrary; + +import com.google.common.io.Resources; +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.api.HTMLParser; +import org.qortal.arbitrary.ArbitraryDataFile.*; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.Controller; +import org.qortal.repository.DataException; +import org.qortal.settings.Settings; + +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +public class ArbitraryDataRenderer { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataRenderer.class); + + private final String resourceId; + private final ResourceIdType resourceIdType; + private final Service service; + private String inPath; + private final String secret58; + private final String prefix; + private final boolean usePrefix; + private final boolean async; + private final HttpServletRequest request; + private final HttpServletResponse response; + private final ServletContext context; + + public ArbitraryDataRenderer(String resourceId, ResourceIdType resourceIdType, Service service, String inPath, + String secret58, String prefix, boolean usePrefix, boolean async, + HttpServletRequest request, HttpServletResponse response, ServletContext context) { + + this.resourceId = resourceId; + this.resourceIdType = resourceIdType; + this.service = service; + this.inPath 
= inPath; + this.secret58 = secret58; + this.prefix = prefix; + this.usePrefix = usePrefix; + this.async = async; + this.request = request; + this.response = response; + this.context = context; + } + + public HttpServletResponse render() { + if (!inPath.startsWith(File.separator)) { + inPath = File.separator + inPath; + } + + // Don't render data if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return ArbitraryDataRenderer.getResponse(response, 500, "QDN is disabled in settings"); + } + + ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(resourceId, resourceIdType, service, null); + arbitraryDataReader.setSecret58(secret58); // Optional, used for loading encrypted file hashes only + try { + if (!arbitraryDataReader.isCachedDataAvailable()) { + // If async is requested, show a loading screen whilst build is in progress + if (async) { + arbitraryDataReader.loadAsynchronously(); + return this.getLoadingResponse(service, resourceId); + } + + // Otherwise, loop until we have data + int attempts = 0; + while (!Controller.isStopping()) { + attempts++; + if (!arbitraryDataReader.isBuilding()) { + try { + arbitraryDataReader.loadSynchronously(false); + break; + } catch (MissingDataException e) { + if (attempts > 5) { + // Give up after 5 attempts + return ArbitraryDataRenderer.getResponse(response, 404, "Data unavailable. Please try again later."); + } + } + } + Thread.sleep(3000L); + } + } + + } catch (Exception e) { + LOGGER.info(String.format("Unable to load %s %s: %s", service, resourceId, e.getMessage())); + return ArbitraryDataRenderer.getResponse(response, 500, "Error 500: Internal Server Error"); + } + + java.nio.file.Path path = arbitraryDataReader.getFilePath(); + if (path == null) { + return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found"); + } + String unzippedPath = path.toString(); + + try { + String filename = this.getFilename(unzippedPath, inPath); + String filePath = Paths.get(unzippedPath, filename).toString(); + + if (HTMLParser.isHtmlFile(filename)) { + // HTML file - needs to be parsed + byte[] data = Files.readAllBytes(Paths.get(filePath)); // TODO: limit file size that can be read into memory + HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, usePrefix, data); + htmlParser.setDocumentBaseUrl(); + response.setContentType(context.getMimeType(filename)); + response.setContentLength(htmlParser.getData().length); + response.getOutputStream().write(htmlParser.getData()); + } + else { + // Regular file - can be streamed directly + File file = new File(filePath); + FileInputStream inputStream = new FileInputStream(file); + response.setContentType(context.getMimeType(filename)); + int bytesRead, length = 0; + byte[] buffer = new byte[10240]; + while ((bytesRead = inputStream.read(buffer)) != -1) { + response.getOutputStream().write(buffer, 0, bytesRead); + length += bytesRead; + } + response.setContentLength(length); + inputStream.close(); + } + return response; + } catch (FileNotFoundException | NoSuchFileException e) { + LOGGER.info("Unable to serve file: {}", e.getMessage()); + if (inPath.equals("/")) { + // Delete the unzipped folder if no index file was found + try { + FileUtils.deleteDirectory(new File(unzippedPath)); + } catch (IOException ioException) { + LOGGER.debug("Unable to delete directory: {}", unzippedPath, e); + } + } + } catch (IOException e) { + LOGGER.info("Unable to serve file at path {}: {}", inPath, e.getMessage()); + } + + return ArbitraryDataRenderer.getResponse(response, 404, 
"Error 404: File Not Found"); + } + + private String getFilename(String directory, String userPath) { + if (userPath == null || userPath.endsWith("/") || userPath.equals("")) { + // Locate index file + List indexFiles = ArbitraryDataRenderer.indexFiles(); + for (String indexFile : indexFiles) { + Path path = Paths.get(directory, indexFile); + if (Files.exists(path)) { + return userPath + indexFile; + } + } + } + return userPath; + } + + private HttpServletResponse getLoadingResponse(Service service, String name) { + String responseString = ""; + URL url = Resources.getResource("loading/index.html"); + try { + responseString = Resources.toString(url, StandardCharsets.UTF_8); + + // Replace vars + responseString = responseString.replace("%%SERVICE%%", service.toString()); + responseString = responseString.replace("%%NAME%%", name); + + } catch (IOException e) { + LOGGER.info("Unable to show loading screen: {}", e.getMessage()); + } + return ArbitraryDataRenderer.getResponse(response, 503, responseString); + } + + public static HttpServletResponse getResponse(HttpServletResponse response, int responseCode, String responseString) { + try { + byte[] responseData = responseString.getBytes(); + response.setStatus(responseCode); + response.setContentLength(responseData.length); + response.getOutputStream().write(responseData); + } catch (IOException e) { + LOGGER.info("Error writing {} response", responseCode); + } + return response; + } + + public static List indexFiles() { + List indexFiles = new ArrayList<>(); + indexFiles.add("index.html"); + indexFiles.add("index.htm"); + indexFiles.add("default.html"); + indexFiles.add("default.htm"); + indexFiles.add("home.html"); + indexFiles.add("home.htm"); + return indexFiles; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataResource.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataResource.java new file mode 100644 index 00000000..6bbc7c12 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataResource.java @@ -0,0 +1,301 @@ +package org.qortal.arbitrary; + +import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataBuildManager; +import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.data.arbitrary.ArbitraryResourceStatus; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.list.ResourceListManager; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.utils.ArbitraryTransactionUtils; +import org.qortal.utils.FilesystemUtils; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +import static org.qortal.data.arbitrary.ArbitraryResourceStatus.Status; + +public class ArbitraryDataResource { + + protected final String resourceId; + protected final ResourceIdType resourceIdType; + protected final Service service; + protected final String identifier; + + private List transactions; + private ArbitraryTransactionData latestPutTransaction; + private int layerCount; + + public ArbitraryDataResource(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) { + this.resourceId = resourceId.toLowerCase(); + this.resourceIdType = resourceIdType; + this.service = service; + + // If identifier 
is a blank string, or reserved keyword "default", treat it as null + if (identifier == null || identifier.equals("") || identifier.equals("default")) { + identifier = null; + } + this.identifier = identifier; + } + + public ArbitraryResourceStatus getStatus() { + if (resourceIdType != ResourceIdType.NAME) { + // We only support statuses for resources with a name + return new ArbitraryResourceStatus(Status.UNSUPPORTED); + } + + // Check if the name is blocked + if (ResourceListManager.getInstance() + .listContains("blockedNames", this.resourceId, false)) { + return new ArbitraryResourceStatus(Status.BLOCKED); + } + + // Firstly check the cache to see if it's already built + ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader( + resourceId, resourceIdType, service, identifier); + if (arbitraryDataReader.isCachedDataAvailable()) { + return new ArbitraryResourceStatus(Status.READY); + } + + // Next check if there's a build in progress + ArbitraryDataBuildQueueItem queueItem = + new ArbitraryDataBuildQueueItem(resourceId, resourceIdType, service, identifier); + if (ArbitraryDataBuildManager.getInstance().isInBuildQueue(queueItem)) { + return new ArbitraryResourceStatus(Status.BUILDING); + } + + // Check if a build has failed + if (ArbitraryDataBuildManager.getInstance().isInFailedBuildsList(queueItem)) { + return new ArbitraryResourceStatus(Status.BUILD_FAILED); + } + + // Check if we have all data locally for this resource + if (!this.allFilesDownloaded()) { + if (this.isDownloading()) { + return new ArbitraryResourceStatus(Status.DOWNLOADING); + } + else if (this.isDataPotentiallyAvailable()) { + return new ArbitraryResourceStatus(Status.NOT_STARTED); + } + return new ArbitraryResourceStatus(Status.MISSING_DATA); + } + + // We have all data locally + return new ArbitraryResourceStatus(Status.DOWNLOADED); + } + + public boolean delete() { + try { + this.fetchTransactions(); + + List transactionDataList = new ArrayList<>(this.transactions); + + for (ArbitraryTransactionData transactionData : transactionDataList) { + byte[] hash = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); + byte[] signature = transactionData.getSignature(); + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + arbitraryDataFile.setMetadataHash(metadataHash); + + // Delete any chunks or complete files from each transaction + arbitraryDataFile.deleteAll(); + } + + // Also delete cached data for the entire resource + this.deleteCache(); + + return true; + + } catch (DataException | IOException e) { + return false; + } + } + + public void deleteCache() throws IOException { + String baseDir = Settings.getInstance().getTempDataPath(); + String identifier = this.identifier != null ? 
this.identifier : "default";
+		Path cachePath = Paths.get(baseDir, "reader", this.resourceIdType.toString(), this.resourceId, this.service.toString(), identifier);
+		if (cachePath.toFile().exists()) {
+			FilesystemUtils.safeDeleteDirectory(cachePath, true);
+		}
+	}
+
+	private boolean allFilesDownloaded() {
+		try {
+			this.fetchTransactions();
+
+			List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
+
+			for (ArbitraryTransactionData transactionData : transactionDataList) {
+				if (!ArbitraryTransactionUtils.completeFileExists(transactionData) ||
+						!ArbitraryTransactionUtils.allChunksExist(transactionData)) {
+					return false;
+				}
+			}
+			return true;
+
+		} catch (DataException e) {
+			return false;
+		}
+	}
+
+	private boolean isRateLimited() {
+		try {
+			this.fetchTransactions();
+
+			List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
+
+			for (ArbitraryTransactionData transactionData : transactionDataList) {
+				if (ArbitraryDataManager.getInstance().isSignatureRateLimited(transactionData.getSignature())) {
+					return true;
+				}
+			}
+			// No transaction signatures are rate limited
+			return false;
+
+		} catch (DataException e) {
+			return false;
+		}
+	}
+
+	/**
+	 * Best guess as to whether data might be available
+	 * This is only used to give an indication to the user of progress
+	 * @return - whether data might be available on the network
+	 */
+	private boolean isDataPotentiallyAvailable() {
+		try {
+			this.fetchTransactions();
+			Long now = NTP.getTime();
+			if (now == null) {
+				return false;
+			}
+
+			List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
+
+			for (ArbitraryTransactionData transactionData : transactionDataList) {
+				long lastRequestTime = ArbitraryDataManager.getInstance().lastRequestForSignature(transactionData.getSignature());
+				// If we haven't requested yet, or requested in the last 30 seconds, there's still a
+				// chance that data is on its way but hasn't arrived yet
+				if (lastRequestTime == 0 || now - lastRequestTime < 30 * 1000L) {
+					return true;
+				}
+			}
+			return false;
+
+		} catch (DataException e) {
+			return false;
+		}
+	}
+
+
+	/**
+	 * Best guess as to whether we are currently downloading a resource
+	 * This is only used to give an indication to the user of progress
+	 * @return - whether we are trying to download the resource
+	 */
+	private boolean isDownloading() {
+		try {
+			this.fetchTransactions();
+			Long now = NTP.getTime();
+			if (now == null) {
+				return false;
+			}
+
+			List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
+
+			for (ArbitraryTransactionData transactionData : transactionDataList) {
+				long lastRequestTime = ArbitraryDataManager.getInstance().lastRequestForSignature(transactionData.getSignature());
+				// If we have requested data in the last 30 seconds, treat it as "downloading"
+				if (lastRequestTime > 0 && now - lastRequestTime < 30 * 1000L) {
+					return true;
+				}
+			}
+
+			// FUTURE: we may want to check for file hashes (including the metadata file hash) in
+			// ArbitraryDataManager.arbitraryDataFileRequests and return true if one is found.
+ + return false; + + } catch (DataException e) { + return false; + } + } + + + + private void fetchTransactions() throws DataException { + if (this.transactions != null && !this.transactions.isEmpty()) { + // Already fetched + return; + } + + try (final Repository repository = RepositoryManager.getRepository()) { + + // Get the most recent PUT + ArbitraryTransactionData latestPut = repository.getArbitraryRepository() + .getLatestTransaction(this.resourceId, this.service, ArbitraryTransactionData.Method.PUT, this.identifier); + if (latestPut == null) { + String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s", + this.resourceId, this.service, this.identifierString()); + throw new DataException(message); + } + this.latestPutTransaction = latestPut; + + // Load all transactions since the latest PUT + List transactionDataList = repository.getArbitraryRepository() + .getArbitraryTransactions(this.resourceId, this.service, this.identifier, latestPut.getTimestamp()); + + this.transactions = transactionDataList; + this.layerCount = transactionDataList.size(); + } + } + + private String resourceIdString() { + return resourceId != null ? resourceId : ""; + } + + private String resourceIdTypeString() { + return resourceIdType != null ? resourceIdType.toString() : ""; + } + + private String serviceString() { + return service != null ? service.toString() : ""; + } + + private String identifierString() { + return identifier != null ? identifier : ""; + } + + @Override + public String toString() { + return String.format("%s %s %s", this.serviceString(), this.resourceIdString(), this.identifierString()); + } + + + /** + * @return unique key used to identify this resource + */ + public String getUniqueKey() { + return String.format("%s-%s-%s", this.service, this.resourceId, this.identifier).toLowerCase(); + } + + public String getResourceId() { + return this.resourceId; + } + + public Service getService() { + return this.service; + } + + public String getIdentifier() { + return this.identifier; + } +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java new file mode 100644 index 00000000..442461e1 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java @@ -0,0 +1,285 @@ +package org.qortal.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType; +import org.qortal.arbitrary.ArbitraryDataDiff.*; +import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch; +import org.qortal.arbitrary.misc.Service; +import org.qortal.block.BlockChain; +import org.qortal.crypto.Crypto; +import org.qortal.data.PaymentData; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.ArbitraryTransactionData.*; +import org.qortal.data.transaction.BaseTransactionData; +import org.qortal.group.Group; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.transaction.ArbitraryTransaction; +import org.qortal.transaction.Transaction; +import org.qortal.transform.Transformer; +import org.qortal.utils.Base58; +import org.qortal.utils.FilesystemUtils; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; 
+import java.util.Random; + +public class ArbitraryDataTransactionBuilder { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataTransactionBuilder.class); + + // Min transaction version required + private static final int MIN_TRANSACTION_VERSION = 5; + + // Maximum number of PATCH layers allowed + private static final int MAX_LAYERS = 10; + // Maximum size difference (out of 1) allowed for PATCH transactions + private static final double MAX_SIZE_DIFF = 0.2f; + // Maximum proportion of files modified relative to total + private static final double MAX_FILE_DIFF = 0.5f; + + private final String publicKey58; + private final Path path; + private final String name; + private Method method; + private final Service service; + private final String identifier; + private final Repository repository; + + private int chunkSize = ArbitraryDataFile.CHUNK_SIZE; + + private ArbitraryTransactionData arbitraryTransactionData; + private ArbitraryDataFile arbitraryDataFile; + + public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, Path path, String name, + Method method, Service service, String identifier) { + this.repository = repository; + this.publicKey58 = publicKey58; + this.path = path; + this.name = name; + this.method = method; + this.service = service; + + // If identifier is a blank string, or reserved keyword "default", treat it as null + if (identifier == null || identifier.equals("") || identifier.equals("default")) { + identifier = null; + } + this.identifier = identifier; + } + + public void build() throws DataException { + try { + this.preExecute(); + this.checkMethod(); + this.createTransaction(); + } + finally { + this.postExecute(); + } + } + + private void preExecute() { + + } + + private void postExecute() { + + } + + private void checkMethod() throws DataException { + if (this.method == null) { + // We need to automatically determine the method + this.method = this.determineMethodAutomatically(); + } + } + + private Method determineMethodAutomatically() throws DataException { + ArbitraryDataReader reader = new ArbitraryDataReader(this.name, ResourceIdType.NAME, this.service, this.identifier); + try { + reader.loadSynchronously(true); + } catch (Exception e) { + // Catch all exceptions if the existing resource cannot be loaded first time + // In these cases it's simplest to just use a PUT transaction + return Method.PUT; + } + + try { + // Check layer count + int layerCount = reader.getLayerCount(); + if (layerCount >= MAX_LAYERS) { + LOGGER.info("Reached maximum layer count ({} / {}) - using PUT", layerCount, MAX_LAYERS); + return Method.PUT; + } + + // Check size of differences between this layer and previous layer + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(reader.getFilePath(), this.path, reader.getLatestSignature()); + patch.create(); + long diffSize = FilesystemUtils.getDirectorySize(patch.getFinalPath()); + long existingStateSize = FilesystemUtils.getDirectorySize(reader.getFilePath()); + double difference = (double) diffSize / (double) existingStateSize; + if (difference > MAX_SIZE_DIFF) { + LOGGER.info("Reached maximum difference ({} / {}) - using PUT", difference, MAX_SIZE_DIFF); + return Method.PUT; + } + + // Check number of modified files + ArbitraryDataMetadataPatch metadata = patch.getMetadata(); + int totalFileCount = patch.getTotalFileCount(); + int differencesCount = metadata.getFileDifferencesCount(); + difference = (double) differencesCount / (double) totalFileCount; + if (difference > 
MAX_FILE_DIFF) { + LOGGER.info("Reached maximum file differences ({} / {}) - using PUT", difference, MAX_FILE_DIFF); + return Method.PUT; + } + + // Check the patch types + // Limit this check to single file resources only for now + boolean atLeastOnePatch = false; + if (totalFileCount == 1) { + for (ModifiedPath path : metadata.getModifiedPaths()) { + if (path.getDiffType() != DiffType.COMPLETE_FILE) { + atLeastOnePatch = true; + } + } + } + if (!atLeastOnePatch) { + LOGGER.info("Patch consists of complete files only - using PUT"); + return Method.PUT; + } + + // State is appropriate for a PATCH transaction + return Method.PATCH; + } + catch (IOException | DataException e) { + // Handle matching states separately, as it's best to block transactions with duplicate states + if (e.getMessage().equals("Current state matches previous state. Nothing to do.")) { + throw new DataException(e.getMessage()); + } + LOGGER.info("Caught exception: {}", e.getMessage()); + LOGGER.info("Unable to load existing resource - using PUT to overwrite it."); + return Method.PUT; + } + } + + private void createTransaction() throws DataException { + arbitraryDataFile = null; + try { + Long now = NTP.getTime(); + if (now == null) { + throw new DataException("NTP time not synced yet"); + } + + // Ensure that this chain supports transactions necessary for complex arbitrary data + int transactionVersion = Transaction.getVersionByTimestamp(now); + if (transactionVersion < MIN_TRANSACTION_VERSION) { + throw new DataException("Transaction version unsupported on this blockchain."); + } + + if (publicKey58 == null || path == null) { + throw new DataException("Missing public key or path"); + } + byte[] creatorPublicKey = Base58.decode(publicKey58); + final String creatorAddress = Crypto.toAddress(creatorPublicKey); + byte[] lastReference = repository.getAccountRepository().getLastReference(creatorAddress); + if (lastReference == null) { + // Use a random last reference on the very first transaction for an account + // Code copied from CrossChainResource.buildAtMessage() + // We already require PoW on all arbitrary transactions, so no additional logic is needed + Random random = new Random(); + lastReference = new byte[Transformer.SIGNATURE_LENGTH]; + random.nextBytes(lastReference); + } + + Compression compression = Compression.ZIP; + + // FUTURE? Use zip compression for directories, or no compression for single files + // Compression compression = (path.toFile().isDirectory()) ? 
Compression.ZIP : Compression.NONE; + + ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(path, name, service, identifier, method, compression); + try { + arbitraryDataWriter.setChunkSize(this.chunkSize); + arbitraryDataWriter.save(); + } catch (IOException | DataException | InterruptedException | RuntimeException | MissingDataException e) { + LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage()); + throw new DataException(e.getMessage()); + } + + // Get main file + arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile(); + if (arbitraryDataFile == null) { + throw new DataException("Arbitrary data file is null"); + } + + // Get chunks metadata file + ArbitraryDataFile metadataFile = arbitraryDataFile.getMetadataFile(); + if (metadataFile == null && arbitraryDataFile.chunkCount() > 1) { + throw new DataException(String.format("Chunks metadata data file is null but there are %d chunks", arbitraryDataFile.chunkCount())); + } + + String digest58 = arbitraryDataFile.digest58(); + if (digest58 == null) { + LOGGER.error("Unable to calculate file digest"); + throw new DataException("Unable to calculate file digest"); + } + + final BaseTransactionData baseTransactionData = new BaseTransactionData(now, Group.NO_GROUP, + lastReference, creatorPublicKey, 0L, null); + final int size = (int) arbitraryDataFile.size(); + final int version = 5; + final int nonce = 0; + byte[] secret = arbitraryDataFile.getSecret(); + final ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH; + final byte[] digest = arbitraryDataFile.digest(); + final byte[] metadataHash = (metadataFile != null) ? metadataFile.getHash() : null; + final List payments = new ArrayList<>(); + + ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData, + version, service, nonce, size, name, identifier, method, + secret, compression, digest, dataType, metadataHash, payments); + + this.arbitraryTransactionData = transactionData; + + } catch (DataException e) { + if (arbitraryDataFile != null) { + arbitraryDataFile.deleteAll(); + } + throw(e); + } + + } + + public void computeNonce() throws DataException { + if (this.arbitraryTransactionData == null) { + throw new DataException("Arbitrary transaction data is required to compute nonce"); + } + + ArbitraryTransaction transaction = (ArbitraryTransaction) Transaction.fromData(repository, this.arbitraryTransactionData); + LOGGER.info("Computing nonce..."); + transaction.computeNonce(); + + Transaction.ValidationResult result = transaction.isValidUnconfirmed(); + if (result != Transaction.ValidationResult.OK) { + arbitraryDataFile.deleteAll(); + throw new DataException(String.format("Arbitrary transaction invalid: %s", result)); + } + LOGGER.info("Transaction is valid"); + } + + public ArbitraryTransactionData getArbitraryTransactionData() { + return this.arbitraryTransactionData; + } + + public ArbitraryDataFile getArbitraryDataFile() { + return this.arbitraryDataFile; + } + + public void setChunkSize(int chunkSize) { + this.chunkSize = chunkSize; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataWriter.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataWriter.java new file mode 100644 index 00000000..9204a069 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataWriter.java @@ -0,0 +1,342 @@ +package org.qortal.arbitrary; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; 
+import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata; +import org.qortal.arbitrary.misc.Service; +import org.qortal.crypto.Crypto; +import org.qortal.data.transaction.ArbitraryTransactionData.*; +import org.qortal.crypto.AES; +import org.qortal.repository.DataException; +import org.qortal.arbitrary.ArbitraryDataFile.*; +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; +import org.qortal.utils.FilesystemUtils; +import org.qortal.utils.ZipUtils; + +import javax.crypto.BadPaddingException; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKey; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; + +public class ArbitraryDataWriter { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataWriter.class); + + private Path filePath; + private final String name; + private final Service service; + private final String identifier; + private final Method method; + private final Compression compression; + + private int chunkSize = ArbitraryDataFile.CHUNK_SIZE; + + private SecretKey aesKey; + private ArbitraryDataFile arbitraryDataFile; + + // Intermediate paths to cleanup + private Path workingPath; + private Path compressedPath; + private Path encryptedPath; + + public ArbitraryDataWriter(Path filePath, String name, Service service, String identifier, Method method, Compression compression) { + this.filePath = filePath; + this.name = name; + this.service = service; + this.method = method; + this.compression = compression; + + // If identifier is a blank string, or reserved keyword "default", treat it as null + if (identifier == null || identifier.equals("") || identifier.equals("default")) { + identifier = null; + } + this.identifier = identifier; + } + + public void save() throws IOException, DataException, InterruptedException, MissingDataException { + try { + this.preExecute(); + this.validateService(); + this.process(); + this.compress(); + this.encrypt(); + this.split(); + this.createMetadataFile(); + this.validate(); + + } finally { + this.postExecute(); + } + } + + private void preExecute() throws DataException { + this.checkEnabled(); + + // Enforce compression when uploading a directory + File file = new File(this.filePath.toString()); + if (file.isDirectory() && compression == Compression.NONE) { + throw new DataException("Unable to upload a directory without compression"); + } + + // Create temporary working directory + this.createWorkingDirectory(); + } + + private void postExecute() throws IOException { + this.cleanupFilesystem(); + } + + private void checkEnabled() throws DataException { + if (!Settings.getInstance().isQdnEnabled()) { + throw new DataException("QDN is disabled in settings"); + } + } + + private void createWorkingDirectory() throws DataException { + // Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware + String baseDir = Settings.getInstance().getTempDataPath(); + String identifier = Base58.encode(Crypto.digest(this.filePath.toString().getBytes())); + Path tempDir = Paths.get(baseDir, "writer", identifier); + try { + Files.createDirectories(tempDir); + } catch (IOException e) { + throw new 
DataException("Unable to create temp directory"); + } + this.workingPath = tempDir; + } + + private void validateService() throws IOException, DataException { + if (this.service.isValidationRequired()) { + Service.ValidationResult result = this.service.validate(this.filePath); + if (result != Service.ValidationResult.OK) { + throw new DataException(String.format("Validation of %s failed: %s", this.service, result.toString())); + } + } + } + + private void process() throws DataException, IOException, MissingDataException { + switch (this.method) { + + case PUT: + // Nothing to do + break; + + case PATCH: + this.processPatch(); + break; + + default: + throw new DataException(String.format("Unknown method specified: %s", method.toString())); + } + } + + private void processPatch() throws DataException, IOException, MissingDataException { + + // Build the existing state using past transactions + ArbitraryDataBuilder builder = new ArbitraryDataBuilder(this.name, this.service, this.identifier); + builder.build(); + Path builtPath = builder.getFinalPath(); + + // Obtain the latest signature, so this can be included in the patch + byte[] latestSignature = builder.getLatestSignature(); + + // Compute a diff of the latest changes on top of the previous state + // Then use only the differences as our data payload + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(builtPath, this.filePath, latestSignature); + patch.create(); + this.filePath = patch.getFinalPath(); + + // Delete the input directory + if (FilesystemUtils.pathInsideDataOrTempPath(builtPath)) { + File directory = new File(builtPath.toString()); + FileUtils.deleteDirectory(directory); + } + + // Validate the patch + this.validatePatch(); + } + + private void validatePatch() throws DataException { + if (this.filePath == null) { + throw new DataException("Null path after creating patch"); + } + + File qortalMetadataDirectoryFile = Paths.get(this.filePath.toString(), ".qortal").toFile(); + if (!qortalMetadataDirectoryFile.exists()) { + throw new DataException("Qortal metadata folder doesn't exist in patch"); + } + if (!qortalMetadataDirectoryFile.isDirectory()) { + throw new DataException("Qortal metadata folder isn't a directory"); + } + + File qortalPatchMetadataFile = Paths.get(this.filePath.toString(), ".qortal", "patch").toFile(); + if (!qortalPatchMetadataFile.exists()) { + throw new DataException("Qortal patch metadata file doesn't exist in patch"); + } + if (!qortalPatchMetadataFile.isFile()) { + throw new DataException("Qortal patch metadata file isn't a file"); + } + } + + private void compress() throws InterruptedException, DataException { + // Compress the data if requested + if (this.compression != Compression.NONE) { + this.compressedPath = Paths.get(this.workingPath.toString(), "data.zip"); + try { + + if (this.compression == Compression.ZIP) { + LOGGER.info("Compressing..."); + String enclosingFolderName = "data"; + ZipUtils.zip(this.filePath.toString(), this.compressedPath.toString(), enclosingFolderName); + } + else { + throw new DataException(String.format("Unknown compression type specified: %s", compression.toString())); + } + // FUTURE: other compression types + + // Delete the input directory + if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) { + File directory = new File(this.filePath.toString()); + FileUtils.deleteDirectory(directory); + } + // Replace filePath pointer with the zipped file path + this.filePath = this.compressedPath; + + } catch (IOException | DataException e) { + throw new 
DataException("Unable to zip directory", e); + } + } + } + + private void encrypt() throws DataException { + this.encryptedPath = Paths.get(this.workingPath.toString(), "data.zip.encrypted"); + try { + // Encrypt the file with AES + LOGGER.info("Encrypting..."); + this.aesKey = AES.generateKey(256); + AES.encryptFile("AES", this.aesKey, this.filePath.toString(), this.encryptedPath.toString()); + + // Delete the input file + if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) { + Files.delete(this.filePath); + } + // Replace filePath pointer with the encrypted file path + this.filePath = this.encryptedPath; + + } catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException | NoSuchPaddingException + | BadPaddingException | IllegalBlockSizeException | IOException | InvalidKeyException e) { + throw new DataException(String.format("Unable to encrypt file %s: %s", this.filePath, e.getMessage())); + } + } + + private void split() throws IOException, DataException { + // We don't have a signature yet, so use null to put the file in a generic folder + this.arbitraryDataFile = ArbitraryDataFile.fromPath(this.filePath, null); + if (this.arbitraryDataFile == null) { + throw new IOException("No file available when trying to split"); + } + + int chunkCount = this.arbitraryDataFile.split(this.chunkSize); + if (chunkCount > 0) { + LOGGER.info(String.format("Successfully split into %d chunk%s", chunkCount, (chunkCount == 1 ? "" : "s"))); + } + else { + throw new DataException("Unable to split file into chunks"); + } + } + + private void createMetadataFile() throws IOException, DataException { + // If we have at least one chunk, we need to create an index file containing their hashes + if (this.arbitraryDataFile.chunkCount() > 1) { + // Create the JSON file + Path chunkFilePath = Paths.get(this.workingPath.toString(), "metadata.json"); + ArbitraryDataTransactionMetadata chunkMetadata = new ArbitraryDataTransactionMetadata(chunkFilePath); + chunkMetadata.setChunks(this.arbitraryDataFile.chunkHashList()); + chunkMetadata.write(); + + // Create an ArbitraryDataFile from the JSON file (we don't have a signature yet) + ArbitraryDataFile metadataFile = ArbitraryDataFile.fromPath(chunkFilePath, null); + this.arbitraryDataFile.setMetadataFile(metadataFile); + } + } + + private void validate() throws IOException, DataException { + if (this.arbitraryDataFile == null) { + throw new DataException("No file available when validating"); + } + this.arbitraryDataFile.setSecret(this.aesKey.getEncoded()); + + // Validate the file + ValidationResult validationResult = this.arbitraryDataFile.isValid(); + if (validationResult != ValidationResult.OK) { + throw new DataException(String.format("File %s failed validation: %s", this.arbitraryDataFile, validationResult)); + } + LOGGER.info("Whole file hash is valid: {}", this.arbitraryDataFile.digest58()); + + // Validate each chunk + for (ArbitraryDataFileChunk chunk : this.arbitraryDataFile.getChunks()) { + validationResult = chunk.isValid(); + if (validationResult != ValidationResult.OK) { + throw new DataException(String.format("Chunk %s failed validation: %s", chunk, validationResult)); + } + } + LOGGER.info("Chunk hashes are valid"); + + // Validate chunks metadata file + if (this.arbitraryDataFile.chunkCount() > 1) { + ArbitraryDataFile metadataFile = this.arbitraryDataFile.getMetadataFile(); + if (metadataFile == null || !metadataFile.exists()) { + throw new DataException("No metadata file available, but there are multiple chunks"); + } + // Read the file + 
ArbitraryDataTransactionMetadata metadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath()); + metadata.read(); + // Check all chunks exist + for (byte[] chunk : this.arbitraryDataFile.chunkHashList()) { + if (!metadata.containsChunk(chunk)) { + throw new DataException(String.format("Missing chunk %s in metadata file", Base58.encode(chunk))); + } + } + } + } + + private void cleanupFilesystem() throws IOException { + // Clean up + if (FilesystemUtils.pathInsideDataOrTempPath(this.compressedPath)) { + File zippedFile = new File(this.compressedPath.toString()); + if (zippedFile.exists()) { + zippedFile.delete(); + } + } + if (FilesystemUtils.pathInsideDataOrTempPath(this.encryptedPath)) { + File encryptedFile = new File(this.encryptedPath.toString()); + if (encryptedFile.exists()) { + encryptedFile.delete(); + } + } + if (FilesystemUtils.pathInsideDataOrTempPath(this.workingPath)) { + FileUtils.deleteDirectory(new File(this.workingPath.toString())); + } + } + + + public ArbitraryDataFile getArbitraryDataFile() { + return this.arbitraryDataFile; + } + + public void setChunkSize(int chunkSize) { + this.chunkSize = chunkSize; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/exception/MissingDataException.java b/src/main/java/org/qortal/arbitrary/exception/MissingDataException.java new file mode 100644 index 00000000..63f617c0 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/exception/MissingDataException.java @@ -0,0 +1,20 @@ +package org.qortal.arbitrary.exception; + +public class MissingDataException extends Exception { + + public MissingDataException() { + } + + public MissingDataException(String message) { + super(message); + } + + public MissingDataException(String message, Throwable cause) { + super(message, cause); + } + + public MissingDataException(Throwable cause) { + super(cause); + } + +} diff --git a/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadata.java b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadata.java new file mode 100644 index 00000000..127fefb5 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadata.java @@ -0,0 +1,85 @@ +package org.qortal.arbitrary.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.repository.DataException; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * ArbitraryDataMetadata + * + * This is a base class to handle reading and writing JSON to the supplied filePath. 
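+ * Concrete implementations in this change include ArbitraryDataQortalMetadata (with its cache and patch variants) and ArbitraryDataTransactionMetadata.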
+ * + * It is not usable on its own; it must be subclassed, with two methods overridden: + * + * readJson() - code to unserialize the JSON file + * buildJson() - code to serialize the JSON file + * + */ +public class ArbitraryDataMetadata { + + protected static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMetadata.class); + + protected Path filePath; + + protected String jsonString; + + public ArbitraryDataMetadata(Path filePath) { + this.filePath = filePath; + } + + protected void readJson() throws DataException { + // To be overridden + } + + protected void buildJson() { + // To be overridden + } + + + public void read() throws IOException, DataException { + this.loadJson(); + this.readJson(); + } + + public void write() throws IOException, DataException { + this.buildJson(); + this.createParentDirectories(); + + BufferedWriter writer = new BufferedWriter(new FileWriter(this.filePath.toString())); + writer.write(this.jsonString); + writer.newLine(); + writer.close(); + } + + + protected void loadJson() throws IOException { + File metadataFile = new File(this.filePath.toString()); + if (!metadataFile.exists()) { + throw new IOException(String.format("Metadata file doesn't exist: %s", this.filePath.toString())); + } + + this.jsonString = new String(Files.readAllBytes(this.filePath)); + } + + + protected void createParentDirectories() throws DataException { + try { + Files.createDirectories(this.filePath.getParent()); + } catch (IOException e) { + throw new DataException("Unable to create parent directories"); + } + } + + + public String getJsonString() { + return this.jsonString; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadataCache.java b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadataCache.java new file mode 100644 index 00000000..bd6bb219 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadataCache.java @@ -0,0 +1,69 @@ +package org.qortal.arbitrary.metadata; + +import org.json.JSONObject; +import org.qortal.repository.DataException; +import org.qortal.utils.Base58; + +import java.nio.file.Path; + +public class ArbitraryDataMetadataCache extends ArbitraryDataQortalMetadata { + + private byte[] signature; + private long timestamp; + + public ArbitraryDataMetadataCache(Path filePath) { + super(filePath); + + } + + @Override + protected String fileName() { + return "cache"; + } + + @Override + protected void readJson() throws DataException { + if (this.jsonString == null) { + throw new DataException("Patch JSON string is null"); + } + + JSONObject cache = new JSONObject(this.jsonString); + if (cache.has("signature")) { + String sig = cache.getString("signature"); + if (sig != null) { + this.signature = Base58.decode(sig); + } + } + if (cache.has("timestamp")) { + this.timestamp = cache.getLong("timestamp"); + } + } + + @Override + protected void buildJson() { + JSONObject patch = new JSONObject(); + patch.put("signature", Base58.encode(this.signature)); + patch.put("timestamp", this.timestamp); + + this.jsonString = patch.toString(2); + LOGGER.trace("Cache metadata: {}", this.jsonString); + } + + + public void setSignature(byte[] signature) { + this.signature = signature; + } + + public byte[] getSignature() { + return this.signature; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + public long getTimestamp() { + return this.timestamp; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadataPatch.java 
b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadataPatch.java new file mode 100644 index 00000000..954dcb03 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataMetadataPatch.java @@ -0,0 +1,182 @@ +package org.qortal.arbitrary.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONArray; +import org.json.JSONObject; +import org.qortal.arbitrary.ArbitraryDataDiff.*; +import org.qortal.repository.DataException; +import org.qortal.utils.Base58; + +import java.lang.reflect.Field; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; + +public class ArbitraryDataMetadataPatch extends ArbitraryDataQortalMetadata { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMetadataPatch.class); + + private List addedPaths; + private List modifiedPaths; + private List removedPaths; + private byte[] previousSignature; + private byte[] previousHash; + private byte[] currentHash; + + public ArbitraryDataMetadataPatch(Path filePath) { + super(filePath); + + this.addedPaths = new ArrayList<>(); + this.modifiedPaths = new ArrayList<>(); + this.removedPaths = new ArrayList<>(); + } + + @Override + protected String fileName() { + return "patch"; + } + + @Override + protected void readJson() throws DataException { + if (this.jsonString == null) { + throw new DataException("Patch JSON string is null"); + } + + JSONObject patch = new JSONObject(this.jsonString); + if (patch.has("prevSig")) { + String prevSig = patch.getString("prevSig"); + if (prevSig != null) { + this.previousSignature = Base58.decode(prevSig); + } + } + if (patch.has("prevHash")) { + String prevHash = patch.getString("prevHash"); + if (prevHash != null) { + this.previousHash = Base58.decode(prevHash); + } + } + if (patch.has("curHash")) { + String curHash = patch.getString("curHash"); + if (curHash != null) { + this.currentHash = Base58.decode(curHash); + } + } + if (patch.has("added")) { + JSONArray added = (JSONArray) patch.get("added"); + if (added != null) { + for (int i=0; i()); + changeMap.setAccessible(false); + } catch (IllegalAccessException | NoSuchFieldException e) { + // Don't worry about failures as this is for optional ordering only + } + + patch.put("prevSig", Base58.encode(this.previousSignature)); + patch.put("prevHash", Base58.encode(this.previousHash)); + patch.put("curHash", Base58.encode(this.currentHash)); + patch.put("added", new JSONArray(this.addedPaths)); + patch.put("removed", new JSONArray(this.removedPaths)); + + JSONArray modifiedPaths = new JSONArray(); + for (ModifiedPath modifiedPath : this.modifiedPaths) { + JSONObject modifiedPathJson = new JSONObject(); + modifiedPathJson.put("path", modifiedPath.getPath()); + modifiedPathJson.put("type", modifiedPath.getDiffType()); + modifiedPaths.put(modifiedPathJson); + } + patch.put("modified", modifiedPaths); + + this.jsonString = patch.toString(2); + LOGGER.debug("Patch metadata: {}", this.jsonString); + } + + public void setAddedPaths(List addedPaths) { + this.addedPaths = addedPaths; + } + + public List getAddedPaths() { + return this.addedPaths; + } + + public void setModifiedPaths(List modifiedPaths) { + this.modifiedPaths = modifiedPaths; + } + + public List getModifiedPaths() { + return this.modifiedPaths; + } + + public void setRemovedPaths(List removedPaths) { + this.removedPaths = removedPaths; + } + + public List getRemovedPaths() { + return 
this.removedPaths; + } + + public void setPreviousSignature(byte[] previousSignature) { + this.previousSignature = previousSignature; + } + + public byte[] getPreviousSignature() { + return this.previousSignature; + } + + public void setPreviousHash(byte[] previousHash) { + this.previousHash = previousHash; + } + + public byte[] getPreviousHash() { + return this.previousHash; + } + + public void setCurrentHash(byte[] currentHash) { + this.currentHash = currentHash; + } + + public byte[] getCurrentHash() { + return this.currentHash; + } + + + public int getFileDifferencesCount() { + return this.addedPaths.size() + this.modifiedPaths.size() + this.removedPaths.size(); + } + +} diff --git a/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataQortalMetadata.java b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataQortalMetadata.java new file mode 100644 index 00000000..4c188843 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataQortalMetadata.java @@ -0,0 +1,102 @@ +package org.qortal.arbitrary.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.repository.DataException; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +/** + * ArbitraryDataQortalMetadata + * + * This is a base class to handle reading and writing JSON to a .qortal folder + * within the supplied filePath. This is used when storing data against an existing + * arbitrary data file structure. + * + * It is not usable on its own; it must be subclassed, with three methods overridden: + * + * fileName() - the file name to use within the .qortal folder + * readJson() - code to unserialize the JSON file + * buildJson() - code to serialize the JSON file + * + */ +public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata { + + protected static final Logger LOGGER = LogManager.getLogger(ArbitraryDataQortalMetadata.class); + + protected Path filePath; + protected Path qortalDirectoryPath; + + protected String jsonString; + + public ArbitraryDataQortalMetadata(Path filePath) { + super(filePath); + + this.qortalDirectoryPath = Paths.get(filePath.toString(), ".qortal"); + } + + protected String fileName() { + // To be overridden + return null; + } + + protected void readJson() throws DataException { + // To be overridden + } + + protected void buildJson() { + // To be overridden + } + + + @Override + public void read() throws IOException, DataException { + this.loadJson(); + this.readJson(); + } + + @Override + public void write() throws IOException, DataException { + this.buildJson(); + this.createParentDirectories(); + this.createQortalDirectory(); + + Path patchPath = Paths.get(this.qortalDirectoryPath.toString(), this.fileName()); + BufferedWriter writer = new BufferedWriter(new FileWriter(patchPath.toString())); + writer.write(this.jsonString); + writer.newLine(); + writer.close(); + } + + @Override + protected void loadJson() throws IOException { + Path path = Paths.get(this.qortalDirectoryPath.toString(), this.fileName()); + File patchFile = new File(path.toString()); + if (!patchFile.exists()) { + throw new IOException(String.format("Patch file doesn't exist: %s", path.toString())); + } + + this.jsonString = new String(Files.readAllBytes(path)); + } + + + protected void createQortalDirectory() throws DataException { + try { + 
Files.createDirectories(this.qortalDirectoryPath); + } catch (IOException e) { + throw new DataException("Unable to create .qortal directory"); + } + } + + + public String getJsonString() { + return this.jsonString; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataTransactionMetadata.java b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataTransactionMetadata.java new file mode 100644 index 00000000..abd47ec9 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/metadata/ArbitraryDataTransactionMetadata.java @@ -0,0 +1,78 @@ +package org.qortal.arbitrary.metadata; + +import org.json.JSONArray; +import org.json.JSONObject; +import org.qortal.repository.DataException; +import org.qortal.utils.Base58; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata { + + private List chunks; + + public ArbitraryDataTransactionMetadata(Path filePath) { + super(filePath); + + } + + @Override + protected void readJson() throws DataException { + if (this.jsonString == null) { + throw new DataException("Transaction metadata JSON string is null"); + } + + List chunksList = new ArrayList<>(); + JSONObject cache = new JSONObject(this.jsonString); + if (cache.has("chunks")) { + JSONArray chunks = cache.getJSONArray("chunks"); + if (chunks != null) { + for (int i=0; i chunks) { + this.chunks = chunks; + } + + public List getChunks() { + return this.chunks; + } + + public boolean containsChunk(byte[] chunk) { + for (byte[] c : this.chunks) { + if (Arrays.equals(c, chunk)) { + return true; + } + } + return false; + } + +} diff --git a/src/main/java/org/qortal/arbitrary/misc/Service.java b/src/main/java/org/qortal/arbitrary/misc/Service.java new file mode 100644 index 00000000..5d94d806 --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/misc/Service.java @@ -0,0 +1,131 @@ +package org.qortal.arbitrary.misc; + +import org.json.JSONObject; +import org.qortal.arbitrary.ArbitraryDataRenderer; +import org.qortal.transaction.Transaction; +import org.qortal.utils.FilesystemUtils; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static java.util.Arrays.stream; +import static java.util.stream.Collectors.toMap; + +public enum Service { + AUTO_UPDATE(1, false, null, null), + ARBITRARY_DATA(100, false, null, null), + WEBSITE(200, true, null, null) { + @Override + public ValidationResult validate(Path path) { + // Custom validation function to require an index HTML file in the root directory + List fileNames = ArbitraryDataRenderer.indexFiles(); + String[] files = path.toFile().list(); + if (files != null) { + for (String file : files) { + Path fileName = Paths.get(file).getFileName(); + if (fileName != null && fileNames.contains(fileName.toString())) { + return ValidationResult.OK; + } + } + } + return ValidationResult.MISSING_INDEX_FILE; + } + }, + GIT_REPOSITORY(300, false, null, null), + IMAGE(400, true, 10*1024*1024L, null), + THUMBNAIL(410, true, 500*1024L, null), + VIDEO(500, false, null, null), + AUDIO(600, false, null, null), + BLOG(700, false, null, null), + BLOG_POST(777, false, null, null), + BLOG_COMMENT(778, false, null, null), + DOCUMENT(800, false, null, null), + LIST(900, true, null, null), + PLAYLIST(910, true, null, null), + APP(1000, false, null, null), + METADATA(1100, false, null, null), + QORTAL_METADATA(1111, 
true, 10*1024L, Arrays.asList("title", "description", "tags")); + + public final int value; + private final boolean requiresValidation; + private final Long maxSize; + private final List requiredKeys; + + private static final Map map = stream(Service.values()) + .collect(toMap(service -> service.value, service -> service)); + + Service(int value, boolean requiresValidation, Long maxSize, List requiredKeys) { + this.value = value; + this.requiresValidation = requiresValidation; + this.maxSize = maxSize; + this.requiredKeys = requiredKeys; + } + + public ValidationResult validate(Path path) throws IOException { + if (!this.isValidationRequired()) { + return ValidationResult.OK; + } + + byte[] data = FilesystemUtils.getSingleFileContents(path); + long size = FilesystemUtils.getDirectorySize(path); + + // Validate max size if needed + if (this.maxSize != null) { + if (size > this.maxSize) { + return ValidationResult.EXCEEDS_SIZE_LIMIT; + } + } + + // Validate required keys if needed + if (this.requiredKeys != null) { + if (data == null) { + return ValidationResult.MISSING_KEYS; + } + JSONObject json = Service.toJsonObject(data); + for (String key : this.requiredKeys) { + if (!json.has(key)) { + return ValidationResult.MISSING_KEYS; + } + } + } + + // Validation passed + return ValidationResult.OK; + } + + public boolean isValidationRequired() { + return this.requiresValidation; + } + + public static Service valueOf(int value) { + return map.get(value); + } + + public static JSONObject toJsonObject(byte[] data) { + String dataString = new String(data); + return new JSONObject(dataString); + } + + public enum ValidationResult { + OK(1), + MISSING_KEYS(2), + EXCEEDS_SIZE_LIMIT(3), + MISSING_INDEX_FILE(4); + + public final int value; + + private static final Map map = stream(Transaction.ValidationResult.values()).collect(toMap(result -> result.value, result -> result)); + + ValidationResult(int value) { + this.value = value; + } + + public static Transaction.ValidationResult valueOf(int value) { + return map.get(value); + } + } +} diff --git a/src/main/java/org/qortal/arbitrary/patch/UnifiedDiffPatch.java b/src/main/java/org/qortal/arbitrary/patch/UnifiedDiffPatch.java new file mode 100644 index 00000000..0408f4ca --- /dev/null +++ b/src/main/java/org/qortal/arbitrary/patch/UnifiedDiffPatch.java @@ -0,0 +1,229 @@ +package org.qortal.arbitrary.patch; + +import com.github.difflib.DiffUtils; +import com.github.difflib.UnifiedDiffUtils; +import com.github.difflib.patch.Patch; +import com.github.difflib.patch.PatchFailedException; +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.crypto.Crypto; +import org.qortal.repository.DataException; +import org.qortal.settings.Settings; +import org.qortal.utils.FilesystemUtils; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +public class UnifiedDiffPatch { + + private static final Logger LOGGER = LogManager.getLogger(UnifiedDiffPatch.class); + + private final Path before; + private final Path after; + private final Path destination; + + private String identifier; + private Path validationPath; + + public UnifiedDiffPatch(Path before, Path after, Path destination) { + this.before = before; + this.after = 
after; + this.destination = destination; + } + + /** + * Create a patch based on the differences in path "after" + * compared with base path "before", outputting the patch + * to the "destination" path. + * + * @throws IOException + */ + public void create() throws IOException { + if (!Files.exists(before)) { + throw new IOException(String.format("File not found (before): %s", before.toString())); + } + if (!Files.exists(after)) { + throw new IOException(String.format("File not found (after): %s", after.toString())); + } + + // Ensure parent folders exist in the destination + File file = new File(destination.toString()); + File parent = file.getParentFile(); + if (parent != null) { + parent.mkdirs(); + } + + // Delete an existing file if it exists + File destFile = destination.toFile(); + if (destFile.exists() && destFile.isFile()) { + Files.delete(destination); + } + + // Load the two files into memory + List original = FileUtils.readLines(before.toFile(), StandardCharsets.UTF_8); + List revised = FileUtils.readLines(after.toFile(), StandardCharsets.UTF_8); + + // Check if the original file ends with a newline + boolean endsWithNewline = FilesystemUtils.fileEndsWithNewline(before); + + // Generate diff information + Patch diff = DiffUtils.diff(original, revised); + + // Generate unified diff format + String originalFileName = before.getFileName().toString(); + String revisedFileName = after.getFileName().toString(); + List unifiedDiff = UnifiedDiffUtils.generateUnifiedDiff(originalFileName, revisedFileName, original, diff, 0); + + // Write the diff to the destination directory + FileWriter fileWriter = new FileWriter(destination.toString(), true); + BufferedWriter writer = new BufferedWriter(fileWriter); + for (int i=0; i originalContents = FileUtils.readLines(originalPath.toFile(), StandardCharsets.UTF_8); + List patchContents = FileUtils.readLines(patchPath.toFile(), StandardCharsets.UTF_8); + + // Check if the patch file (and therefore the original file) ends with a newline + boolean endsWithNewline = FilesystemUtils.fileEndsWithNewline(patchPath); + + // At first, parse the unified diff file and get the patch + Patch patch = UnifiedDiffUtils.parseUnifiedDiff(patchContents); + + // Then apply the computed patch to the given text + try { + List patchedContents = DiffUtils.patch(originalContents, patch); + + // Write the patched file to the merge directory + FileWriter fileWriter = new FileWriter(mergePath.toString(), true); + BufferedWriter writer = new BufferedWriter(fileWriter); + for (int i=0; i transactionData.getType() != TransactionType.AT).count(); // The number of non-AT transactions fetched from repository should correspond with Block's transactionCount - if (nonAtTransactionCount != this.blockData.getTransactionCount()) + if (nonAtTransactionCount != this.blockData.getTransactionCount()) { + LOGGER.error(() -> String.format("Block's transactions from repository (%d) do not match block's transaction count (%d)", nonAtTransactionCount, this.blockData.getTransactionCount())); throw new IllegalStateException("Block's transactions from repository do not match block's transaction count"); + } this.transactions = new ArrayList<>(); diff --git a/src/main/java/org/qortal/controller/ArbitraryDataManager.java b/src/main/java/org/qortal/controller/ArbitraryDataManager.java deleted file mode 100644 index 61447dbc..00000000 --- a/src/main/java/org/qortal/controller/ArbitraryDataManager.java +++ /dev/null @@ -1,91 +0,0 @@ -package org.qortal.controller; - -import java.util.Arrays; -import 
java.util.List; -import java.util.Random; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; -import org.qortal.data.transaction.ArbitraryTransactionData; -import org.qortal.data.transaction.TransactionData; -import org.qortal.repository.DataException; -import org.qortal.repository.Repository; -import org.qortal.repository.RepositoryManager; -import org.qortal.transaction.ArbitraryTransaction; -import org.qortal.transaction.Transaction.TransactionType; - -public class ArbitraryDataManager extends Thread { - - private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataManager.class); - private static final List ARBITRARY_TX_TYPE = Arrays.asList(TransactionType.ARBITRARY); - - private static ArbitraryDataManager instance; - - private volatile boolean isStopping = false; - - private ArbitraryDataManager() { - } - - public static ArbitraryDataManager getInstance() { - if (instance == null) - instance = new ArbitraryDataManager(); - - return instance; - } - - @Override - public void run() { - Thread.currentThread().setName("Arbitrary Data Manager"); - - try { - while (!isStopping) { - Thread.sleep(2000); - - // Any arbitrary transactions we want to fetch data for? - try (final Repository repository = RepositoryManager.getRepository()) { - List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, ConfirmationStatus.BOTH, null, null, true); - if (signatures == null || signatures.isEmpty()) - continue; - - // Filter out those that already have local data - signatures.removeIf(signature -> hasLocalData(repository, signature)); - - if (signatures.isEmpty()) - continue; - - // Pick one at random - final int index = new Random().nextInt(signatures.size()); - byte[] signature = signatures.get(index); - - Controller.getInstance().fetchArbitraryData(signature); - } catch (DataException e) { - LOGGER.error("Repository issue when fetching arbitrary transaction data", e); - } - } - } catch (InterruptedException e) { - // Fall-through to exit thread... 
- } - } - - public void shutdown() { - isStopping = true; - this.interrupt(); - } - - private boolean hasLocalData(final Repository repository, final byte[] signature) { - try { - TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); - if (!(transactionData instanceof ArbitraryTransactionData)) - return true; - - ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData); - - return arbitraryTransaction.isDataLocal(); - } catch (DataException e) { - LOGGER.error("Repository issue when checking arbitrary transaction's data is local", e); - return true; - } - } - -} diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index 4002cd4c..aff35b38 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -17,7 +17,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Deque; -import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; @@ -41,18 +40,21 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import com.google.common.primitives.Longs; import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; import org.qortal.account.PublicKeyAccount; import org.qortal.api.ApiService; +import org.qortal.api.DomainMapService; +import org.qortal.api.GatewayService; import org.qortal.block.Block; import org.qortal.block.BlockChain; import org.qortal.block.BlockChain.BlockTimingByHeight; +import org.qortal.controller.arbitrary.*; import org.qortal.controller.Synchronizer.SynchronizationResult; import org.qortal.controller.repository.PruneManager; import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.controller.tradebot.TradeBot; -import org.qortal.crypto.Crypto; import org.qortal.data.account.MintingAccountData; import org.qortal.data.account.RewardShareData; import org.qortal.data.block.BlockData; @@ -60,45 +62,24 @@ import org.qortal.data.block.BlockSummaryData; import org.qortal.data.network.OnlineAccountData; import org.qortal.data.network.PeerChainTipData; import org.qortal.data.network.PeerData; -import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.ChatTransactionData; import org.qortal.data.transaction.TransactionData; -import org.qortal.data.transaction.ArbitraryTransactionData.DataType; import org.qortal.event.Event; import org.qortal.event.EventBus; -import org.qortal.data.transaction.ChatTransactionData; import org.qortal.globalization.Translator; import org.qortal.gui.Gui; import org.qortal.gui.SysTray; import org.qortal.network.Network; import org.qortal.network.Peer; -import org.qortal.network.message.ArbitraryDataMessage; -import org.qortal.network.message.BlockSummariesMessage; -import org.qortal.network.message.CachedBlockMessage; -import org.qortal.network.message.GetArbitraryDataMessage; -import org.qortal.network.message.GetBlockMessage; -import org.qortal.network.message.GetBlockSummariesMessage; -import org.qortal.network.message.GetOnlineAccountsMessage; -import org.qortal.network.message.GetPeersMessage; -import org.qortal.network.message.GetSignaturesV2Message; -import 
org.qortal.network.message.GetTransactionMessage; -import org.qortal.network.message.GetUnconfirmedTransactionsMessage; -import org.qortal.network.message.HeightV2Message; -import org.qortal.network.message.Message; -import org.qortal.network.message.OnlineAccountsMessage; -import org.qortal.network.message.SignaturesMessage; -import org.qortal.network.message.TransactionMessage; -import org.qortal.network.message.TransactionSignaturesMessage; +import org.qortal.network.message.*; import org.qortal.repository.*; import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory; import org.qortal.settings.Settings; -import org.qortal.transaction.ArbitraryTransaction; import org.qortal.transaction.Transaction; import org.qortal.transaction.Transaction.TransactionType; import org.qortal.transaction.Transaction.ValidationResult; import org.qortal.utils.*; -import com.google.common.primitives.Longs; - public class Controller extends Thread { static { @@ -109,14 +90,13 @@ public class Controller extends Thread { /** Controller start-up time (ms) taken using System.currentTimeMillis(). */ public static final long startTime = System.currentTimeMillis(); - public static final String VERSION_PREFIX = "qortal-"; + public static final String VERSION_PREFIX = "qortaldata-"; private static final Logger LOGGER = LogManager.getLogger(Controller.class); private static final long MISBEHAVIOUR_COOLOFF = 10 * 60 * 1000L; // ms private static final int MAX_BLOCKCHAIN_TIP_AGE = 5; // blocks private static final Object shutdownLock = new Object(); private static final String repositoryUrlTemplate = "jdbc:hsqldb:file:%s" + File.separator + "blockchain;create=true;hsqldb.full_log_replay=true"; - private static final long ARBITRARY_REQUEST_TIMEOUT = 5 * 1000L; // ms private static final long NTP_PRE_SYNC_CHECK_PERIOD = 5 * 1000L; // ms private static final long NTP_POST_SYNC_CHECK_PERIOD = 5 * 60 * 1000L; // ms private static final long DELETE_EXPIRED_INTERVAL = 5 * 60 * 1000L; // ms @@ -181,25 +161,6 @@ public class Controller extends Thread { private boolean peersAvailable = true; // peersAvailable must default to true private long timePeersLastAvailable = 0; - /** - * Map of recent requests for ARBITRARY transaction data payloads. - *

- * Key is original request's message ID<br>
- * Value is Triple&lt;transaction signature in base58, first requesting peer, first request's timestamp&gt;
- * <p>
- * If peer is null then either:
- * <ul>
- * <li>we are the original requesting peer</li>
- * <li>we have already sent data payload to original requesting peer.</li>
- * </ul>
- * If signature is null then we have already received the data payload and either:
- * <ul>
- * <li>we are the original requesting peer and have saved it locally</li>
- * <li>we have forwarded the data payload (and maybe also saved it locally)</li>
- * </ul>
- */ - private Map> arbitraryDataRequests = Collections.synchronizedMap(new HashMap<>()); - /** Lock for only allowing one blockchain-modifying codepath at a time. e.g. synchronization or newly minted block. */ private final ReentrantLock blockchainLock = new ReentrantLock(); @@ -242,12 +203,30 @@ public class Controller extends Thread { } public GetBlockSignaturesV2Stats getBlockSignaturesV2Stats = new GetBlockSignaturesV2Stats(); + public static class GetArbitraryDataFileMessageStats { + public AtomicLong requests = new AtomicLong(); + public AtomicLong unknownFiles = new AtomicLong(); + + public GetArbitraryDataFileMessageStats() { + } + } + public GetArbitraryDataFileMessageStats getArbitraryDataFileMessageStats = new GetArbitraryDataFileMessageStats(); + + public static class GetArbitraryDataFileListMessageStats { + public AtomicLong requests = new AtomicLong(); + public AtomicLong unknownFiles = new AtomicLong(); + + public GetArbitraryDataFileListMessageStats() { + } + } + public GetArbitraryDataFileListMessageStats getArbitraryDataFileListMessageStats = new GetArbitraryDataFileListMessageStats(); + public AtomicLong latestBlocksCacheRefills = new AtomicLong(); public StatsSnapshot() { } } - private final StatsSnapshot stats = new StatsSnapshot(); + public final StatsSnapshot stats = new StatsSnapshot(); // Constructors @@ -387,6 +366,8 @@ public class Controller extends Thread { // Entry point public static void main(String[] args) { + LoggingUtils.fixLegacyLog4j2Properties(); + LOGGER.info("Starting up..."); // Potential GUI startup with splash screen, etc. @@ -493,9 +474,13 @@ public class Controller extends Thread { LOGGER.info("Starting trade-bot"); TradeBot.getInstance(); - // Arbitrary transaction data manager - // LOGGER.info("Starting arbitrary-transaction data manager"); - // ArbitraryDataManager.getInstance().start(); + // Arbitrary data controllers + LOGGER.info("Starting arbitrary-transaction controllers"); + ArbitraryDataManager.getInstance().start(); + ArbitraryDataBuildManager.getInstance().start(); + ArbitraryDataCleanupManager.getInstance().start(); + ArbitraryDataStorageManager.getInstance().start(); + ArbitraryDataRenderManager.getInstance().start(); // Auto-update service? 
if (Settings.getInstance().isAutoUpdateEnabled()) { @@ -514,6 +499,32 @@ public class Controller extends Thread { return; // Not System.exit() so that GUI can display error } + if (Settings.getInstance().isGatewayEnabled()) { + LOGGER.info(String.format("Starting gateway service on port %d", Settings.getInstance().getGatewayPort())); + try { + GatewayService gatewayService = GatewayService.getInstance(); + gatewayService.start(); + } catch (Exception e) { + LOGGER.error("Unable to start gateway service", e); + Controller.getInstance().shutdown(); + Gui.getInstance().fatalError("Gateway service failure", e); + return; // Not System.exit() so that GUI can display error + } + } + + if (Settings.getInstance().isDomainMapEnabled()) { + LOGGER.info(String.format("Starting domain map service on port %d", Settings.getInstance().getDomainMapPort())); + try { + DomainMapService domainMapService = DomainMapService.getInstance(); + domainMapService.start(); + } catch (Exception e) { + LOGGER.error("Unable to start domain map service", e); + Controller.getInstance().shutdown(); + Gui.getInstance().fatalError("Domain map service failure", e); + return; // Not System.exit() so that GUI can display error + } + } + // If GUI is enabled, we're no longer starting up but actually running now Gui.getInstance().notifyRunning(); } @@ -574,8 +585,9 @@ public class Controller extends Thread { } // Clean up arbitrary data request cache - final long requestMinimumTimestamp = now - ARBITRARY_REQUEST_TIMEOUT; - arbitraryDataRequests.entrySet().removeIf(entry -> entry.getValue().getC() < requestMinimumTimestamp); + ArbitraryDataManager.getInstance().cleanupRequestCache(now); + // Clean up arbitrary data queues and lists + ArbitraryDataBuildManager.getInstance().cleanupQueues(now); // Time to 'checkpoint' uncommitted repository writes? 
if (now >= repositoryCheckpointTimestamp + repositoryCheckpointInterval) { @@ -1054,9 +1066,13 @@ public class Controller extends Thread { AutoUpdate.getInstance().shutdown(); } - // Arbitrary transaction data manager - // LOGGER.info("Shutting down arbitrary-transaction data manager"); - // ArbitraryDataManager.getInstance().shutdown(); + // Arbitrary data controllers + LOGGER.info("Shutting down arbitrary-transaction controllers"); + ArbitraryDataManager.getInstance().shutdown(); + ArbitraryDataBuildManager.getInstance().shutdown(); + ArbitraryDataCleanupManager.getInstance().shutdown(); + ArbitraryDataStorageManager.getInstance().shutdown(); + ArbitraryDataRenderManager.getInstance().shutdown(); if (blockMinter != null) { LOGGER.info("Shutting down block minter"); @@ -1352,14 +1368,6 @@ public class Controller extends Thread { onNetworkTransactionSignaturesMessage(peer, message); break; - case GET_ARBITRARY_DATA: - onNetworkGetArbitraryDataMessage(peer, message); - break; - - case ARBITRARY_DATA: - onNetworkArbitraryDataMessage(peer, message); - break; - case GET_ONLINE_ACCOUNTS: onNetworkGetOnlineAccountsMessage(peer, message); break; @@ -1368,6 +1376,26 @@ public class Controller extends Thread { onNetworkOnlineAccountsMessage(peer, message); break; + case GET_ARBITRARY_DATA: + // Not currently supported + break; + + case ARBITRARY_DATA_FILE_LIST: + ArbitraryDataFileListManager.getInstance().onNetworkArbitraryDataFileListMessage(peer, message); + break; + + case GET_ARBITRARY_DATA_FILE: + ArbitraryDataFileManager.getInstance().onNetworkGetArbitraryDataFileMessage(peer, message); + break; + + case GET_ARBITRARY_DATA_FILE_LIST: + ArbitraryDataFileListManager.getInstance().onNetworkGetArbitraryDataFileListMessage(peer, message); + break; + + case ARBITRARY_SIGNATURES: + ArbitraryDataManager.getInstance().onNetworkArbitrarySignaturesMessage(peer, message); + break; + default: LOGGER.debug(() -> String.format("Unhandled %s message [ID %d] from peer %s", message.getType().name(), message.getId(), peer)); break; @@ -1733,103 +1761,6 @@ public class Controller extends Thread { } } - private void onNetworkGetArbitraryDataMessage(Peer peer, Message message) { - GetArbitraryDataMessage getArbitraryDataMessage = (GetArbitraryDataMessage) message; - - byte[] signature = getArbitraryDataMessage.getSignature(); - String signature58 = Base58.encode(signature); - Long timestamp = NTP.getTime(); - Triple newEntry = new Triple<>(signature58, peer, timestamp); - - // If we've seen this request recently, then ignore - if (arbitraryDataRequests.putIfAbsent(message.getId(), newEntry) != null) - return; - - // Do we even have this transaction? 
- try (final Repository repository = RepositoryManager.getRepository()) { - TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); - if (transactionData == null || transactionData.getType() != TransactionType.ARBITRARY) - return; - - ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); - - // If we have the data then send it - if (transaction.isDataLocal()) { - byte[] data = transaction.fetchData(); - if (data == null) - return; - - // Update requests map to reflect that we've sent it - newEntry = new Triple<>(signature58, null, timestamp); - arbitraryDataRequests.put(message.getId(), newEntry); - - Message arbitraryDataMessage = new ArbitraryDataMessage(signature, data); - arbitraryDataMessage.setId(message.getId()); - if (!peer.sendMessage(arbitraryDataMessage)) - peer.disconnect("failed to send arbitrary data"); - - return; - } - - // Ask our other peers if they have it - Network.getInstance().broadcast(broadcastPeer -> broadcastPeer == peer ? null : message); - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while finding arbitrary transaction data for peer %s", peer), e); - } - } - - private void onNetworkArbitraryDataMessage(Peer peer, Message message) { - ArbitraryDataMessage arbitraryDataMessage = (ArbitraryDataMessage) message; - - // Do we have a pending request for this data? - Triple request = arbitraryDataRequests.get(message.getId()); - if (request == null || request.getA() == null) - return; - - // Does this message's signature match what we're expecting? - byte[] signature = arbitraryDataMessage.getSignature(); - String signature58 = Base58.encode(signature); - if (!request.getA().equals(signature58)) - return; - - byte[] data = arbitraryDataMessage.getData(); - - // Check transaction exists and payload hash is correct - try (final Repository repository = RepositoryManager.getRepository()) { - TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); - if (!(transactionData instanceof ArbitraryTransactionData)) - return; - - ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData; - - byte[] actualHash = Crypto.digest(data); - - // "data" from repository will always be hash of actual raw data - if (!Arrays.equals(arbitraryTransactionData.getData(), actualHash)) - return; - - // Update requests map to reflect that we've received it - Triple newEntry = new Triple<>(null, null, request.getC()); - arbitraryDataRequests.put(message.getId(), newEntry); - - // Save payload locally - // TODO: storage policy - arbitraryTransactionData.setDataType(DataType.RAW_DATA); - arbitraryTransactionData.setData(data); - repository.getArbitraryRepository().save(arbitraryTransactionData); - repository.saveChanges(); - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while finding arbitrary transaction data for peer %s", peer), e); - } - - Peer requestingPeer = request.getB(); - if (requestingPeer != null) { - // Forward to requesting peer; - if (!requestingPeer.sendMessage(arbitraryDataMessage)) - requestingPeer.disconnect("failed to forward arbitrary data"); - } - } - private void onNetworkGetOnlineAccountsMessage(Peer peer, Message message) { GetOnlineAccountsMessage getOnlineAccountsMessage = (GetOnlineAccountsMessage) message; @@ -2124,51 +2055,6 @@ public class Controller extends Thread { } } - public byte[] fetchArbitraryData(byte[] signature) throws InterruptedException { - // 
Build request - Message getArbitraryDataMessage = new GetArbitraryDataMessage(signature); - - // Save our request into requests map - String signature58 = Base58.encode(signature); - Triple requestEntry = new Triple<>(signature58, null, NTP.getTime()); - - // Assign random ID to this message - int id; - do { - id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1; - - // Put queue into map (keyed by message ID) so we can poll for a response - // If putIfAbsent() doesn't return null, then this ID is already taken - } while (arbitraryDataRequests.put(id, requestEntry) != null); - getArbitraryDataMessage.setId(id); - - // Broadcast request - Network.getInstance().broadcast(peer -> getArbitraryDataMessage); - - // Poll to see if data has arrived - final long singleWait = 100; - long totalWait = 0; - while (totalWait < ARBITRARY_REQUEST_TIMEOUT) { - Thread.sleep(singleWait); - - requestEntry = arbitraryDataRequests.get(id); - if (requestEntry == null) - return null; - - if (requestEntry.getA() == null) - break; - - totalWait += singleWait; - } - - try (final Repository repository = RepositoryManager.getRepository()) { - return repository.getArbitraryRepository().fetchData(signature); - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while fetching arbitrary transaction data"), e); - return null; - } - } - /** Returns a list of peers that are not misbehaving, and have a recent block. */ public List getRecentBehavingPeers() { final Long minLatestBlockTimestamp = getMinimumLatestBlockTimestamp(); diff --git a/src/main/java/org/qortal/controller/Synchronizer.java b/src/main/java/org/qortal/controller/Synchronizer.java index b5bce3c5..d5e489c8 100644 --- a/src/main/java/org/qortal/controller/Synchronizer.java +++ b/src/main/java/org/qortal/controller/Synchronizer.java @@ -37,12 +37,14 @@ import org.qortal.transaction.Transaction; import org.qortal.utils.Base58; import org.qortal.utils.NTP; +import static org.qortal.network.Peer.FETCH_BLOCKS_TIMEOUT; + public class Synchronizer { private static final Logger LOGGER = LogManager.getLogger(Synchronizer.class); /** Max number of new blocks we aim to add to chain tip in each sync round */ - private static final int SYNC_BATCH_SIZE = 200; // XXX move to Settings? + private static final int SYNC_BATCH_SIZE = 1000; // XXX move to Settings? /** Initial jump back of block height when searching for common block with peer */ private static final int INITIAL_BLOCK_STEP = 8; @@ -56,6 +58,8 @@ public class Synchronizer { private static final int MAXIMUM_REQUEST_SIZE = 200; // XXX move to Settings? 
+ + // Keep track of the size of the last re-org, so it can be logged private int lastReorgSize; @@ -585,16 +589,7 @@ public class Synchronizer { String syncString = String.format("Synchronizing with peer %s at height %d, sig %.8s, ts %d; our height %d, sig %.8s, ts %d", peer, peerHeight, Base58.encode(peersLastBlockSignature), peer.getChainTipData().getLastBlockTimestamp(), ourInitialHeight, Base58.encode(ourLastBlockSignature), ourLatestBlockData.getTimestamp()); - - // If our latest block is very old, we should log that we're attempting to sync with a peer - // Otherwise, it can appear as though nothing is happening for a while after launch - final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp(); - if (minLatestBlockTimestamp != null && ourLatestBlockData.getTimestamp() < minLatestBlockTimestamp) { - LOGGER.info(syncString); - } - else { - LOGGER.debug(syncString); - } + LOGGER.info(syncString); // Reset last re-org size as we are starting a new sync round this.lastReorgSize = 0; @@ -872,7 +867,7 @@ public class Synchronizer { } private SynchronizationResult syncToPeerChain(Repository repository, BlockData commonBlockData, int ourInitialHeight, - Peer peer, final int peerHeight, List peerBlockSummaries) throws DataException, InterruptedException { + Peer peer, final int peerHeight, List peerBlockSummaries) throws DataException, InterruptedException { final int commonBlockHeight = commonBlockData.getHeight(); final byte[] commonBlockSig = commonBlockData.getSignature(); String commonBlockSig58 = Base58.encode(commonBlockSig); @@ -902,19 +897,19 @@ public class Synchronizer { if (Controller.isStopping()) return SynchronizationResult.SHUTTING_DOWN; - // Ensure we don't request more than MAXIMUM_REQUEST_SIZE - int numberRequested = Math.min(numberSignaturesRequired, MAXIMUM_REQUEST_SIZE); + // Ensure we don't request more than MAXIMUM_REQUEST_SIZE + int numberRequested = Math.min(numberSignaturesRequired, MAXIMUM_REQUEST_SIZE); - // Do we need more signatures? + // Do we need more signatures? if (peerBlockSignatures.isEmpty() && numberRequested > 0) { - LOGGER.trace(String.format("Requesting %d signature%s after height %d, sig %.8s", - numberRequested, (numberRequested != 1 ? "s" : ""), height, Base58.encode(latestPeerSignature))); + LOGGER.trace(String.format("Requesting %d signature%s after height %d, sig %.8s", + numberRequested, (numberRequested != 1 ? 
"s" : ""), height, Base58.encode(latestPeerSignature))); - peerBlockSignatures = this.getBlockSignatures(peer, latestPeerSignature, numberRequested); + peerBlockSignatures = this.getBlockSignatures(peer, latestPeerSignature, numberRequested); - if (peerBlockSignatures == null || peerBlockSignatures.isEmpty()) { - LOGGER.info(String.format("Peer %s failed to respond with more block signatures after height %d, sig %.8s", peer, - height, Base58.encode(latestPeerSignature))); + if (peerBlockSignatures == null || peerBlockSignatures.isEmpty()) { + LOGGER.info(String.format("Peer %s failed to respond with more block signatures after height %d, sig %.8s", peer, + height, Base58.encode(latestPeerSignature))); // Clear our cache of common block summaries for this peer, as they are likely to be invalid CommonBlockData cachedCommonBlockData = peer.getCommonBlockData(); @@ -924,7 +919,7 @@ public class Synchronizer { // If we have already received newer blocks from this peer that what we have already, go ahead and apply them if (peerBlocks.size() > 0) { final BlockData ourLatestBlockData = repository.getBlockRepository().getLastBlock(); - final Block peerLatestBlock = peerBlocks.get(peerBlocks.size() - 1); + final Block peerLatestBlock = peerBlocks.get(peerBlocks.size() - 1); final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp(); if (ourLatestBlockData != null && peerLatestBlock != null && minLatestBlockTimestamp != null) { @@ -947,8 +942,8 @@ public class Synchronizer { return SynchronizationResult.NO_REPLY; } - numberSignaturesRequired = peerHeight - height - peerBlockSignatures.size(); - LOGGER.trace(String.format("Received %s signature%s", peerBlockSignatures.size(), (peerBlockSignatures.size() != 1 ? "s" : ""))); + numberSignaturesRequired = peerHeight - height - peerBlockSignatures.size(); + LOGGER.trace(String.format("Received %s signature%s", peerBlockSignatures.size(), (peerBlockSignatures.size() != 1 ? 
"s" : ""))); } if (peerBlockSignatures.isEmpty()) { @@ -1098,7 +1093,7 @@ public class Synchronizer { } private SynchronizationResult applyNewBlocks(Repository repository, BlockData commonBlockData, int ourInitialHeight, - Peer peer, int peerHeight, List peerBlockSummaries) throws InterruptedException, DataException { + Peer peer, int peerHeight, List peerBlockSummaries) throws InterruptedException, DataException { LOGGER.debug(String.format("Fetching new blocks from peer %s", peer)); final int commonBlockHeight = commonBlockData.getHeight(); diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuildManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuildManager.java new file mode 100644 index 00000000..3df82d66 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuildManager.java @@ -0,0 +1,185 @@ +package org.qortal.controller.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataBuildQueueItem; +import org.qortal.utils.NTP; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public class ArbitraryDataBuildManager extends Thread { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataBuildManager.class); + + private static ArbitraryDataBuildManager instance; + + private volatile boolean isStopping = false; + private boolean buildInProgress = false; + + /** + * Map to keep track of arbitrary transaction resources currently being built (or queued). + */ + public Map arbitraryDataBuildQueue = Collections.synchronizedMap(new HashMap<>()); + + /** + * Map to keep track of failed arbitrary transaction builds. + */ + public Map arbitraryDataFailedBuilds = Collections.synchronizedMap(new HashMap<>()); + + + public ArbitraryDataBuildManager() { + + } + + @Override + public void run() { + try { + // Use a fixed thread pool to execute the arbitrary data build actions (currently just a single thread) + // This can be expanded to have multiple threads processing the build queue when needed + ExecutorService arbitraryDataBuildExecutor = Executors.newFixedThreadPool(1); + arbitraryDataBuildExecutor.execute(new ArbitraryDataBuilderThread()); + + while (!isStopping) { + // Nothing to do yet + Thread.sleep(5000); + } + + } catch (InterruptedException e) { + // Fall-through to exit thread... 
+ } + } + + public static ArbitraryDataBuildManager getInstance() { + if (instance == null) + instance = new ArbitraryDataBuildManager(); + + return instance; + } + + public void shutdown() { + isStopping = true; + this.interrupt(); + } + + + public void cleanupQueues(Long now) { + if (now == null) { + return; + } + arbitraryDataBuildQueue.entrySet().removeIf(entry -> entry.getValue().hasReachedBuildTimeout(now)); + arbitraryDataFailedBuilds.entrySet().removeIf(entry -> entry.getValue().hasReachedFailureTimeout(now)); + } + + // Build queue + + public boolean addToBuildQueue(ArbitraryDataBuildQueueItem queueItem) { + String key = queueItem.getUniqueKey(); + if (key == null) { + return false; + } + + if (this.arbitraryDataBuildQueue == null) { + return false; + } + + if (NTP.getTime() == null) { + // Can't use queues until we have synced the time + return false; + } + + // Don't add builds that have failed recently + if (this.isInFailedBuildsList(queueItem)) { + return false; + } + + if (this.arbitraryDataBuildQueue.put(key, queueItem) != null) { + // Already in queue + return true; + } + + LOGGER.info("Added {} to build queue", queueItem); + + // Added to queue + return true; + } + + public boolean isInBuildQueue(ArbitraryDataBuildQueueItem queueItem) { + String key = queueItem.getUniqueKey(); + if (key == null) { + return false; + } + + if (this.arbitraryDataBuildQueue == null) { + return false; + } + + if (this.arbitraryDataBuildQueue.containsKey(key)) { + // Already in queue + return true; + } + + // Not in queue + return false; + } + + + // Failed builds + + public boolean addToFailedBuildsList(ArbitraryDataBuildQueueItem queueItem) { + String key = queueItem.getUniqueKey(); + if (key == null) { + return false; + } + + if (this.arbitraryDataFailedBuilds == null) { + return false; + } + + if (NTP.getTime() == null) { + // Can't use queues until we have synced the time + return false; + } + + if (this.arbitraryDataFailedBuilds.put(key, queueItem) != null) { + // Already in list + return true; + } + + LOGGER.info("Added {} to failed builds list", queueItem); + + // Added to queue + return true; + } + + public boolean isInFailedBuildsList(ArbitraryDataBuildQueueItem queueItem) { + String key = queueItem.getUniqueKey(); + if (key == null) { + return false; + } + + if (this.arbitraryDataFailedBuilds == null) { + return false; + } + + if (this.arbitraryDataFailedBuilds.containsKey(key)) { + // Already in list + return true; + } + + // Not in list + return false; + } + + + public void setBuildInProgress(boolean buildInProgress) { + this.buildInProgress = buildInProgress; + } + + public boolean getBuildInProgress() { + return this.buildInProgress; + } +} diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java new file mode 100644 index 00000000..da7c7293 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java @@ -0,0 +1,98 @@ +package org.qortal.controller.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataBuildQueueItem; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.controller.Controller; +import org.qortal.repository.DataException; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.util.Map; + + +public class ArbitraryDataBuilderThread implements Runnable { + + private static final Logger 
LOGGER = LogManager.getLogger(ArbitraryDataBuilderThread.class); + + public ArbitraryDataBuilderThread() { + + } + + public void run() { + Thread.currentThread().setName("Arbitrary Data Build Manager"); + ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance(); + + while (!Controller.isStopping()) { + try { + Thread.sleep(1000); + + if (buildManager.arbitraryDataBuildQueue == null) { + continue; + } + if (buildManager.arbitraryDataBuildQueue.isEmpty()) { + continue; + } + + // Find resources that are queued for building + Map.Entry next = buildManager.arbitraryDataBuildQueue + .entrySet().stream() + .filter(e -> e.getValue().isQueued()) + .findFirst().get(); + + if (next == null) { + continue; + } + + Long now = NTP.getTime(); + if (now == null) { + continue; + } + + ArbitraryDataBuildQueueItem queueItem = next.getValue(); + + if (queueItem == null) { + this.removeFromQueue(queueItem); + } + + // Ignore builds that have failed recently + if (buildManager.isInFailedBuildsList(queueItem)) { + continue; + } + + + try { + // Perform the build + LOGGER.info("Building {}...", queueItem); + queueItem.build(); + this.removeFromQueue(queueItem); + LOGGER.info("Finished building {}", queueItem); + + } catch (MissingDataException e) { + LOGGER.info("Missing data for {}: {}", queueItem, e.getMessage()); + queueItem.setFailed(true); + this.removeFromQueue(queueItem); + // Don't add to the failed builds list, as we may want to retry sooner + + } catch (IOException | DataException | RuntimeException e) { + LOGGER.info("Error building {}: {}", queueItem, e.getMessage()); + // Something went wrong - so remove it from the queue, and add to failed builds list + queueItem.setFailed(true); + buildManager.addToFailedBuildsList(queueItem); + this.removeFromQueue(queueItem); + } + + } catch (InterruptedException e) { + // Time to exit + } + } + } + + private void removeFromQueue(ArbitraryDataBuildQueueItem queueItem) { + if (queueItem == null || queueItem.getUniqueKey() == null) { + return; + } + ArbitraryDataBuildManager.getInstance().arbitraryDataBuildQueue.remove(queueItem.getUniqueKey()); + } +} diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java new file mode 100644 index 00000000..8c263568 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java @@ -0,0 +1,544 @@ +package org.qortal.controller.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.transaction.Transaction; +import org.qortal.transaction.Transaction.TransactionType; +import org.qortal.utils.ArbitraryTransactionUtils; +import org.qortal.utils.Base58; +import org.qortal.utils.FilesystemUtils; +import org.qortal.utils.NTP; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.SecureRandom; +import java.util.*; + +import static org.qortal.controller.arbitrary.ArbitraryDataStorageManager.DELETION_THRESHOLD; + +public class 
ArbitraryDataCleanupManager extends Thread { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataCleanupManager.class); + private static final List ARBITRARY_TX_TYPE = Arrays.asList(TransactionType.ARBITRARY); + + private static ArbitraryDataCleanupManager instance; + + private volatile boolean isStopping = false; + + /** + * The amount of time that must pass before a file is treated as stale / not recent. + * We can safely delete files created/accessed longer ago that this, if we have a means of + * rebuilding them. The main purpose of this is to avoid deleting files that are currently + * being used by other parts of the system. + */ + private static final long STALE_FILE_TIMEOUT = 60*60*1000L; // 1 hour + + /** + * The number of chunks to delete in a batch when over the capacity limit. + * Storage limits are re-checked after each batch, and there could be a significant + * delay between the processing of each batch as it only occurs after a complete + * cleanup cycle (to allow unwanted chunks to be deleted first). + */ + private static final int CHUNK_DELETION_BATCH_SIZE = 10; + + + /* + TODO: + - Delete files from the _misc folder once they reach a certain age + */ + + + private ArbitraryDataCleanupManager() { + } + + public static ArbitraryDataCleanupManager getInstance() { + if (instance == null) + instance = new ArbitraryDataCleanupManager(); + + return instance; + } + + @Override + public void run() { + Thread.currentThread().setName("Arbitrary Data Cleanup Manager"); + + // Paginate queries when fetching arbitrary transactions + final int limit = 100; + int offset = 0; + + try { + while (!isStopping) { + Thread.sleep(30000); + + // Don't run if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + Thread.sleep(60 * 60 * 1000L); + continue; + } + + Long now = NTP.getTime(); + if (now == null) { + // Don't attempt to make decisions if we haven't synced our time yet + continue; + } + + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + + // Wait until storage capacity has been calculated + if (!storageManager.isStorageCapacityCalculated()) { + continue; + } + + // Periodically delete any unnecessary files from the temp directory + if (offset == 0 || offset % (limit * 10) == 0) { + this.cleanupTempDirectory(now); + } + + // Any arbitrary transactions we want to fetch data for? 
+ try (final Repository repository = RepositoryManager.getRepository()) { + List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true); + // LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit); + if (signatures == null || signatures.isEmpty()) { + offset = 0; + continue; + } + offset += limit; + now = NTP.getTime(); + + // Loop through the signatures in this batch + for (int i=0; i findPathsWithNoAssociatedTransaction(Repository repository) { + List pathList = new ArrayList<>(); + + // Find all hosted paths + List allPaths = ArbitraryDataStorageManager.getInstance().findAllHostedPaths(); + + // Loop through each path and find those without matching signatures + for (Path path : allPaths) { + try { + String[] contents = path.toFile().list(); + if (contents == null || contents.length == 0) { + // Ignore empty directories + continue; + } + + String signature58 = path.getFileName().toString(); + byte[] signature = Base58.decode(signature58); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (transactionData == null) { + // No transaction data, and no DataException, so we can assume that this data relates to an expired transaction + pathList.add(path); + } + + } catch (DataException e) { + continue; + } + } + + return pathList; + } + + private void checkForExpiredTransactions(Repository repository) { + List expiredPaths = this.findPathsWithNoAssociatedTransaction(repository); + for (Path expiredPath : expiredPaths) { + LOGGER.info("Found path with no associated transaction: {}", expiredPath.toString()); + this.safeDeleteDirectory(expiredPath.toFile(), "no matching transaction"); + } + } + + private void storageLimitReached(Repository repository) throws InterruptedException { + // We think that the storage limit has been reached + + // Now calculate the used/total storage again, as a safety precaution + Long now = NTP.getTime(); + ArbitraryDataStorageManager.getInstance().calculateDirectorySize(now); + if (ArbitraryDataStorageManager.getInstance().isStorageSpaceAvailable(DELETION_THRESHOLD)) { + // We have space available, so don't delete anything + return; + } + + // Delete a batch of random chunks + // This reduces the chance of too many nodes deleting the same chunk + // when they reach their storage limit + Path dataPath = Paths.get(Settings.getInstance().getDataPath()); + for (int i=0; i + * Key is original request's message ID
+ * Value is Triple<transaction signature in base58, first requesting peer, first request's timestamp> + *

+ * If peer is null then either:
+ *

    + *
  • we are the original requesting peer
  • + *
  • we have already sent data payload to original requesting peer.
  • + *
+ * If signature is null then we have already received the file list and either:
+ *
    + *
  • we are the original requesting peer and have processed it
  • + *
  • we have forwarded the file list
  • + *
+ */ + public Map> arbitraryDataFileListRequests = Collections.synchronizedMap(new HashMap<>()); + + /** + * Map to keep track of in progress arbitrary data signature requests + * Key: string - the signature encoded in base58 + * Value: Triple + */ + private Map> arbitraryDataSignatureRequests = Collections.synchronizedMap(new HashMap<>()); + + + /** Maximum number of seconds that a file list relay request is able to exist on the network */ + private static long RELAY_REQUEST_MAX_DURATION = 5000L; + /** Maximum number of hops that a file list relay request is allowed to make */ + private static int RELAY_REQUEST_MAX_HOPS = 3; + + + private ArbitraryDataFileListManager() { + } + + public static ArbitraryDataFileListManager getInstance() { + if (instance == null) + instance = new ArbitraryDataFileListManager(); + + return instance; + } + + + public void cleanupRequestCache(Long now) { + if (now == null) { + return; + } + final long requestMinimumTimestamp = now - ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT; + arbitraryDataFileListRequests.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < requestMinimumTimestamp); + } + + + // Track file list lookups by signature + + private boolean shouldMakeFileListRequestForSignature(String signature58) { + Triple request = arbitraryDataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return true; + } + + // Extract the components + Integer networkBroadcastCount = request.getA(); + // Integer directPeerRequestCount = request.getB(); + Long lastAttemptTimestamp = request.getC(); + + if (lastAttemptTimestamp == null) { + // Not attempted yet + return true; + } + + long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp; + if (timeSinceLastAttempt > 5 * 60 * 1000L) { + // We haven't tried for at least 5 minutes + + if (networkBroadcastCount < 5) { + // We've made less than 5 total attempts + return true; + } + } + + if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) { + // We haven't tried for at least 24 hours + return true; + } + + return false; + } + + private boolean shouldMakeDirectFileRequestsForSignature(String signature58) { + if (!Settings.getInstance().isDirectDataRetrievalEnabled()) { + // Direct connections are disabled in the settings + return false; + } + + Triple request = arbitraryDataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return true; + } + + // Extract the components + //Integer networkBroadcastCount = request.getA(); + Integer directPeerRequestCount = request.getB(); + Long lastAttemptTimestamp = request.getC(); + + if (lastAttemptTimestamp == null) { + // Not attempted yet + return true; + } + + if (directPeerRequestCount == 0) { + // We haven't tried asking peers directly yet, so we should + return true; + } + + long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp; + if (timeSinceLastAttempt > 10 * 1000L) { + // We haven't tried for at least 10 seconds + if (directPeerRequestCount < 5) { + // We've made less than 5 total attempts + return true; + } + } + + if (timeSinceLastAttempt > 5 * 60 * 1000L) { + // We haven't tried for at least 5 minutes + if (directPeerRequestCount < 10) { + // We've made less than 10 total attempts + return true; + } + } + + if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) { + // We haven't tried for at least 24 hours + return true; + } + + return false; + } + + public boolean isSignatureRateLimited(byte[] signature) { + String signature58 = 
Base58.encode(signature); + return !this.shouldMakeFileListRequestForSignature(signature58) + && !this.shouldMakeDirectFileRequestsForSignature(signature58); + } + + public long lastRequestForSignature(byte[] signature) { + String signature58 = Base58.encode(signature); + Triple request = arbitraryDataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return 0; + } + + // Extract the components + Long lastAttemptTimestamp = request.getC(); + if (lastAttemptTimestamp != null) { + return lastAttemptTimestamp; + } + return 0; + } + + public void addToSignatureRequests(String signature58, boolean incrementNetworkRequests, boolean incrementPeerRequests) { + Triple request = arbitraryDataSignatureRequests.get(signature58); + Long now = NTP.getTime(); + + if (request == null) { + // No entry yet + Triple newRequest = new Triple<>(0, 0, now); + arbitraryDataSignatureRequests.put(signature58, newRequest); + } + else { + // There is an existing entry + if (incrementNetworkRequests) { + request.setA(request.getA() + 1); + } + if (incrementPeerRequests) { + request.setB(request.getB() + 1); + } + request.setC(now); + arbitraryDataSignatureRequests.put(signature58, request); + } + } + + public void removeFromSignatureRequests(String signature58) { + arbitraryDataSignatureRequests.remove(signature58); + } + + + // Lookup file lists by signature + + public boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) { + byte[] signature = arbitraryTransactionData.getSignature(); + String signature58 = Base58.encode(signature); + + // Require an NTP sync + Long now = NTP.getTime(); + if (now == null) { + return false; + } + + // If we've already tried too many times in a short space of time, make sure to give up + if (!this.shouldMakeFileListRequestForSignature(signature58)) { + // Check if we should make direct connections to peers + if (this.shouldMakeDirectFileRequestsForSignature(signature58)) { + return ArbitraryDataFileManager.getInstance().fetchDataFilesFromPeersForSignature(signature); + } + + LOGGER.debug("Skipping file list request for signature {} due to rate limit", signature58); + return false; + } + this.addToSignatureRequests(signature58, true, false); + + List handshakedPeers = Network.getInstance().getHandshakedPeers(); + LOGGER.debug(String.format("Sending data file list request for signature %s to %d peers...", signature58, handshakedPeers.size())); + + // Build request + Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, now, 0); + + // Save our request into requests map + Triple requestEntry = new Triple<>(signature58, null, NTP.getTime()); + + // Assign random ID to this message + int id; + do { + id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1; + + // Put queue into map (keyed by message ID) so we can poll for a response + // If putIfAbsent() doesn't return null, then this ID is already taken + } while (arbitraryDataFileListRequests.put(id, requestEntry) != null); + getArbitraryDataFileListMessage.setId(id); + + // Broadcast request + Network.getInstance().broadcast(peer -> getArbitraryDataFileListMessage); + + // Poll to see if data has arrived + final long singleWait = 100; + long totalWait = 0; + while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) { + try { + Thread.sleep(singleWait); + } catch (InterruptedException e) { + break; + } + + requestEntry = arbitraryDataFileListRequests.get(id); + if (requestEntry == null) + return false; + + if (requestEntry.getA() 
== null) + break; + + totalWait += singleWait; + } + return true; + } + + + + // Network handlers + + public void onNetworkArbitraryDataFileListMessage(Peer peer, Message message) { + // Don't process if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message; + LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size()); + + // Do we have a pending request for this data? // TODO: might we want to relay all of them anyway? + Triple request = arbitraryDataFileListRequests.get(message.getId()); + if (request == null || request.getA() == null) { + return; + } + boolean isRelayRequest = (request.getB() != null); + + // Does this message's signature match what we're expecting? + byte[] signature = arbitraryDataFileListMessage.getSignature(); + String signature58 = Base58.encode(signature); + if (!request.getA().equals(signature58)) { + return; + } + + List hashes = arbitraryDataFileListMessage.getHashes(); + if (hashes == null || hashes.isEmpty()) { + return; + } + + ArbitraryTransactionData arbitraryTransactionData = null; + ArbitraryDataFileManager arbitraryDataFileManager = ArbitraryDataFileManager.getInstance(); + + // Check transaction exists and hashes are correct + try (final Repository repository = RepositoryManager.getRepository()) { + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (!(transactionData instanceof ArbitraryTransactionData)) + return; + + arbitraryTransactionData = (ArbitraryTransactionData) transactionData; + + // Load data file(s) + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature); + arbitraryDataFile.setMetadataHash(arbitraryTransactionData.getMetadataHash()); + +// // Check all hashes exist +// for (byte[] hash : hashes) { +// //LOGGER.debug("Received hash {}", Base58.encode(hash)); +// if (!arbitraryDataFile.containsChunk(hash)) { +// // Check the hash against the complete file +// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) { +// LOGGER.info("Received non-matching chunk hash {} for signature {}. 
This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58); +// return; +// } +// } +// } + + // Update requests map to reflect that we've received it + Triple newEntry = new Triple<>(null, null, request.getC()); + arbitraryDataFileListRequests.put(message.getId(), newEntry); + + if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) { + // Go and fetch the actual data, since this isn't a relay request + arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, hashes); + } + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list for peer %s", peer), e); + } + + // Forwarding + if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) { + boolean isBlocked = (arbitraryTransactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(arbitraryTransactionData.getName())); + if (!isBlocked) { + Peer requestingPeer = request.getB(); + if (requestingPeer != null) { + // Add each hash to our local mapping so we know who to ask later + Long now = NTP.getTime(); + for (byte[] hash : hashes) { + String hash58 = Base58.encode(hash); + Triple value = new Triple<>(signature58, peer, now); + arbitraryDataFileManager.arbitraryRelayMap.put(hash58, value); + LOGGER.debug("Added {} to relay map: {}, {}, {}", hash58, signature58, peer, now); + } + + // Forward to requesting peer + LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer); + if (!requestingPeer.sendMessage(arbitraryDataFileListMessage)) { + requestingPeer.disconnect("failed to forward arbitrary data file list"); + } + } + } + } + } + + public void onNetworkGetArbitraryDataFileListMessage(Peer peer, Message message) { + // Don't respond if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet(); + + GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message; + byte[] signature = getArbitraryDataFileListMessage.getSignature(); + String signature58 = Base58.encode(signature); + Long now = NTP.getTime(); + Triple newEntry = new Triple<>(signature58, peer, now); + + // If we've seen this request recently, then ignore + if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) { + LOGGER.debug("Ignoring hash list request from peer {} for signature {}", peer, signature58); + return; + } + + LOGGER.debug("Received hash list request from peer {} for signature {}", peer, signature58); + + List hashes = new ArrayList<>(); + ArbitraryTransactionData transactionData = null; + + try (final Repository repository = RepositoryManager.getRepository()) { + + // Firstly we need to lookup this file on chain to get a list of its hashes + transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature); + if (transactionData instanceof ArbitraryTransactionData) { + + // Check if we're even allowed to serve data for this transaction + if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) { + + byte[] hash = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); + + // Load file(s) and add any that exist to the list of hashes + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + if (metadataHash != null) { + 
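+ // Illustrative note: in this branch (chunked resource) the reply will contain the metadata file
+ // hash, if we hold it, followed by the hashes of any chunks we hold; the else branch below
+ // instead adds the single complete-file hash.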
arbitraryDataFile.setMetadataHash(metadataHash); + + // If we have the metadata file, add its hash + if (arbitraryDataFile.getMetadataFile().exists()) { + hashes.add(arbitraryDataFile.getMetadataHash()); + } + + for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) { + if (chunk.exists()) { + hashes.add(chunk.getHash()); + //LOGGER.trace("Added hash {}", chunk.getHash58()); + } else { + LOGGER.debug("Couldn't add hash {} because it doesn't exist", chunk.getHash58()); + } + } + } else { + // This transaction has no chunks, so include the complete file if we have it + if (arbitraryDataFile.exists()) { + hashes.add(arbitraryDataFile.getHash()); + } + } + } + } + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e); + } + + // We should only respond if we have at least one hash + if (hashes.size() > 0) { + + // Update requests map to reflect that we've sent it + newEntry = new Triple<>(signature58, null, now); + arbitraryDataFileListRequests.put(message.getId(), newEntry); + + ArbitraryDataFileListMessage arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes); + arbitraryDataFileListMessage.setId(message.getId()); + if (!peer.sendMessage(arbitraryDataFileListMessage)) { + LOGGER.debug("Couldn't send list of hashes"); + peer.disconnect("failed to send list of hashes"); + } + LOGGER.debug("Sent list of hashes (count: {})", hashes.size()); + + } + else { + boolean isBlocked = (transactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName())); + if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) { + // In relay mode - so ask our other peers if they have it + + long requestTime = getArbitraryDataFileListMessage.getRequestTime(); + int requestHops = getArbitraryDataFileListMessage.getRequestHops(); + getArbitraryDataFileListMessage.setRequestHops(++requestHops); + long totalRequestTime = now - requestTime; + + if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) { + // Relay request hasn't timed out yet, so can potentially be rebroadcast + if (requestHops < RELAY_REQUEST_MAX_HOPS) { + // Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast + + LOGGER.info("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops); + Network.getInstance().broadcast( + broadcastPeer -> broadcastPeer == peer || + Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) + ? 
null : getArbitraryDataFileListMessage); + + } + else { + // This relay request has reached the maximum number of allowed hops + } + } + else { + // This relay request has timed out + } + } + } + } + +} diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileManager.java new file mode 100644 index 00000000..1b1cb945 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileManager.java @@ -0,0 +1,403 @@ +package org.qortal.controller.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.controller.Controller; +import org.qortal.data.network.ArbitraryPeerData; +import org.qortal.data.network.PeerData; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.network.Network; +import org.qortal.network.Peer; +import org.qortal.network.message.*; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; +import org.qortal.utils.NTP; +import org.qortal.utils.Triple; + +import java.security.SecureRandom; +import java.util.*; +import java.util.stream.Collectors; + +public class ArbitraryDataFileManager { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileManager.class); + + private static ArbitraryDataFileManager instance; + + + /** + * Map to keep track of our in progress (outgoing) arbitrary data file requests + */ + private Map arbitraryDataFileRequests = Collections.synchronizedMap(new HashMap<>()); + + /** + * Map to keep track of hashes that we might need to relay, keyed by the hash of the file (base58 encoded). 
+ * Value is comprised of the base58-encoded signature, the peer that is hosting it, and the timestamp that it was added + */ + public Map> arbitraryRelayMap = Collections.synchronizedMap(new HashMap<>()); + + + private ArbitraryDataFileManager() { + } + + public static ArbitraryDataFileManager getInstance() { + if (instance == null) + instance = new ArbitraryDataFileManager(); + + return instance; + } + + + public void cleanupRequestCache(Long now) { + if (now == null) { + return; + } + final long requestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_REQUEST_TIMEOUT; + arbitraryDataFileRequests.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < requestMinimumTimestamp); + + final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT; + arbitraryRelayMap.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < relayMinimumTimestamp); + } + + + + // Fetch data files by hash + + public boolean fetchAllArbitraryDataFiles(Repository repository, Peer peer, byte[] signature) { + try { + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (!(transactionData instanceof ArbitraryTransactionData)) + return false; + + ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData; + + // We use null to represent all hashes associated with this transaction + return this.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, null); + + } catch (DataException e) {} + + return false; + } + + public boolean fetchArbitraryDataFiles(Repository repository, + Peer peer, + byte[] signature, + ArbitraryTransactionData arbitraryTransactionData, + List hashes) throws DataException { + + // Load data file(s) + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature); + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); + arbitraryDataFile.setMetadataHash(metadataHash); + + // If hashes are null, we will treat this to mean all data hashes associated with this file + if (hashes == null) { + if (metadataHash == null) { + // This transaction has no metadata/chunks, so use the main file hash + hashes = Arrays.asList(arbitraryDataFile.getHash()); + } + else if (!arbitraryDataFile.getMetadataFile().exists()) { + // We don't have the metadata file yet, so request it + hashes = Arrays.asList(arbitraryDataFile.getMetadataFile().getHash()); + } + else { + // Add the chunk hashes + hashes = arbitraryDataFile.getChunkHashes(); + } + } + + boolean receivedAtLeastOneFile = false; + + // Now fetch actual data from this peer + for (byte[] hash : hashes) { + if (!arbitraryDataFile.chunkExists(hash)) { + // Only request the file if we aren't already requesting it from someone else + if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) { + ArbitraryDataFileMessage receivedArbitraryDataFileMessage = fetchArbitraryDataFile(peer, null, signature, hash, null); + if (receivedArbitraryDataFileMessage != null) { + LOGGER.debug("Received data file {} from peer {}", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer); + receivedAtLeastOneFile = true; + } + else { + LOGGER.debug("Peer {} didn't respond with data file {} for signature {}", peer, Base58.encode(hash), Base58.encode(signature)); + } + } + else { + LOGGER.debug("Already requesting data file {} for signature {}", arbitraryDataFile, 
Base58.encode(signature)); + } + } + } + + if (receivedAtLeastOneFile) { + // Update our lookup table to indicate that this peer holds data for this signature + String peerAddress = peer.getPeerData().getAddress().toString(); + ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer); + repository.discardChanges(); + if (arbitraryPeerData.isPeerAddressValid()) { + LOGGER.debug("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature)); + repository.getArbitraryRepository().save(arbitraryPeerData); + repository.saveChanges(); + } + + // Invalidate the hosted transactions cache as we are now hosting something new + ArbitraryDataStorageManager.getInstance().invalidateHostedTransactionsCache(); + } + + // Check if we have all the files we need for this transaction + if (arbitraryDataFile.allFilesExist()) { + + // We have all the chunks for this transaction, so we should invalidate the transaction's name's + // data cache so that it is rebuilt the next time we serve it + ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData); + + // We may also need to broadcast to the network that we are now hosting files for this transaction, + // but only if these files are in accordance with our storage policy + if (ArbitraryDataStorageManager.getInstance().canStoreData(arbitraryTransactionData)) { + // Use a null peer address to indicate our own + Message newArbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, Arrays.asList(signature)); + Network.getInstance().broadcast(broadcastPeer -> newArbitrarySignatureMessage); + } + } + + return receivedAtLeastOneFile; + } + + private ArbitraryDataFileMessage fetchArbitraryDataFile(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException { + ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature); + boolean fileAlreadyExists = existingFile.exists(); + Message message = null; + + // Fetch the file if it doesn't exist locally + if (!fileAlreadyExists) { + String hash58 = Base58.encode(hash); + LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer)); + arbitraryDataFileRequests.put(hash58, NTP.getTime()); + Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash); + + try { + message = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT); + } catch (InterruptedException e) { + // Will return below due to null message + } + arbitraryDataFileRequests.remove(hash58); + LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58)); + + if (message == null || message.getType() != Message.MessageType.ARBITRARY_DATA_FILE) { + return null; + } + } + ArbitraryDataFileMessage arbitraryDataFileMessage = (ArbitraryDataFileMessage) message; + + // We might want to forward the request to the peer that originally requested it + this.handleArbitraryDataFileForwarding(requestingPeer, message, originalMessage); + + boolean isRelayRequest = (requestingPeer != null); + if (isRelayRequest) { + if (!fileAlreadyExists) { + // File didn't exist locally before the request, and it's a forwarding request, so delete it + LOGGER.debug("Deleting file {} because it was needed for forwarding only", Base58.encode(hash)); + ArbitraryDataFile dataFile = arbitraryDataFileMessage.getArbitraryDataFile(); + dataFile.delete(); + } + } + + return arbitraryDataFileMessage; + } + + + public void 
handleArbitraryDataFileForwarding(Peer requestingPeer, Message message, Message originalMessage) { + // Return if there is no originally requesting peer to forward to + if (requestingPeer == null) { + return; + } + + // Return if we're not in relay mode or if this request doesn't need forwarding + if (!Settings.getInstance().isRelayModeEnabled()) { + return; + } + + LOGGER.debug("Received arbitrary data file - forwarding is needed"); + + // The ID needs to match that of the original request + message.setId(originalMessage.getId()); + + if (!requestingPeer.sendMessage(message)) { + LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer); + requestingPeer.disconnect("failed to forward arbitrary data file"); + } + else { + LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer); + } + } + + + // Fetch data directly from peers + + public boolean fetchDataFilesFromPeersForSignature(byte[] signature) { + String signature58 = Base58.encode(signature); + ArbitraryDataFileListManager.getInstance().addToSignatureRequests(signature58, false, true); + + // Firstly fetch peers that claim to be hosting files for this signature + try (final Repository repository = RepositoryManager.getRepository()) { + + List peers = repository.getArbitraryRepository().getArbitraryPeerDataForSignature(signature); + if (peers == null || peers.isEmpty()) { + LOGGER.debug("No peers found for signature {}", signature58); + return false; + } + + LOGGER.debug("Attempting a direct peer connection for signature {}...", signature58); + + // Peers found, so pick a random one and request data from it + int index = new SecureRandom().nextInt(peers.size()); + ArbitraryPeerData arbitraryPeerData = peers.get(index); + String peerAddressString = arbitraryPeerData.getPeerAddress(); + boolean success = Network.getInstance().requestDataFromPeer(peerAddressString, signature); + + // Parse the peer address to find the host and port + String host = null; + int port = -1; + String[] parts = peerAddressString.split(":"); + if (parts.length > 1) { + host = parts[0]; + port = Integer.parseInt(parts[1]); + } + + // If unsuccessful, and using a non-standard port, try a second connection with the default listen port, + // since almost all nodes use that. This is a workaround to account for any ephemeral ports that may + // have made it into the dataset. + if (!success) { + if (host != null && port > 0) { + int defaultPort = Settings.getInstance().getDefaultListenPort(); + if (port != defaultPort) { + String newPeerAddressString = String.format("%s:%d", host, defaultPort); + success = Network.getInstance().requestDataFromPeer(newPeerAddressString, signature); + } + } + } + + // If _still_ unsuccessful, try matching the peer's IP address with some known peers, and then connect + // to each of those in turn until one succeeds. 
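+ // For illustration only (hypothetical values): if the stored address were "198.51.100.7:61234"
+ // and the default listen port in settings were 12392, the retry above would target
+ // "198.51.100.7:12392", and the fallback below would try any known peers sharing host 198.51.100.7.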
+ if (!success) { + if (host != null) { + final String finalHost = host; + List knownPeers = Network.getInstance().getAllKnownPeers().stream() + .filter(knownPeerData -> knownPeerData.getAddress().getHost().equals(finalHost)) + .collect(Collectors.toList()); + // Loop through each match and attempt a connection + for (PeerData matchingPeer : knownPeers) { + String matchingPeerAddress = matchingPeer.getAddress().toString(); + success = Network.getInstance().requestDataFromPeer(matchingPeerAddress, signature); + if (success) { + // Successfully connected, so stop making connections + break; + } + } + } + } + + // Keep track of the success or failure + arbitraryPeerData.markAsAttempted(); + if (success) { + arbitraryPeerData.markAsRetrieved(); + arbitraryPeerData.incrementSuccesses(); + } + else { + arbitraryPeerData.incrementFailures(); + } + repository.discardChanges(); + repository.getArbitraryRepository().save(arbitraryPeerData); + repository.saveChanges(); + + return success; + + } catch (DataException e) { + LOGGER.debug("Unable to fetch peer list from repository"); + } + + return false; + } + + + // Network handlers + + public void onNetworkGetArbitraryDataFileMessage(Peer peer, Message message) { + // Don't respond if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + GetArbitraryDataFileMessage getArbitraryDataFileMessage = (GetArbitraryDataFileMessage) message; + byte[] hash = getArbitraryDataFileMessage.getHash(); + String hash58 = Base58.encode(hash); + byte[] signature = getArbitraryDataFileMessage.getSignature(); + Controller.getInstance().stats.getArbitraryDataFileMessageStats.requests.incrementAndGet(); + + LOGGER.debug("Received GetArbitraryDataFileMessage from peer {} for hash {}", peer, Base58.encode(hash)); + + try { + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + Triple relayInfo = this.arbitraryRelayMap.get(hash58); + + if (arbitraryDataFile.exists()) { + LOGGER.trace("Hash {} exists", hash58); + + // We can serve the file directly as we already have it + ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile); + arbitraryDataFileMessage.setId(message.getId()); + if (!peer.sendMessage(arbitraryDataFileMessage)) { + LOGGER.debug("Couldn't sent file"); + peer.disconnect("failed to send file"); + } + LOGGER.debug("Sent file {}", arbitraryDataFile); + } + else if (relayInfo != null) { + LOGGER.debug("We have relay info for hash {}", Base58.encode(hash)); + // We need to ask this peer for the file + Peer peerToAsk = relayInfo.getB(); + if (peerToAsk != null) { + + // Forward the message to this peer + LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58); + this.fetchArbitraryDataFile(peerToAsk, peer, signature, hash, message); + + // Remove from the map regardless of outcome, as the relay attempt is now considered complete + arbitraryRelayMap.remove(hash58); + } + else { + LOGGER.debug("Peer {} not found in relay info", peer); + } + } + else { + LOGGER.debug("Hash {} doesn't exist and we don't have relay info", hash58); + + // We don't have this file + Controller.getInstance().stats.getArbitraryDataFileMessageStats.unknownFiles.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout + LOGGER.debug(String.format("Sending 'file unknown' response to peer %s for GET_FILE request for unknown file %s", peer, arbitraryDataFile)); + + // We'll send empty block summaries 
message as it's very short + // TODO: use a different message type here + Message fileUnknownMessage = new BlockSummariesMessage(Collections.emptyList()); + fileUnknownMessage.setId(message.getId()); + if (!peer.sendMessage(fileUnknownMessage)) { + LOGGER.debug("Couldn't sent file-unknown response"); + peer.disconnect("failed to send file-unknown response"); + } + else { + LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile); + } + } + } + catch (DataException e) { + LOGGER.debug("Unable to handle request for arbitrary data file: {}", hash58); + } + } + +} diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataManager.java new file mode 100644 index 00000000..e2d62f6a --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataManager.java @@ -0,0 +1,451 @@ +package org.qortal.controller.arbitrary; + +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataResource; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.Controller; +import org.qortal.data.network.ArbitraryPeerData; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.list.ResourceListManager; +import org.qortal.network.Network; +import org.qortal.network.Peer; +import org.qortal.network.message.*; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.transaction.ArbitraryTransaction; +import org.qortal.transaction.Transaction.TransactionType; +import org.qortal.utils.ArbitraryTransactionUtils; +import org.qortal.utils.Base58; +import org.qortal.utils.NTP; + +public class ArbitraryDataManager extends Thread { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataManager.class); + private static final List ARBITRARY_TX_TYPE = Arrays.asList(TransactionType.ARBITRARY); + + /** Difficulty (leading zero bits) used in arbitrary data transactions + * Set here so that it can be more easily reduced when running unit tests */ + private int powDifficulty = 14; // Must not be final, as unit tests need to reduce this value + + /** Request timeout when transferring arbitrary data */ + public static final long ARBITRARY_REQUEST_TIMEOUT = 10 * 1000L; // ms + + /** Maximum time to hold information about an in-progress relay */ + public static final long ARBITRARY_RELAY_TIMEOUT = 30 * 1000L; // ms + + private static ArbitraryDataManager instance; + private final Object peerDataLock = new Object(); + + private volatile boolean isStopping = false; + + /** + * Map to keep track of cached arbitrary transaction resources. + * When an item is present in this list with a timestamp in the future, we won't invalidate + * its cache when serving that data. This reduces the amount of database lookups that are needed. 
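+ * For example (illustrative): addResourceToCache() stamps an entry with now + ARBITRARY_DATA_CACHE_TIMEOUT,
+ * and isResourceCached() treats the resource as cached until that timestamp passes, after which the
+ * entry is removed and the cache is rebuilt on the next request.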
+ */ + private Map arbitraryDataCachedResources = Collections.synchronizedMap(new HashMap<>()); + + /** + * The amount of time to cache a data resource before it is invalidated + */ + private static long ARBITRARY_DATA_CACHE_TIMEOUT = 60 * 60 * 1000L; // 60 minutes + + + + private ArbitraryDataManager() { + } + + public static ArbitraryDataManager getInstance() { + if (instance == null) + instance = new ArbitraryDataManager(); + + return instance; + } + + @Override + public void run() { + Thread.currentThread().setName("Arbitrary Data Manager"); + + try { + while (!isStopping) { + Thread.sleep(2000); + + // Don't run if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + Thread.sleep(60 * 60 * 1000L); + continue; + } + + List peers = Network.getInstance().getHandshakedPeers(); + + // Disregard peers that have "misbehaved" recently + peers.removeIf(Controller.hasMisbehaved); + + // Don't fetch data if we don't have enough up-to-date peers + if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) { + continue; + } + + // Fetch data according to storage policy + switch (Settings.getInstance().getStoragePolicy()) { + case FOLLOWED: + case FOLLOWED_AND_VIEWED: + this.processNames(); + break; + + case ALL: + this.processAll(); + + case NONE: + case VIEWED: + default: + // Nothing to fetch in advance + Thread.sleep(60000); + break; + } + } + } catch (InterruptedException e) { + // Fall-through to exit thread... + } + } + + public void shutdown() { + isStopping = true; + this.interrupt(); + } + + private void processNames() { + // Fetch latest list of followed names + List followedNames = ResourceListManager.getInstance().getStringsInList("followedNames"); + if (followedNames == null || followedNames.isEmpty()) { + return; + } + + // Loop through the names in the list and fetch transactions for each + for (String name : followedNames) { + this.fetchAndProcessTransactions(name); + } + } + + private void processAll() { + this.fetchAndProcessTransactions(null); + } + + private void fetchAndProcessTransactions(String name) { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + + // Paginate queries when fetching arbitrary transactions + final int limit = 100; + int offset = 0; + + while (!isStopping) { + + // Any arbitrary transactions we want to fetch data for? 
+ try (final Repository repository = RepositoryManager.getRepository()) { + List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, name, null, ConfirmationStatus.BOTH, limit, offset, true); + // LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit); + if (signatures == null || signatures.isEmpty()) { + offset = 0; + break; + } + offset += limit; + + // Loop through signatures and remove ones we don't need to process + Iterator iterator = signatures.iterator(); + while (iterator.hasNext()) { + byte[] signature = (byte[]) iterator.next(); + + ArbitraryTransaction arbitraryTransaction = fetchTransaction(repository, signature); + if (arbitraryTransaction == null) { + // Best not to process this one + iterator.remove(); + continue; + } + ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) arbitraryTransaction.getTransactionData(); + + // Skip transactions that we don't need to proactively store data for + if (!storageManager.shouldPreFetchData(repository, arbitraryTransactionData)) { + iterator.remove(); + continue; + } + + // Remove transactions that we already have local data for + if (hasLocalData(arbitraryTransaction)) { + iterator.remove(); + continue; + } + } + + if (signatures.isEmpty()) { + continue; + } + + // Pick one at random + final int index = new Random().nextInt(signatures.size()); + byte[] signature = signatures.get(index); + + if (signature == null) { + continue; + } + + // Check to see if we have had a more recent PUT + ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature); + boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData); + if (hasMoreRecentPutTransaction) { + // There is a more recent PUT transaction than the one we are currently processing. + // When a PUT is issued, it replaces any layers that would have been there before. + // Therefore any data relating to this older transaction is no longer needed and we + // shouldn't fetch it from the network. 
+ continue; + } + + // Ask our connected peers if they have files for this signature + // This process automatically then fetches the files themselves if a peer is found + fetchData(arbitraryTransactionData); + + } catch (DataException e) { + LOGGER.error("Repository issue when fetching arbitrary transaction data", e); + } + } + } + + private ArbitraryTransaction fetchTransaction(final Repository repository, byte[] signature) { + try { + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (!(transactionData instanceof ArbitraryTransactionData)) + return null; + + return new ArbitraryTransaction(repository, transactionData); + + } catch (DataException e) { + return null; + } + } + + private boolean hasLocalData(ArbitraryTransaction arbitraryTransaction) { + try { + return arbitraryTransaction.isDataLocal(); + + } catch (DataException e) { + LOGGER.error("Repository issue when checking arbitrary transaction's data is local", e); + return true; + } + } + + + // Entrypoint to request new data from peers + public boolean fetchData(ArbitraryTransactionData arbitraryTransactionData) { + return ArbitraryDataFileListManager.getInstance().fetchArbitraryDataFileList(arbitraryTransactionData); + } + + + // Useful methods used by other parts of the app + + public boolean isSignatureRateLimited(byte[] signature) { + return ArbitraryDataFileListManager.getInstance().isSignatureRateLimited(signature); + } + + public long lastRequestForSignature(byte[] signature) { + return ArbitraryDataFileListManager.getInstance().lastRequestForSignature(signature); + } + + + // Arbitrary data resource cache + + public void cleanupRequestCache(Long now) { + if (now == null) { + return; + } + + // Cleanup file list request caches + ArbitraryDataFileListManager.getInstance().cleanupRequestCache(now); + + // Cleanup file request caches + ArbitraryDataFileManager.getInstance().cleanupRequestCache(now); + } + + public boolean isResourceCached(ArbitraryDataResource resource) { + if (resource == null) { + return false; + } + String key = resource.getUniqueKey(); + + // We don't have an entry for this resource ID, it is not cached + if (this.arbitraryDataCachedResources == null) { + return false; + } + if (!this.arbitraryDataCachedResources.containsKey(key)) { + return false; + } + Long timestamp = this.arbitraryDataCachedResources.get(key); + if (timestamp == null) { + return false; + } + + // If the timestamp has reached the timeout, we should remove it from the cache + long now = NTP.getTime(); + if (now > timestamp) { + this.arbitraryDataCachedResources.remove(key); + return false; + } + + // Current time hasn't reached the timeout, so treat it as cached + return true; + } + + public void addResourceToCache(ArbitraryDataResource resource) { + if (resource == null) { + return; + } + String key = resource.getUniqueKey(); + + // Just in case + if (this.arbitraryDataCachedResources == null) { + this.arbitraryDataCachedResources = new HashMap<>(); + } + + Long now = NTP.getTime(); + if (now == null) { + return; + } + + // Set the timestamp to now + the timeout + Long timestamp = NTP.getTime() + ARBITRARY_DATA_CACHE_TIMEOUT; + this.arbitraryDataCachedResources.put(key, timestamp); + } + + public void invalidateCache(ArbitraryTransactionData arbitraryTransactionData) { + String signature58 = Base58.encode(arbitraryTransactionData.getSignature()); + + if (arbitraryTransactionData.getName() != null) { + String resourceId = arbitraryTransactionData.getName().toLowerCase(); + Service 
service = arbitraryTransactionData.getService(); + String identifier = arbitraryTransactionData.getIdentifier(); + + ArbitraryDataResource resource = + new ArbitraryDataResource(resourceId, ArbitraryDataFile.ResourceIdType.NAME, service, identifier); + String key = resource.getUniqueKey(); + LOGGER.info("Clearing cache for {}...", resource); + + if (this.arbitraryDataCachedResources.containsKey(key)) { + this.arbitraryDataCachedResources.remove(key); + } + + // Also remove from the failed builds queue in case it previously failed due to missing chunks + ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance(); + if (buildManager.arbitraryDataFailedBuilds.containsKey(key)) { + buildManager.arbitraryDataFailedBuilds.remove(key); + } + + // Remove from the signature requests list now that we have all files for this signature + ArbitraryDataFileListManager.getInstance().removeFromSignatureRequests(signature58); + + // Delete cached files themselves + try { + resource.deleteCache(); + } catch (IOException e) { + LOGGER.info("Unable to delete cache for resource {}: {}", resource, e.getMessage()); + } + } + } + + + // Broadcast list of hosted signatures + + public void broadcastHostedSignatureList() { + try (final Repository repository = RepositoryManager.getRepository()) { + List hostedTransactions = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository); + List hostedSignatures = hostedTransactions.stream().map(ArbitraryTransactionData::getSignature).collect(Collectors.toList()); + + // Broadcast the list, using null to represent our peer address + LOGGER.info("Broadcasting list of hosted signatures..."); + Message arbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, hostedSignatures); + Network.getInstance().broadcast(broadcastPeer -> arbitrarySignatureMessage); + + } catch (DataException e) { + LOGGER.error("Repository issue when fetching arbitrary transaction data for broadcast", e); + } + } + + + // Handle incoming arbitrary signatures messages + + public void onNetworkArbitrarySignaturesMessage(Peer peer, Message message) { + // Don't process if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + LOGGER.debug("Received arbitrary signature list from peer {}", peer); + + ArbitrarySignaturesMessage arbitrarySignaturesMessage = (ArbitrarySignaturesMessage) message; + List signatures = arbitrarySignaturesMessage.getSignatures(); + + String peerAddress = peer.getPeerData().getAddress().toString(); + if (arbitrarySignaturesMessage.getPeerAddress() != null) { + // This message is about a different peer than the one that sent it + peerAddress = arbitrarySignaturesMessage.getPeerAddress(); + } + + boolean containsNewEntry = false; + + // Synchronize peer data lookups to make this process thread safe. 
Otherwise we could broadcast + // the same data multiple times, due to more than one thread processing the same message from different peers + synchronized (this.peerDataLock) { + try (final Repository repository = RepositoryManager.getRepository()) { + for (byte[] signature : signatures) { + + // Check if a record already exists for this hash/host combination + // The port is not checked here - only the host/ip - in order to avoid duplicates + // from filling up the db due to dynamic/ephemeral ports + ArbitraryPeerData existingEntry = repository.getArbitraryRepository() + .getArbitraryPeerDataForSignatureAndHost(signature, peer.getPeerData().getAddress().getHost()); + + if (existingEntry == null) { + // We haven't got a record of this mapping yet, so add it + ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peerAddress); + repository.discardChanges(); + if (arbitraryPeerData.isPeerAddressValid()) { + LOGGER.debug("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature)); + repository.getArbitraryRepository().save(arbitraryPeerData); + repository.saveChanges(); + + // Remember that this data is new, so that it can be rebroadcast later + containsNewEntry = true; + } + } + } + + // If at least one signature in this batch was new to us, we should rebroadcast the message to the + // network in case some peers haven't received it yet + if (containsNewEntry) { + LOGGER.debug("Rebroadcasting arbitrary signature list for peer {}", peerAddress); + Network.getInstance().broadcast(broadcastPeer -> broadcastPeer == peer ? null : arbitrarySignaturesMessage); + } else { + // Don't rebroadcast as otherwise we could get into a loop + } + + // If anything needed saving, it would already have called saveChanges() above + repository.discardChanges(); + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while processing arbitrary transaction signature list from peer %s", peer), e); + } + } + } + + + public int getPowDifficulty() { + return this.powDifficulty; + } + +} diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java new file mode 100644 index 00000000..483ab92f --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java @@ -0,0 +1,86 @@ +package org.qortal.controller.arbitrary; + +import org.qortal.arbitrary.ArbitraryDataResource; +import org.qortal.utils.NTP; + +import java.util.*; + +public class ArbitraryDataRenderManager extends Thread { + + private static ArbitraryDataRenderManager instance; + private volatile boolean isStopping = false; + + /** + * Map to keep track of authorized resources for rendering. + * Keyed by resource ID, with the authorization time as the value. + */ + private Map authorizedResources = Collections.synchronizedMap(new HashMap<>()); + + private static long AUTHORIZATION_TIMEOUT = 60 * 60 * 1000L; // 1 hour + + + public ArbitraryDataRenderManager() { + + } + + public static ArbitraryDataRenderManager getInstance() { + if (instance == null) + instance = new ArbitraryDataRenderManager(); + + return instance; + } + + @Override + public void run() { + Thread.currentThread().setName("Arbitrary Data Manager"); + + try { + while (!isStopping) { + Thread.sleep(60000); + + Long now = NTP.getTime(); + this.cleanup(now); + } + } catch (InterruptedException e) { + // Fall-through to exit thread... 
+ } + } + + public void shutdown() { + isStopping = true; + this.interrupt(); + } + + public void cleanup(Long now) { + if (now == null) { + return; + } + final long minimumTimestamp = now - AUTHORIZATION_TIMEOUT; + this.authorizedResources.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < minimumTimestamp); + } + + public boolean isAuthorized(ArbitraryDataResource resource) { + ArbitraryDataResource broadResource = new ArbitraryDataResource(resource.getResourceId(), null, null, null); + + for (String authorizedResourceKey : this.authorizedResources.keySet()) { + if (authorizedResourceKey != null && resource != null) { + // Check for exact match + if (Objects.equals(authorizedResourceKey, resource.getUniqueKey())) { + return true; + } + // Check for a broad authorization (which applies to all services and identifiers under an authorized name) + if (Objects.equals(authorizedResourceKey, broadResource.getUniqueKey())) { + return true; + } + } + } + return false; + } + + public void addToAuthorizedResources(ArbitraryDataResource resource) { + if (!this.isAuthorized(resource)) { + this.authorizedResources.put(resource.getUniqueKey(), NTP.getTime()); + } + } + +} diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataStorageManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataStorageManager.java new file mode 100644 index 00000000..bd686355 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataStorageManager.java @@ -0,0 +1,495 @@ +package org.qortal.controller.arbitrary; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.list.ResourceListManager; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.settings.Settings; +import org.qortal.transaction.Transaction; +import org.qortal.utils.Base58; +import org.qortal.utils.FilesystemUtils; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class ArbitraryDataStorageManager extends Thread { + + public enum StoragePolicy { + FOLLOWED_AND_VIEWED, + FOLLOWED, + VIEWED, + ALL, + NONE + } + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataStorageManager.class); + + private static ArbitraryDataStorageManager instance; + private volatile boolean isStopping = false; + + private Long storageCapacity = null; + private long totalDirectorySize = 0L; + private long lastDirectorySizeCheck = 0; + + private List hostedTransactions; + + private static final long DIRECTORY_SIZE_CHECK_INTERVAL = 10 * 60 * 1000L; // 10 minutes + + /** Treat storage as full at 90% usage, to reduce risk of going over the limit. + * This is necessary because we don't calculate total storage values before every write. + * It also helps avoid a fetch/delete loop, as we will stop fetching before the hard limit. + * This must be lower than DELETION_THRESHOLD. */ + private static final double STORAGE_FULL_THRESHOLD = 0.90f; // 90% + + /** Start deleting files once we reach 98% usage. + * This must be higher than STORAGE_FULL_THRESHOLD in order to avoid a fetch/delete loop. 
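+ * For example (illustrative): with a calculated capacity of 1 TB, prefetching stops at roughly
+ * 900 GB of hosted data, while deletion is not expected to begin until usage exceeds roughly 980 GB.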
*/ + public static final double DELETION_THRESHOLD = 0.98f; // 98% + + public ArbitraryDataStorageManager() { + } + + public static ArbitraryDataStorageManager getInstance() { + if (instance == null) + instance = new ArbitraryDataStorageManager(); + + return instance; + } + + @Override + public void run() { + Thread.currentThread().setName("Arbitrary Data Storage Manager"); + try { + while (!isStopping) { + Thread.sleep(1000); + + // Don't run if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + Thread.sleep(60 * 60 * 1000L); + continue; + } + + Long now = NTP.getTime(); + if (now == null) { + continue; + } + + // Check the total directory size if we haven't in a while + if (this.shouldCalculateDirectorySize(now)) { + this.calculateDirectorySize(now); + } + + Thread.sleep(59000); + } + } catch (InterruptedException e) { + // Fall-through to exit thread... + } + } + + public void shutdown() { + isStopping = true; + this.interrupt(); + instance = null; + } + + /** + * Check if data relating to a transaction is allowed to + * exist on this node, therefore making it a mirror for this data. + * + * @param arbitraryTransactionData - the transaction + * @return boolean - whether to prefetch or not + */ + public boolean canStoreData(ArbitraryTransactionData arbitraryTransactionData) { + String name = arbitraryTransactionData.getName(); + + // We already have RAW_DATA on chain, so we only need to store data associated with hashes + if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) { + return false; + } + + // Don't store data unless it's an allowed type (public/private) + if (!this.isDataTypeAllowed(arbitraryTransactionData)) { + return false; + } + + // Don't check for storage limits here, as it can cause the cleanup manager to delete existing data + + // Check if our storage policy and and lists allow us to host data for this name + switch (Settings.getInstance().getStoragePolicy()) { + case FOLLOWED_AND_VIEWED: + case ALL: + case VIEWED: + // If the policy includes viewed data, we can host it as long as it's not blocked + return !this.isNameBlocked(name); + + case FOLLOWED: + // If the policy is for followed data only, we have to be following it + return this.isFollowingName(name); + + // For NONE or all else, we shouldn't host this data + case NONE: + default: + return false; + } + } + + /** + * Check if data relating to a transaction should be downloaded + * automatically, making this node a mirror for that data. 
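+ * For example (illustrative): under StoragePolicy.FOLLOWED, data published under the hypothetical
+ * name "example-name" would only be prefetched if that name is in the followedNames list and not blocked.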
+ * + * @param arbitraryTransactionData - the transaction + * @return boolean - whether to prefetch or not + */ + public boolean shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) { + String name = arbitraryTransactionData.getName(); + + // Only fetch data associated with hashes, as we already have RAW_DATA + if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) { + return false; + } + + // Don't fetch anything more if we're (nearly) out of space + // Make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to + // avoid a fetch/delete loop + if (!this.isStorageSpaceAvailable(STORAGE_FULL_THRESHOLD)) { + return false; + } + + // Don't fetch anything if we're (nearly) out of space for this name + // Again, make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to + // avoid a fetch/delete loop + if (!this.isStorageSpaceAvailableForName(repository, arbitraryTransactionData.getName(), STORAGE_FULL_THRESHOLD)) { + return false; + } + + // Don't store data unless it's an allowed type (public/private) + if (!this.isDataTypeAllowed(arbitraryTransactionData)) { + return false; + } + + // Handle transactions without names differently + if (name == null) { + return this.shouldPreFetchDataWithoutName(); + } + + // Never fetch data from blocked names, even if they are followed + if (this.isNameBlocked(name)) { + return false; + } + + switch (Settings.getInstance().getStoragePolicy()) { + case FOLLOWED: + case FOLLOWED_AND_VIEWED: + return this.isFollowingName(name); + + case ALL: + return true; + + case NONE: + case VIEWED: + default: + return false; + } + } + + /** + * Don't call this method directly. + * Use the wrapper method shouldPreFetchData() instead, as it contains + * additional checks. 
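+ * Only StoragePolicy.ALL permits prefetching of data published without a name.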
+ * + * @return boolean - whether the storage policy allows for unnamed data + */ + private boolean shouldPreFetchDataWithoutName() { + switch (Settings.getInstance().getStoragePolicy()) { + case ALL: + return true; + + case NONE: + case VIEWED: + case FOLLOWED: + case FOLLOWED_AND_VIEWED: + default: + return false; + } + } + + private boolean isDataTypeAllowed(ArbitraryTransactionData arbitraryTransactionData) { + byte[] secret = arbitraryTransactionData.getSecret(); + boolean hasSecret = (secret != null && secret.length == 32); + + if (!Settings.getInstance().isPrivateDataEnabled() && !hasSecret) { + // Private data isn't enabled so we can't store data without a valid secret + return false; + } + if (!Settings.getInstance().isPublicDataEnabled() && hasSecret) { + // Public data isn't enabled so we can't store data with a secret + return false; + } + return true; + } + + public boolean isNameBlocked(String name) { + return ResourceListManager.getInstance().listContains("blockedNames", name, false); + } + + private boolean isFollowingName(String name) { + return ResourceListManager.getInstance().listContains("followedNames", name, false); + } + + public List followedNames() { + return ResourceListManager.getInstance().getStringsInList("followedNames"); + } + + private int followedNamesCount() { + return ResourceListManager.getInstance().getItemCountForList("followedNames"); + } + + + // Hosted data + + public List listAllHostedTransactions(Repository repository) { + // Load from cache if we can, to avoid disk reads + if (this.hostedTransactions != null) { + return this.hostedTransactions; + } + + List arbitraryTransactionDataList = new ArrayList<>(); + + // Find all hosted paths + List allPaths = this.findAllHostedPaths(); + + // Loop through each path and attempt to match it to a signature + for (Path path : allPaths) { + try { + String[] contents = path.toFile().list(); + if (contents == null || contents.length == 0) { + // Ignore empty directories + continue; + } + + String signature58 = path.getFileName().toString(); + byte[] signature = Base58.decode(signature58); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (transactionData == null || transactionData.getType() != Transaction.TransactionType.ARBITRARY) { + continue; + } + arbitraryTransactionDataList.add((ArbitraryTransactionData) transactionData); + + } catch (DataException e) { + continue; + } + } + + // Update cache + this.hostedTransactions = arbitraryTransactionDataList; + + return arbitraryTransactionDataList; + } + + /** + * Warning: this method will walk through the entire data directory + * Do not call it too frequently as it could create high disk load + * in environments with a large amount of hosted data. 
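+ * The final directory name of each hosted path is the base58-encoded transaction signature,
+ * which is why the walk below keeps only directory names longer than 32 characters.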
+ * @return a list of paths that are being hosted + */ + public List findAllHostedPaths() { + Path dataPath = Paths.get(Settings.getInstance().getDataPath()); + Path tempPath = Paths.get(Settings.getInstance().getTempDataPath()); + + // Walk through 3 levels of the file tree and find directories that are greater than 32 characters in length + // Also exclude the _temp and _misc paths if present + List allPaths = new ArrayList<>(); + try { + allPaths = Files.walk(dataPath, 3) + .filter(Files::isDirectory) + .filter(path -> !path.toAbsolutePath().toString().contains(tempPath.toAbsolutePath().toString()) + && !path.toString().contains("_misc") + && path.getFileName().toString().length() > 32) + .collect(Collectors.toList()); + } + catch (IOException e) { + LOGGER.info("Unable to walk through hosted data: {}", e.getMessage()); + } + + return allPaths; + } + + public void invalidateHostedTransactionsCache() { + this.hostedTransactions = null; + } + + + // Size limits + + /** + * Rate limit to reduce IO load + */ + public boolean shouldCalculateDirectorySize(Long now) { + if (now == null) { + return false; + } + // If storage capacity is null, we need to calculate it + if (this.storageCapacity == null) { + return true; + } + // If we haven't checked for a while, we need to check it now + if (now - lastDirectorySizeCheck > DIRECTORY_SIZE_CHECK_INTERVAL) { + return true; + } + + // We shouldn't check this time, as we want to reduce IO load on the SSD/HDD + return false; + } + + public void calculateDirectorySize(Long now) { + if (now == null) { + return; + } + + long totalSize = 0; + long remainingCapacity = 0; + + // Calculate remaining capacity + try { + remainingCapacity = this.getRemainingUsableStorageCapacity(); + } catch (IOException e) { + LOGGER.info("Unable to calculate remaining storage capacity: {}", e.getMessage()); + return; + } + + // Calculate total size of data directory + LOGGER.trace("Calculating data directory size..."); + Path dataDirectoryPath = Paths.get(Settings.getInstance().getDataPath()); + if (dataDirectoryPath.toFile().exists()) { + totalSize += FileUtils.sizeOfDirectory(dataDirectoryPath.toFile()); + } + + // Add total size of temp directory, if it's not already inside the data directory + Path tempDirectoryPath = Paths.get(Settings.getInstance().getTempDataPath()); + if (tempDirectoryPath.toFile().exists()) { + if (!FilesystemUtils.isChild(tempDirectoryPath, dataDirectoryPath)) { + LOGGER.trace("Calculating temp directory size..."); + totalSize += FileUtils.sizeOfDirectory(dataDirectoryPath.toFile()); + } + } + + this.totalDirectorySize = totalSize; + this.lastDirectorySizeCheck = now; + + // It's essential that used space (this.totalDirectorySize) is included in the storage capacity + LOGGER.trace("Calculating total storage capacity..."); + long storageCapacity = remainingCapacity + this.totalDirectorySize; + + // Make sure to limit the storage capacity if the user is overriding it in the settings + if (Settings.getInstance().getMaxStorageCapacity() != null) { + storageCapacity = Math.min(storageCapacity, Settings.getInstance().getMaxStorageCapacity()); + } + this.storageCapacity = storageCapacity; + + LOGGER.info("Total used: {} bytes, Total capacity: {} bytes", this.totalDirectorySize, this.storageCapacity); + } + + private long getRemainingUsableStorageCapacity() throws IOException { + // Create data directory if it doesn't exist so that we can perform calculations on it + Path dataDirectoryPath = Paths.get(Settings.getInstance().getDataPath()); + if 
(!dataDirectoryPath.toFile().exists()) { + Files.createDirectories(dataDirectoryPath); + } + + return dataDirectoryPath.toFile().getUsableSpace(); + } + + public long getTotalDirectorySize() { + return this.totalDirectorySize; + } + + public boolean isStorageSpaceAvailable(double threshold) { + if (!this.isStorageCapacityCalculated()) { + return false; + } + + long maxStorageCapacity = (long)((double)this.storageCapacity * threshold); + if (this.totalDirectorySize >= maxStorageCapacity) { + return false; + } + return true; + } + + public boolean isStorageSpaceAvailableForName(Repository repository, String name, double threshold) { + if (!this.isStorageSpaceAvailable(threshold)) { + // No storage space available at all, so no need to check this name + return false; + } + + if (name == null) { + // This transaction doesn't have a name, so fall back to total space limitations + return true; + } + + int followedNamesCount = this.followedNamesCount(); + if (followedNamesCount == 0) { + // Not following any names, so we have space + return true; + } + + long totalSizeForName = 0; + long maxStoragePerName = this.storageCapacityPerName(threshold); + + // Fetch all hosted transactions + List hostedTransactions = this.listAllHostedTransactions(repository); + for (ArbitraryTransactionData transactionData : hostedTransactions) { + String transactionName = transactionData.getName(); + if (!Objects.equals(name, transactionName)) { + // Transaction relates to a different name + continue; + } + + totalSizeForName += transactionData.getSize(); + } + + // Have we reached the limit for this name? + if (totalSizeForName > maxStoragePerName) { + return false; + } + + return true; + } + + public long storageCapacityPerName(double threshold) { + int followedNamesCount = this.followedNamesCount(); + if (followedNamesCount == 0) { + // Not following any names, so we have the total space available + return this.getStorageCapacityIncludingThreshold(threshold); + } + + double maxStorageCapacity = (double)this.storageCapacity * threshold; + long maxStoragePerName = (long)(maxStorageCapacity / (double)followedNamesCount); + + return maxStoragePerName; + } + + public boolean isStorageCapacityCalculated() { + return (this.storageCapacity != null); + } + + public Long getStorageCapacity() { + return this.storageCapacity; + } + + public Long getStorageCapacityIncludingThreshold(double threshold) { + if (this.storageCapacity == null) { + return null; + } + return (long)(this.storageCapacity * threshold); + } +} diff --git a/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java b/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java index 7d845a7b..0b941c0c 100644 --- a/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java +++ b/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java @@ -310,7 +310,7 @@ public class NamesDatabaseIntegrityCheck { // Fetch all the confirmed REGISTER_NAME transaction signatures List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria( null, null, null, ALL_NAME_TX_TYPE, null, null, - ConfirmationStatus.CONFIRMED, null, null, false); + null, ConfirmationStatus.CONFIRMED, null, null, false); for (byte[] signature : signatures) { TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); diff --git a/src/main/java/org/qortal/crypto/AES.java b/src/main/java/org/qortal/crypto/AES.java new file mode 100644 index 00000000..0e8018f5 
--- /dev/null +++ b/src/main/java/org/qortal/crypto/AES.java @@ -0,0 +1,205 @@ +/* + * MIT License + * + * Copyright (c) 2017 Eugen Paraschiv + * Modified in 2021 by CalDescent + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package org.qortal.crypto; + +import javax.crypto.Cipher; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKey; +import javax.crypto.BadPaddingException; +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKeyFactory; +import javax.crypto.SealedObject; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.Serializable; +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.KeySpec; +import java.util.Base64; + +public class AES { + + public static String encrypt(String algorithm, String input, SecretKey key, IvParameterSpec iv) + throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, + InvalidKeyException, BadPaddingException, IllegalBlockSizeException { + Cipher cipher = Cipher.getInstance(algorithm); + cipher.init(Cipher.ENCRYPT_MODE, key, iv); + byte[] cipherText = cipher.doFinal(input.getBytes()); + return Base64.getEncoder() + .encodeToString(cipherText); + } + + public static String decrypt(String algorithm, String cipherText, SecretKey key, IvParameterSpec iv) + throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, + InvalidKeyException, BadPaddingException, IllegalBlockSizeException { + Cipher cipher = Cipher.getInstance(algorithm); + cipher.init(Cipher.DECRYPT_MODE, key, iv); + byte[] plainText = cipher.doFinal(Base64.getDecoder() + .decode(cipherText)); + return new String(plainText); + } + + public static SecretKey generateKey(int n) throws NoSuchAlgorithmException { + KeyGenerator keyGenerator = KeyGenerator.getInstance("AES"); + keyGenerator.init(n); + SecretKey key = keyGenerator.generateKey(); + return key; + } + + public static SecretKey getKeyFromPassword(String password, String salt) + throws NoSuchAlgorithmException, InvalidKeySpecException { + 
SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256"); + KeySpec spec = new PBEKeySpec(password.toCharArray(), salt.getBytes(), 65536, 256); + SecretKey secret = new SecretKeySpec(factory.generateSecret(spec) + .getEncoded(), "AES"); + return secret; + } + + public static IvParameterSpec generateIv() { + byte[] iv = new byte[16]; + new SecureRandom().nextBytes(iv); + return new IvParameterSpec(iv); + } + + public static void encryptFile(String algorithm, SecretKey key, + String inputFilePath, String outputFilePath) throws IOException, + NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, InvalidKeyException, + BadPaddingException, IllegalBlockSizeException { + + File inputFile = new File(inputFilePath); + File outputFile = new File(outputFilePath); + + IvParameterSpec iv = AES.generateIv(); + Cipher cipher = Cipher.getInstance(algorithm); + cipher.init(Cipher.ENCRYPT_MODE, key, iv); + FileInputStream inputStream = new FileInputStream(inputFile); + FileOutputStream outputStream = new FileOutputStream(outputFile); + + // Prepend the output stream with the 16 byte initialization vector + outputStream.write(iv.getIV()); + + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = inputStream.read(buffer)) != -1) { + byte[] output = cipher.update(buffer, 0, bytesRead); + if (output != null) { + outputStream.write(output); + } + } + byte[] outputBytes = cipher.doFinal(); + if (outputBytes != null) { + outputStream.write(outputBytes); + } + inputStream.close(); + outputStream.close(); + } + + public static void decryptFile(String algorithm, SecretKey key, String encryptedFilePath, + String decryptedFilePath) throws IOException, NoSuchPaddingException, + NoSuchAlgorithmException, InvalidAlgorithmParameterException, InvalidKeyException, + BadPaddingException, IllegalBlockSizeException { + + File encryptedFile = new File(encryptedFilePath); + File decryptedFile = new File(decryptedFilePath); + + File parent = decryptedFile.getParentFile(); + if (!parent.isDirectory() && !parent.mkdirs()) { + throw new IOException("Failed to create directory " + parent); + } + + FileInputStream inputStream = new FileInputStream(encryptedFile); + FileOutputStream outputStream = new FileOutputStream(decryptedFile); + + // Read the initialization vector from the first 16 bytes of the file + byte[] iv = new byte[16]; + inputStream.read(iv); + Cipher cipher = Cipher.getInstance(algorithm); + cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv)); + + byte[] buffer = new byte[64]; + int bytesRead; + while ((bytesRead = inputStream.read(buffer)) != -1) { + byte[] output = cipher.update(buffer, 0, bytesRead); + if (output != null) { + outputStream.write(output); + } + } + byte[] output = cipher.doFinal(); + if (output != null) { + outputStream.write(output); + } + inputStream.close(); + outputStream.close(); + } + + public static SealedObject encryptObject(String algorithm, Serializable object, SecretKey key, + IvParameterSpec iv) throws NoSuchPaddingException, NoSuchAlgorithmException, + InvalidAlgorithmParameterException, InvalidKeyException, IOException, IllegalBlockSizeException { + Cipher cipher = Cipher.getInstance(algorithm); + cipher.init(Cipher.ENCRYPT_MODE, key, iv); + SealedObject sealedObject = new SealedObject(object, cipher); + return sealedObject; + } + + public static Serializable decryptObject(String algorithm, SealedObject sealedObject, SecretKey key, + IvParameterSpec iv) throws NoSuchPaddingException, 
NoSuchAlgorithmException, + InvalidAlgorithmParameterException, InvalidKeyException, ClassNotFoundException, + BadPaddingException, IllegalBlockSizeException, IOException { + Cipher cipher = Cipher.getInstance(algorithm); + cipher.init(Cipher.DECRYPT_MODE, key, iv); + Serializable unsealObject = (Serializable) sealedObject.getObject(cipher); + return unsealObject; + } + + public static String encryptPasswordBased(String plainText, SecretKey key, IvParameterSpec iv) + throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, + InvalidKeyException, BadPaddingException, IllegalBlockSizeException { + Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); + cipher.init(Cipher.ENCRYPT_MODE, key, iv); + return Base64.getEncoder() + .encodeToString(cipher.doFinal(plainText.getBytes())); + } + + public static String decryptPasswordBased(String cipherText, SecretKey key, IvParameterSpec iv) + throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, + InvalidKeyException, BadPaddingException, IllegalBlockSizeException { + Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5PADDING"); + cipher.init(Cipher.DECRYPT_MODE, key, iv); + return new String(cipher.doFinal(Base64.getDecoder() + .decode(cipherText))); + } + +} diff --git a/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceInfo.java b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceInfo.java new file mode 100644 index 00000000..b94f7e36 --- /dev/null +++ b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceInfo.java @@ -0,0 +1,19 @@ +package org.qortal.data.arbitrary; + +import org.qortal.arbitrary.misc.Service; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; + +@XmlAccessorType(XmlAccessType.FIELD) +public class ArbitraryResourceInfo { + + public String name; + public Service service; + public String identifier; + public ArbitraryResourceStatus status; + + public ArbitraryResourceInfo() { + } + +} diff --git a/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceNameInfo.java b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceNameInfo.java new file mode 100644 index 00000000..b9be8034 --- /dev/null +++ b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceNameInfo.java @@ -0,0 +1,17 @@ +package org.qortal.data.arbitrary; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import java.util.ArrayList; +import java.util.List; + +@XmlAccessorType(XmlAccessType.FIELD) +public class ArbitraryResourceNameInfo { + + public String name; + public List resources = new ArrayList<>(); + + public ArbitraryResourceNameInfo() { + } + +} diff --git a/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceStatus.java b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceStatus.java new file mode 100644 index 00000000..8f835add --- /dev/null +++ b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceStatus.java @@ -0,0 +1,42 @@ +package org.qortal.data.arbitrary; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; + +@XmlAccessorType(XmlAccessType.FIELD) +public class ArbitraryResourceStatus { + + public enum Status { + NOT_STARTED("Not started", "Downloading not yet started"), + DOWNLOADING("Downloading", "Locating and downloading files..."), + DOWNLOADED("Downloaded", "Files downloaded"), + BUILDING("Building", "Building..."), + READY("Ready", "Ready"), + MISSING_DATA("Missing data", 
"Unable to locate all files. Please try again later"), + BUILD_FAILED("Build failed", "Build failed. Please try again later"), + UNSUPPORTED("Unsupported", "Unsupported request"), + BLOCKED("Blocked", "Name is blocked so content cannot be served"); + + private String title; + private String description; + + Status(String title, String description) { + this.title = title; + this.description = description; + } + } + + private String id; + private String title; + private String description; + + public ArbitraryResourceStatus() { + } + + public ArbitraryResourceStatus(Status status) { + this.id = status.toString(); + this.title = status.title; + this.description = status.description; + } + +} diff --git a/src/main/java/org/qortal/data/block/BlockData.java b/src/main/java/org/qortal/data/block/BlockData.java index 481d3691..61d1a7fb 100644 --- a/src/main/java/org/qortal/data/block/BlockData.java +++ b/src/main/java/org/qortal/data/block/BlockData.java @@ -9,7 +9,10 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; +import org.qortal.block.BlockChain; +import org.qortal.settings.Settings; import org.qortal.crypto.Crypto; +import org.qortal.utils.NTP; // All properties to be converted to JSON via JAX-RS @XmlAccessorType(XmlAccessType.FIELD) @@ -208,6 +211,13 @@ public class BlockData implements Serializable { this.onlineAccountsSignatures = onlineAccountsSignatures; } + public boolean isTrimmed() { + long onlineAccountSignaturesTrimmedTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime(); + long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime(); + long blockTimestamp = this.getTimestamp(); + return blockTimestamp < onlineAccountSignaturesTrimmedTimestamp && blockTimestamp < currentTrimmableTimestamp; + } + // JAXB special @XmlElement(name = "minterAddress") diff --git a/src/main/java/org/qortal/data/network/ArbitraryPeerData.java b/src/main/java/org/qortal/data/network/ArbitraryPeerData.java new file mode 100644 index 00000000..30f8cf24 --- /dev/null +++ b/src/main/java/org/qortal/data/network/ArbitraryPeerData.java @@ -0,0 +1,112 @@ +package org.qortal.data.network; + +import com.google.common.net.InetAddresses; +import org.qortal.crypto.Crypto; +import org.qortal.network.Peer; +import org.qortal.utils.NTP; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +public class ArbitraryPeerData { + + private final byte[] hash; + private final String peerAddress; + private Integer successes; + private Integer failures; + private Long lastAttempted; + private Long lastRetrieved; + + public ArbitraryPeerData(byte[] hash, String peerAddress, Integer successes, + Integer failures, Long lastAttempted, Long lastRetrieved) { + this.hash = hash; + this.peerAddress = peerAddress; + this.successes = successes; + this.failures = failures; + this.lastAttempted = lastAttempted; + this.lastRetrieved = lastRetrieved; + } + + public ArbitraryPeerData(byte[] signature, Peer peer) { + this(Crypto.digest(signature), peer.getPeerData().getAddress().toString(), + 0, 0, 0L, 0L); + } + + public ArbitraryPeerData(byte[] signature, String peerAddress) { + this(Crypto.digest(signature), peerAddress, 0, 0, 0L, 0L); + } + + public boolean isPeerAddressValid() { + // Validate the peer address to prevent arbitrary values being added to the db + String[] parts = this.peerAddress.split(":"); + if (parts.length != 2) { + // 
Invalid format + return false; + } + String host = parts[0]; + if (!InetAddresses.isInetAddress(host)) { + // Invalid host + return false; + } + int port = Integer.valueOf(parts[1]); + if (port <= 0 || port > 65535) { + // Invalid port + return false; + } + + // Make sure that it's not a local address + try { + InetAddress addr = InetAddress.getByName(host); + if (addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isSiteLocalAddress()) { + // Ignore local addresses + return false; + } + } catch (UnknownHostException e) { + return false; + } + + // Valid host/port combination + return true; + } + + public void incrementSuccesses() { + this.successes++; + } + + public void incrementFailures() { + this.failures++; + } + + public void markAsAttempted() { + this.lastAttempted = NTP.getTime(); + } + + public void markAsRetrieved() { + this.lastRetrieved = NTP.getTime(); + } + + public byte[] getHash() { + return this.hash; + } + + public String getPeerAddress() { + return this.peerAddress; + } + + public Integer getSuccesses() { + return this.successes; + } + + public Integer getFailures() { + return this.failures; + } + + public Long getLastAttempted() { + return this.lastAttempted; + } + + public Long getLastRetrieved() { + return this.lastRetrieved; + } + +} diff --git a/src/main/java/org/qortal/data/network/PeerData.java b/src/main/java/org/qortal/data/network/PeerData.java index 3362ff11..09982c00 100644 --- a/src/main/java/org/qortal/data/network/PeerData.java +++ b/src/main/java/org/qortal/data/network/PeerData.java @@ -13,6 +13,8 @@ import io.swagger.v3.oas.annotations.media.Schema; @XmlAccessorType(XmlAccessType.FIELD) public class PeerData { + public static final int MAX_PEER_ADDRESS_SIZE = 255; + // Properties // Don't expose this via JAXB - use pretty getter instead diff --git a/src/main/java/org/qortal/data/transaction/ArbitraryTransactionData.java b/src/main/java/org/qortal/data/transaction/ArbitraryTransactionData.java index 56529852..acd5c3a6 100644 --- a/src/main/java/org/qortal/data/transaction/ArbitraryTransactionData.java +++ b/src/main/java/org/qortal/data/transaction/ArbitraryTransactionData.java @@ -1,17 +1,22 @@ package org.qortal.data.transaction; import java.util.List; +import java.util.Map; import javax.xml.bind.Unmarshaller; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import org.eclipse.persistence.oxm.annotations.XmlDiscriminatorValue; +import org.qortal.arbitrary.misc.Service; import org.qortal.data.PaymentData; import org.qortal.transaction.Transaction.TransactionType; import io.swagger.v3.oas.annotations.media.Schema; +import static java.util.Arrays.stream; +import static java.util.stream.Collectors.toMap; + // All properties to be converted to JSON via JAXB @XmlAccessorType(XmlAccessType.FIELD) @Schema(allOf = { TransactionData.class }) @@ -25,17 +30,65 @@ public class ArbitraryTransactionData extends TransactionData { DATA_HASH; } + // Methods + public enum Method { + PUT(0), // A complete replacement of a resource + PATCH(1); // An update / partial replacement of a resource + + public final int value; + + private static final Map map = stream(Method.values()) + .collect(toMap(method -> method.value, method -> method)); + + Method(int value) { + this.value = value; + } + + public static Method valueOf(int value) { + return map.get(value); + } + } + + // Compression types + public enum Compression { + NONE(0), + ZIP(1); + + public final int value; + + private static final Map map = 
stream(Compression.values())
+				.collect(toMap(compression -> compression.value, compression -> compression));
+
+		Compression(int value) {
+			this.value = value;
+		}
+
+		public static Compression valueOf(int value) {
+			return map.get(value);
+		}
+	}
+
 	// Properties
 	private int version;
-	@Schema(example = "sender_public_key")
 	private byte[] senderPublicKey;
-	private int service;
+	private Service service;
+	private int nonce;
+	private int size;
+
+	private String name;
+	private String identifier;
+	private Method method;
+	private byte[] secret;
+	private Compression compression;
 	@Schema(example = "raw_data_in_base58")
 	private byte[] data;
 	private DataType dataType;
+	@Schema(example = "metadata_file_hash_in_base58")
+	private byte[] metadataHash;
+
 	private List<PaymentData> payments;

 	// Constructors

@@ -50,14 +103,24 @@ public class ArbitraryTransactionData extends TransactionData {
 	}

 	public ArbitraryTransactionData(BaseTransactionData baseTransactionData,
-			int version, int service, byte[] data, DataType dataType, List<PaymentData> payments) {
+			int version, Service service, int nonce, int size,
+			String name, String identifier, Method method, byte[] secret, Compression compression,
+			byte[] data, DataType dataType, byte[] metadataHash, List<PaymentData> payments) {
 		super(TransactionType.ARBITRARY, baseTransactionData);
 		this.senderPublicKey = baseTransactionData.creatorPublicKey;
 		this.version = version;
 		this.service = service;
+		this.nonce = nonce;
+		this.size = size;
+		this.name = name;
+		this.identifier = identifier;
+		this.method = method;
+		this.secret = secret;
+		this.compression = compression;
 		this.data = data;
 		this.dataType = dataType;
+		this.metadataHash = metadataHash;
 		this.payments = payments;
 	}

@@ -71,10 +134,42 @@ public class ArbitraryTransactionData extends TransactionData {
 		return this.version;
 	}

-	public int getService() {
+	public Service getService() {
 		return this.service;
 	}

+	public int getNonce() {
+		return this.nonce;
+	}
+
+	public void setNonce(int nonce) {
+		this.nonce = nonce;
+	}
+
+	public int getSize() {
+		return this.size;
+	}
+
+	public String getName() {
+		return this.name;
+	}
+
+	public String getIdentifier() {
+		// Treat an empty identifier as null (avoid "!=" reference comparison on strings)
+		return (this.identifier != null && !this.identifier.isEmpty()) ? 
this.identifier : null; + } + + public Method getMethod() { + return this.method; + } + + public byte[] getSecret() { + return this.secret; + } + + public Compression getCompression() { + return this.compression; + } + public byte[] getData() { return this.data; } @@ -91,6 +186,14 @@ public class ArbitraryTransactionData extends TransactionData { this.dataType = dataType; } + public byte[] getMetadataHash() { + return this.metadataHash; + } + + public void setMetadataHash(byte[] metadataHash) { + this.metadataHash = metadataHash; + } + public List getPayments() { return this.payments; } diff --git a/src/main/java/org/qortal/gui/SysTray.java b/src/main/java/org/qortal/gui/SysTray.java index 4aee5a8d..7a24f825 100644 --- a/src/main/java/org/qortal/gui/SysTray.java +++ b/src/main/java/org/qortal/gui/SysTray.java @@ -290,8 +290,8 @@ public class SysTray { } public void setTrayIcon(int iconid) { - if (trayIcon != null) { - try { + try { + if (trayIcon != null) { switch (iconid) { case 1: this.trayIcon.setImage(Gui.loadImage("icons/qortal_ui_tray_syncing_time-alt.png")); @@ -306,9 +306,9 @@ public class SysTray { this.trayIcon.setImage(Gui.loadImage("icons/qortal_ui_tray_synced.png")); break; } - } catch (NullPointerException e) { - LOGGER.info("Unable to set tray icon"); } + } catch (Exception e) { + LOGGER.info("Unable to set tray icon: {}", e.getMessage()); } } diff --git a/src/main/java/org/qortal/list/ResourceList.java b/src/main/java/org/qortal/list/ResourceList.java index e682441c..fbdc8470 100644 --- a/src/main/java/org/qortal/list/ResourceList.java +++ b/src/main/java/org/qortal/list/ResourceList.java @@ -19,8 +19,7 @@ public class ResourceList { private static final Logger LOGGER = LogManager.getLogger(ResourceList.class); - private String category; - private String resourceName; + private String name; private List list = new ArrayList<>(); /** @@ -29,13 +28,11 @@ public class ResourceList { * This can be used for local blocking, or even for curating and sharing content lists * Lists are backed off to JSON files (in the lists folder) to ease sharing between nodes and users * - * @param category - for instance "blacklist", "whitelist", or "userlist" - * @param resourceName - for instance "address", "poll", or "group" + * @param name - the name of the list, for instance "blockedAddresses" * @throws IOException */ - public ResourceList(String category, String resourceName) throws IOException { - this.category = category; - this.resourceName = resourceName; + public ResourceList(String name) throws IOException { + this.name = name; this.load(); } @@ -43,17 +40,13 @@ public class ResourceList { /* Filesystem */ private Path getFilePath() { - String pathString = String.format("%s%s%s_%s.json", Settings.getInstance().getListsPath(), - File.separator, this.category, this.resourceName); + String pathString = String.format("%s.json", Paths.get(Settings.getInstance().getListsPath(), this.name)); return Paths.get(pathString); } public void save() throws IOException { - if (this.resourceName == null) { - throw new IllegalStateException("Can't save list with missing resource name"); - } - if (this.category == null) { - throw new IllegalStateException("Can't save list with missing category"); + if (this.name == null) { + throw new IllegalStateException("Can't save list with missing name"); } String jsonString = ResourceList.listToJSONString(this.list); Path filePath = this.getFilePath(); @@ -91,7 +84,7 @@ public class ResourceList { try { return this.load(); } catch (IOException e) { - 
LOGGER.info("Unable to revert {} {}", this.resourceName, this.category); + LOGGER.info("Unable to revert list {}: {}", this.name, e.getMessage()); } return false; } @@ -103,7 +96,7 @@ public class ResourceList { if (resource == null || this.list == null) { return; } - if (!this.contains(resource)) { + if (!this.contains(resource, true)) { this.list.add(resource); } } @@ -115,11 +108,17 @@ public class ResourceList { this.list.remove(resource); } - public boolean contains(String resource) { + public boolean contains(String resource, boolean caseSensitive) { if (resource == null || this.list == null) { return false; } - return this.list.contains(resource); + + if (caseSensitive) { + return this.list.contains(resource); + } + else { + return this.list.stream().anyMatch(resource::equalsIgnoreCase); + } } @@ -153,16 +152,16 @@ public class ResourceList { return ResourceList.listToJSONString(this.list); } - public String getCategory() { - return this.category; + public String getName() { + return this.name; } - public String getResourceName() { - return this.resourceName; + public List getList() { + return this.list; } public String toString() { - return String.format("%s %s", this.category, this.resourceName); + return this.name; } } diff --git a/src/main/java/org/qortal/list/ResourceListManager.java b/src/main/java/org/qortal/list/ResourceListManager.java index 4d0d19c3..4d4d559d 100644 --- a/src/main/java/org/qortal/list/ResourceListManager.java +++ b/src/main/java/org/qortal/list/ResourceListManager.java @@ -26,10 +26,9 @@ public class ResourceListManager { return instance; } - private ResourceList getList(String category, String resourceName) { + private ResourceList getList(String listName) { for (ResourceList list : this.lists) { - if (Objects.equals(list.getCategory(), category) && - Objects.equals(list.getResourceName(), resourceName)) { + if (Objects.equals(list.getName(), listName)) { return list; } } @@ -37,19 +36,19 @@ public class ResourceListManager { // List doesn't exist in array yet, so create it // This will load any existing data from the filesystem try { - ResourceList list = new ResourceList(category, resourceName); + ResourceList list = new ResourceList(listName); this.lists.add(list); return list; } catch (IOException e) { - LOGGER.info("Unable to load or create list {} {}: {}", category, resourceName, e.getMessage()); + LOGGER.info("Unable to load or create list {}: {}", listName, e.getMessage()); return null; } } - public boolean addToList(String category, String resourceName, String item, boolean save) { - ResourceList list = this.getList(category, resourceName); + public boolean addToList(String listName, String item, boolean save) { + ResourceList list = this.getList(listName); if (list == null) { return false; } @@ -67,8 +66,8 @@ public class ResourceListManager { } } - public boolean removeFromList(String category, String resourceName, String item, boolean save) { - ResourceList list = this.getList(category, resourceName); + public boolean removeFromList(String listName, String item, boolean save) { + ResourceList list = this.getList(listName); if (list == null) { return false; } @@ -87,16 +86,16 @@ public class ResourceListManager { } } - public boolean listContains(String category, String resourceName, String address) { - ResourceList list = this.getList(category, resourceName); + public boolean listContains(String listName, String item, boolean caseSensitive) { + ResourceList list = this.getList(listName); if (list == null) { return false; } - return 
list.contains(address); + return list.contains(item, caseSensitive); } - public void saveList(String category, String resourceName) { - ResourceList list = this.getList(category, resourceName); + public void saveList(String listName) { + ResourceList list = this.getList(listName); if (list == null) { return; } @@ -109,20 +108,36 @@ public class ResourceListManager { } } - public void revertList(String category, String resourceName) { - ResourceList list = this.getList(category, resourceName); + public void revertList(String listName) { + ResourceList list = this.getList(listName); if (list == null) { return; } list.revert(); } - public String getJSONStringForList(String category, String resourceName) { - ResourceList list = this.getList(category, resourceName); + public String getJSONStringForList(String listName) { + ResourceList list = this.getList(listName); if (list == null) { return null; } return list.getJSONString(); } + public List getStringsInList(String listName) { + ResourceList list = this.getList(listName); + if (list == null) { + return null; + } + return list.getList(); + } + + public int getItemCountForList(String listName) { + ResourceList list = this.getList(listName); + if (list == null) { + return 0; + } + return list.getList().size(); + } + } diff --git a/src/main/java/org/qortal/network/Handshake.java b/src/main/java/org/qortal/network/Handshake.java index 78b181ce..d88654cf 100644 --- a/src/main/java/org/qortal/network/Handshake.java +++ b/src/main/java/org/qortal/network/Handshake.java @@ -48,6 +48,9 @@ public enum Handshake { return null; } + // Make a note of the senderPeerAddress, as this should be our public IP + Network.getInstance().ourPeerAddressUpdated(helloMessage.getSenderPeerAddress()); + String versionString = helloMessage.getVersionString(); Matcher matcher = peer.VERSION_PATTERN.matcher(versionString); @@ -87,8 +90,9 @@ public enum Handshake { public void action(Peer peer) { String versionString = Controller.getInstance().getVersionString(); long timestamp = NTP.getTime(); + String senderPeerAddress = peer.getPeerData().getAddress().toString(); - Message helloMessage = new HelloMessage(timestamp, versionString); + Message helloMessage = new HelloMessage(timestamp, versionString, senderPeerAddress); if (!peer.sendMessage(helloMessage)) peer.disconnect("failed to send HELLO"); } diff --git a/src/main/java/org/qortal/network/Network.java b/src/main/java/org/qortal/network/Network.java index 7487e64f..345b275f 100644 --- a/src/main/java/org/qortal/network/Network.java +++ b/src/main/java/org/qortal/network/Network.java @@ -6,6 +6,8 @@ import org.bouncycastle.crypto.params.Ed25519PrivateKeyParameters; import org.bouncycastle.crypto.params.Ed25519PublicKeyParameters; import org.qortal.block.BlockChain; import org.qortal.controller.Controller; +import org.qortal.controller.arbitrary.ArbitraryDataFileManager; +import org.qortal.controller.arbitrary.ArbitraryDataManager; import org.qortal.crypto.Crypto; import org.qortal.data.block.BlockData; import org.qortal.data.network.PeerData; @@ -15,6 +17,7 @@ import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; import org.qortal.settings.Settings; +import org.qortal.utils.Base58; import org.qortal.utils.ExecuteProduceConsume; import org.qortal.utils.ExecuteProduceConsume.StatsSnapshot; import org.qortal.utils.NTP; @@ -114,6 +117,9 @@ public class Network { private final Lock mergePeersLock = new ReentrantLock(); + private List 
ourExternalIpAddressHistory = new ArrayList<>(); + private String ourExternalIpAddress = null; + // Constructors private Network() { @@ -234,6 +240,81 @@ public class Network { } } + public boolean requestDataFromPeer(String peerAddressString, byte[] signature) { + if (peerAddressString != null) { + PeerAddress peerAddress = PeerAddress.fromString(peerAddressString); + + // Reuse an existing PeerData instance if it's already in the known peers list + PeerData peerData = this.allKnownPeers.stream() + .filter(knownPeerData -> knownPeerData.getAddress().equals(peerAddress)) + .findFirst() + .orElse(null); + + if (peerData == null) { + // Not a known peer, so we need to create one + Long addedWhen = NTP.getTime(); + String addedBy = "requestDataFromPeer"; + peerData = new PeerData(peerAddress, addedWhen, addedBy); + } + + if (peerData == null) { + LOGGER.info("PeerData is null when trying to request data from peer {}", peerAddressString); + return false; + } + + // Check if we're already connected to and handshaked with this peer + Peer connectedPeer = this.connectedPeers.stream() + .filter(p -> p.getPeerData().getAddress().equals(peerAddress)) + .findFirst() + .orElse(null); + boolean isConnected = (connectedPeer != null); + + boolean isHandshaked = this.getHandshakedPeers().stream() + .anyMatch(p -> p.getPeerData().getAddress().equals(peerAddress)); + + if (isConnected && isHandshaked) { + // Already connected + return this.requestDataFromConnectedPeer(connectedPeer, signature); + } + else { + // We need to connect to this peer before we can request data + try { + if (!isConnected) { + // Add this signature to the list of pending requests for this peer + LOGGER.info("Making connection to peer {} to request files for signature {}...", peerAddressString, Base58.encode(signature)); + Peer peer = new Peer(peerData); + peer.addPendingSignatureRequest(signature); + return this.connectPeer(peer); + // If connection (and handshake) is successful, data will automatically be requested + } + else if (!isHandshaked) { + LOGGER.info("Peer {} is connected but not handshaked. Not attempting a new connection.", peerAddress); + return false; + } + + } catch (InterruptedException e) { + LOGGER.info("Interrupted when connecting to peer {}", peerAddress); + return false; + } + } + } + return false; + } + + private boolean requestDataFromConnectedPeer(Peer connectedPeer, byte[] signature) { + if (signature == null) { + // Nothing to do + return false; + } + + try (final Repository repository = RepositoryManager.getRepository()) { + return ArbitraryDataFileManager.getInstance().fetchAllArbitraryDataFiles(repository, connectedPeer, signature); + } catch (DataException e) { + LOGGER.info("Unable to fetch arbitrary data files"); + } + return false; + } + /** * Returns list of connected peers that have completed handshaking. 
*/ @@ -648,14 +729,14 @@ public class Network { } } - private void connectPeer(Peer newPeer) throws InterruptedException { + private boolean connectPeer(Peer newPeer) throws InterruptedException { SocketChannel socketChannel = newPeer.connect(this.channelSelector); if (socketChannel == null) { - return; + return false; } if (Thread.currentThread().isInterrupted()) { - return; + return false; } synchronized (this.connectedPeers) { @@ -663,6 +744,8 @@ public class Network { } this.onPeerReady(newPeer); + + return true; } private Peer getPeerFromChannel(SocketChannel socketChannel) { @@ -913,6 +996,17 @@ public class Network { } } + // Process any pending signature requests, as this peer may have been connected for this purpose only + List pendingSignatureRequests = new ArrayList<>(peer.getPendingSignatureRequests()); + if (pendingSignatureRequests != null && !pendingSignatureRequests.isEmpty()) { + for (byte[] signature : pendingSignatureRequests) { + this.requestDataFromConnectedPeer(peer, signature); + peer.removePendingSignatureRequest(signature); + } + } + + // FUTURE: we may want to disconnect from this peer if we've finished requesting data from it + // Start regular pings peer.startPings(); @@ -1011,6 +1105,66 @@ public class Network { return new GetUnconfirmedTransactionsMessage(); } + + // External IP / peerAddress tracking + + public void ourPeerAddressUpdated(String peerAddress) { + if (peerAddress == null) { + return; + } + + String[] parts = peerAddress.split(":"); + if (parts.length != 2) { + return; + } + String host = parts[0]; + try { + InetAddress addr = InetAddress.getByName(host); + if (addr.isAnyLocalAddress() || addr.isSiteLocalAddress()) { + // Ignore local addresses + return; + } + } catch (UnknownHostException e) { + return; + } + + this.ourExternalIpAddressHistory.add(host); + + // Limit to 10 entries + while (this.ourExternalIpAddressHistory.size() > 10) { + this.ourExternalIpAddressHistory.remove(0); + } + + // If we've had 3 consecutive matching addresses, and they're different from + // our stored IP address value, treat it as updated. + + int size = this.ourExternalIpAddressHistory.size(); + if (size < 3) { + // Need at least 3 readings + return; + } + + String ip1 = this.ourExternalIpAddressHistory.get(size - 1); + String ip2 = this.ourExternalIpAddressHistory.get(size - 2); + String ip3 = this.ourExternalIpAddressHistory.get(size - 3); + + if (!Objects.equals(ip1, this.ourExternalIpAddress)) { + // Latest reading doesn't match our known value + if (Objects.equals(ip1, ip2) && Objects.equals(ip1, ip3)) { + // Last 3 readings were the same - i.e. more than one peer agreed on the new IP address + this.ourExternalIpAddress = ip1; + this.onExternalIpUpdate(ip1); + } + } + } + + public void onExternalIpUpdate(String ipAddress) { + LOGGER.info("External IP address updated to {}", ipAddress); + + ArbitraryDataManager.getInstance().broadcastHostedSignatureList(); + } + + // Peer-management calls public void noteToSelf(Peer peer) { diff --git a/src/main/java/org/qortal/network/Peer.java b/src/main/java/org/qortal/network/Peer.java index 4f7194b0..3b50b777 100644 --- a/src/main/java/org/qortal/network/Peer.java +++ b/src/main/java/org/qortal/network/Peer.java @@ -47,6 +47,11 @@ public class Peer { */ private static final int RESPONSE_TIMEOUT = 3000; // ms + /** + * Maximum time to wait for a peer to respond with blocks (ms) + */ + public static final int FETCH_BLOCKS_TIMEOUT = 10000; + /** * Interval between PING messages to a peer. (ms) *
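The public FETCH_BLOCKS_TIMEOUT constant introduced above, together with the sendMessageWithTimeout and getResponseWithTimeout methods added further down in this file, lets callers wait longer than the default RESPONSE_TIMEOUT when a large reply (such as a list of data file hashes) is expected. A minimal usage sketch follows; the request message, the in-scope signature/peer variables and the handling of InterruptedException are illustrative assumptions rather than part of this patch:

    // Hypothetical caller (sketch only): ask a peer for a file list, allowing the longer timeout.
    // Assumes NTP time is available and that the surrounding method declares InterruptedException.
    Message request = new GetArbitraryDataFileListMessage(signature, NTP.getTime(), 0);
    Message response = peer.getResponseWithTimeout(request, Peer.FETCH_BLOCKS_TIMEOUT);
    if (response == null) {
        // No reply within FETCH_BLOCKS_TIMEOUT, so treat this peer as unresponsive for now
    }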

@@ -99,6 +104,11 @@ public class Peer { private boolean syncInProgress = false; + + /* Pending signature requests */ + private List pendingSignatureRequests = Collections.synchronizedList(new ArrayList<>()); + + // Versioning public static final Pattern VERSION_PATTERN = Pattern.compile(Controller.VERSION_PREFIX + "(\\d{1,3})\\.(\\d{1,5})\\.(\\d{1,5})"); @@ -350,6 +360,34 @@ public class Peer { this.syncInProgress = syncInProgress; } + + // Pending signature requests + + public void addPendingSignatureRequest(byte[] signature) { + // Check if we already have this signature in the list + for (byte[] existingSignature : this.pendingSignatureRequests) { + if (Arrays.equals(existingSignature, signature )) { + return; + } + } + this.pendingSignatureRequests.add(signature); + } + + public void removePendingSignatureRequest(byte[] signature) { + Iterator iterator = this.pendingSignatureRequests.iterator(); + while (iterator.hasNext()) { + byte[] existingSignature = (byte[]) iterator.next(); + if (Arrays.equals(existingSignature, signature)) { + iterator.remove(); + } + } + } + + public List getPendingSignatureRequests() { + return this.pendingSignatureRequests; + } + + @Override public String toString() { // Easier, and nicer output, than peer.getRemoteSocketAddress() @@ -544,12 +582,22 @@ public class Peer { } /** - * Attempt to send Message to peer. + * Attempt to send Message to peer, using default RESPONSE_TIMEOUT. * * @param message message to be sent * @return true if message successfully sent; false otherwise */ public boolean sendMessage(Message message) { + return this.sendMessageWithTimeout(message, RESPONSE_TIMEOUT); + } + + /** + * Attempt to send Message to peer, using custom timeout. + * + * @param message message to be sent + * @return true if message successfully sent; false otherwise + */ + public boolean sendMessageWithTimeout(Message message, int timeout) { if (!this.socketChannel.isOpen()) { return false; } @@ -563,12 +611,14 @@ public class Peer { synchronized (this.socketChannel) { final long sendStart = System.currentTimeMillis(); + long totalBytes = 0; while (outputBuffer.hasRemaining()) { int bytesWritten = this.socketChannel.write(outputBuffer); + totalBytes += bytesWritten; - LOGGER.trace("[{}] Sent {} bytes of {} message with ID {} to peer {}", this.peerConnectionId, - bytesWritten, message.getType().name(), message.getId(), this); + LOGGER.trace("[{}] Sent {} bytes of {} message with ID {} to peer {} ({} total)", this.peerConnectionId, + bytesWritten, message.getType().name(), message.getId(), this, totalBytes); if (bytesWritten == 0) { // Underlying socket's internal buffer probably full, @@ -583,7 +633,7 @@ public class Peer { */ Thread.sleep(1L); //NOSONAR squid:S2276 - if (System.currentTimeMillis() - sendStart > RESPONSE_TIMEOUT) { + if (System.currentTimeMillis() - sendStart > timeout) { // We've taken too long to send this message return false; } @@ -604,7 +654,7 @@ public class Peer { } /** - * Send message to peer and await response. + * Send message to peer and await response, using default RESPONSE_TIMEOUT. *

* Message is assigned a random ID and sent. * If a response with matching ID is received then it is returned to caller. @@ -618,6 +668,24 @@ public class Peer { * @throws InterruptedException if interrupted while waiting */ public Message getResponse(Message message) throws InterruptedException { + return getResponseWithTimeout(message, RESPONSE_TIMEOUT); + } + + /** + * Send message to peer and await response. + *

+ * Message is assigned a random ID and sent. + * If a response with matching ID is received then it is returned to caller. + *

+ * If no response with matching ID within timeout, or some other error/exception occurs, + * then return null.
+ * (Assume peer will be rapidly disconnected after this). + * + * @param message message to send + * @return Message if valid response received; null if not or error/exception occurs + * @throws InterruptedException if interrupted while waiting + */ + public Message getResponseWithTimeout(Message message, int timeout) throws InterruptedException { BlockingQueue blockingQueue = new ArrayBlockingQueue<>(1); // Assign random ID to this message @@ -632,13 +700,13 @@ public class Peer { message.setId(id); // Try to send message - if (!this.sendMessage(message)) { + if (!this.sendMessageWithTimeout(message, timeout)) { this.replyQueues.remove(id); return null; } try { - return blockingQueue.poll(RESPONSE_TIMEOUT, TimeUnit.MILLISECONDS); + return blockingQueue.poll(timeout, TimeUnit.MILLISECONDS); } finally { this.replyQueues.remove(id); } diff --git a/src/main/java/org/qortal/network/message/ArbitraryDataFileListMessage.java b/src/main/java/org/qortal/network/message/ArbitraryDataFileListMessage.java new file mode 100644 index 00000000..008b3edd --- /dev/null +++ b/src/main/java/org/qortal/network/message/ArbitraryDataFileListMessage.java @@ -0,0 +1,90 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import org.qortal.transform.TransformationException; +import org.qortal.transform.Transformer; +import org.qortal.utils.Serialization; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +public class ArbitraryDataFileListMessage extends Message { + + private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH; + private static final int HASH_LENGTH = Transformer.SHA256_LENGTH; + + private final byte[] signature; + private final List hashes; + + public ArbitraryDataFileListMessage(byte[] signature, List hashes) { + super(MessageType.ARBITRARY_DATA_FILE_LIST); + + this.signature = signature; + this.hashes = hashes; + } + + public ArbitraryDataFileListMessage(int id, byte[] signature, List hashes) { + super(id, MessageType.ARBITRARY_DATA_FILE_LIST); + + this.signature = signature; + this.hashes = hashes; + } + + public List getHashes() { + return this.hashes; + } + + public byte[] getSignature() { + return this.signature; + } + + public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException, TransformationException { + byte[] signature = new byte[SIGNATURE_LENGTH]; + bytes.get(signature); + + int count = bytes.getInt(); + + if (bytes.remaining() != count * HASH_LENGTH) + return null; + + List hashes = new ArrayList<>(); + for (int i = 0; i < count; ++i) { + + byte[] hash = new byte[HASH_LENGTH]; + bytes.get(hash); + hashes.add(hash); + } + + return new ArbitraryDataFileListMessage(id, signature, hashes); + } + + @Override + protected byte[] toData() { + try { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + bytes.write(this.signature); + + bytes.write(Ints.toByteArray(this.hashes.size())); + + for (byte[] hash : this.hashes) { + bytes.write(hash); + } + + return bytes.toByteArray(); + } catch (IOException e) { + return null; + } + } + + public ArbitraryDataFileListMessage cloneWithNewId(int newId) { + ArbitraryDataFileListMessage clone = new ArbitraryDataFileListMessage(this.signature, this.hashes); + clone.setId(newId); + return clone; + } + +} diff --git a/src/main/java/org/qortal/network/message/ArbitraryDataFileMessage.java 
b/src/main/java/org/qortal/network/message/ArbitraryDataFileMessage.java new file mode 100644 index 00000000..d87e9685 --- /dev/null +++ b/src/main/java/org/qortal/network/message/ArbitraryDataFileMessage.java @@ -0,0 +1,91 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.repository.DataException; +import org.qortal.transform.Transformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; + +public class ArbitraryDataFileMessage extends Message { + + private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH; + + private final byte[] signature; + private final ArbitraryDataFile arbitraryDataFile; + + public ArbitraryDataFileMessage(byte[] signature, ArbitraryDataFile arbitraryDataFile) { + super(MessageType.ARBITRARY_DATA_FILE); + + this.signature = signature; + this.arbitraryDataFile = arbitraryDataFile; + } + + public ArbitraryDataFileMessage(int id, byte[] signature, ArbitraryDataFile arbitraryDataFile) { + super(id, MessageType.ARBITRARY_DATA_FILE); + + this.signature = signature; + this.arbitraryDataFile = arbitraryDataFile; + } + + public ArbitraryDataFile getArbitraryDataFile() { + return this.arbitraryDataFile; + } + + public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) throws UnsupportedEncodingException { + byte[] signature = new byte[SIGNATURE_LENGTH]; + byteBuffer.get(signature); + + int dataLength = byteBuffer.getInt(); + + if (byteBuffer.remaining() != dataLength) + return null; + + byte[] data = new byte[dataLength]; + byteBuffer.get(data); + + try { + ArbitraryDataFile arbitraryDataFile = new ArbitraryDataFile(data, signature); + return new ArbitraryDataFileMessage(id, signature, arbitraryDataFile); + } + catch (DataException e) { + return null; + } + } + + @Override + protected byte[] toData() { + if (this.arbitraryDataFile == null) { + return null; + } + + byte[] data = this.arbitraryDataFile.getBytes(); + if (data == null) { + return null; + } + + try { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + bytes.write(signature); + + bytes.write(Ints.toByteArray(data.length)); + + bytes.write(data); + + return bytes.toByteArray(); + } catch (IOException e) { + return null; + } + } + + public ArbitraryDataFileMessage cloneWithNewId(int newId) { + ArbitraryDataFileMessage clone = new ArbitraryDataFileMessage(this.signature, this.arbitraryDataFile); + clone.setId(newId); + return clone; + } + +} diff --git a/src/main/java/org/qortal/network/message/ArbitrarySignaturesMessage.java b/src/main/java/org/qortal/network/message/ArbitrarySignaturesMessage.java new file mode 100644 index 00000000..379eeb47 --- /dev/null +++ b/src/main/java/org/qortal/network/message/ArbitrarySignaturesMessage.java @@ -0,0 +1,79 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import org.qortal.data.network.PeerData; +import org.qortal.transaction.DeployAtTransaction; +import org.qortal.transform.TransformationException; +import org.qortal.transform.Transformer; +import org.qortal.utils.Serialization; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +public class ArbitrarySignaturesMessage extends Message { + + private static final int SIGNATURE_LENGTH = 
Transformer.SIGNATURE_LENGTH; + + private String peerAddress; + private List signatures; + + public ArbitrarySignaturesMessage(String peerAddress, List signatures) { + this(-1, peerAddress, signatures); + } + + private ArbitrarySignaturesMessage(int id, String peerAddress, List signatures) { + super(id, MessageType.ARBITRARY_SIGNATURES); + + this.peerAddress = peerAddress; + this.signatures = signatures; + } + + public String getPeerAddress() { + return this.peerAddress; + } + + public List getSignatures() { + return this.signatures; + } + + public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException, TransformationException { + String peerAddress = Serialization.deserializeSizedString(bytes, PeerData.MAX_PEER_ADDRESS_SIZE); + + int signatureCount = bytes.getInt(); + + if (bytes.remaining() != signatureCount * SIGNATURE_LENGTH) + return null; + + List signatures = new ArrayList<>(); + for (int i = 0; i < signatureCount; ++i) { + byte[] signature = new byte[SIGNATURE_LENGTH]; + bytes.get(signature); + signatures.add(signature); + } + + return new ArbitrarySignaturesMessage(id, peerAddress, signatures); + } + + @Override + protected byte[] toData() { + try { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + Serialization.serializeSizedString(bytes, this.peerAddress); + + bytes.write(Ints.toByteArray(this.signatures.size())); + + for (byte[] signature : this.signatures) + bytes.write(signature); + + return bytes.toByteArray(); + } catch (IOException e) { + return null; + } + } + +} diff --git a/src/main/java/org/qortal/network/message/GetArbitraryDataFileListMessage.java b/src/main/java/org/qortal/network/message/GetArbitraryDataFileListMessage.java new file mode 100644 index 00000000..e19bbb25 --- /dev/null +++ b/src/main/java/org/qortal/network/message/GetArbitraryDataFileListMessage.java @@ -0,0 +1,82 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import org.qortal.transform.Transformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; + +import static org.qortal.transform.Transformer.INT_LENGTH; +import static org.qortal.transform.Transformer.LONG_LENGTH; + +public class GetArbitraryDataFileListMessage extends Message { + + private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH; + + private final byte[] signature; + private final long requestTime; + private int requestHops; + + public GetArbitraryDataFileListMessage(byte[] signature, long requestTime, int requestHops) { + this(-1, signature, requestTime, requestHops); + } + + private GetArbitraryDataFileListMessage(int id, byte[] signature, long requestTime, int requestHops) { + super(id, MessageType.GET_ARBITRARY_DATA_FILE_LIST); + + this.signature = signature; + this.requestTime = requestTime; + this.requestHops = requestHops; + } + + public byte[] getSignature() { + return this.signature; + } + + public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException { + if (bytes.remaining() != SIGNATURE_LENGTH + LONG_LENGTH + INT_LENGTH) + return null; + + byte[] signature = new byte[SIGNATURE_LENGTH]; + + bytes.get(signature); + + long requestTime = bytes.getLong(); + + int requestHops = bytes.getInt(); + + return new GetArbitraryDataFileListMessage(id, signature, requestTime, requestHops); + } + + @Override + protected byte[] toData() { + try { + 
ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + bytes.write(this.signature); + + bytes.write(Longs.toByteArray(this.requestTime)); + + bytes.write(Ints.toByteArray(this.requestHops)); + + return bytes.toByteArray(); + } catch (IOException e) { + return null; + } + } + + public long getRequestTime() { + return this.requestTime; + } + + public int getRequestHops() { + return this.requestHops; + } + public void setRequestHops(int requestHops) { + this.requestHops = requestHops; + } + +} diff --git a/src/main/java/org/qortal/network/message/GetArbitraryDataFileMessage.java b/src/main/java/org/qortal/network/message/GetArbitraryDataFileMessage.java new file mode 100644 index 00000000..809b983d --- /dev/null +++ b/src/main/java/org/qortal/network/message/GetArbitraryDataFileMessage.java @@ -0,0 +1,66 @@ +package org.qortal.network.message; + +import org.qortal.transform.Transformer; +import org.qortal.transform.transaction.TransactionTransformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; + +public class GetArbitraryDataFileMessage extends Message { + + private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH; + private static final int HASH_LENGTH = TransactionTransformer.SHA256_LENGTH; + + private final byte[] signature; + private final byte[] hash; + + public GetArbitraryDataFileMessage(byte[] signature, byte[] hash) { + this(-1, signature, hash); + } + + private GetArbitraryDataFileMessage(int id, byte[] signature, byte[] hash) { + super(id, MessageType.GET_ARBITRARY_DATA_FILE); + + this.signature = signature; + this.hash = hash; + } + + public byte[] getSignature() { + return this.signature; + } + + public byte[] getHash() { + return this.hash; + } + + public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException { + if (bytes.remaining() != HASH_LENGTH + SIGNATURE_LENGTH) + return null; + + byte[] signature = new byte[SIGNATURE_LENGTH]; + bytes.get(signature); + + byte[] hash = new byte[HASH_LENGTH]; + bytes.get(hash); + + return new GetArbitraryDataFileMessage(id, signature, hash); + } + + @Override + protected byte[] toData() { + try { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + bytes.write(this.signature); + + bytes.write(this.hash); + + return bytes.toByteArray(); + } catch (IOException e) { + return null; + } + } + +} diff --git a/src/main/java/org/qortal/network/message/HelloMessage.java b/src/main/java/org/qortal/network/message/HelloMessage.java index 537daf48..1b6de17d 100644 --- a/src/main/java/org/qortal/network/message/HelloMessage.java +++ b/src/main/java/org/qortal/network/message/HelloMessage.java @@ -13,16 +13,18 @@ public class HelloMessage extends Message { private final long timestamp; private final String versionString; + private final String senderPeerAddress; - private HelloMessage(int id, long timestamp, String versionString) { + private HelloMessage(int id, long timestamp, String versionString, String senderPeerAddress) { super(id, MessageType.HELLO); this.timestamp = timestamp; this.versionString = versionString; + this.senderPeerAddress = senderPeerAddress; } - public HelloMessage(long timestamp, String versionString) { - this(-1, timestamp, versionString); + public HelloMessage(long timestamp, String versionString, String senderPeerAddress) { + this(-1, timestamp, versionString, senderPeerAddress); } public long getTimestamp() { @@ -33,12 +35,22 @@ public class HelloMessage 
extends Message { return this.versionString; } + public String getSenderPeerAddress() { + return this.senderPeerAddress; + } + public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) throws TransformationException { long timestamp = byteBuffer.getLong(); String versionString = Serialization.deserializeSizedString(byteBuffer, 255); - return new HelloMessage(id, timestamp, versionString); + // Sender peer address added in v3.0, so is an optional field. Older versions won't send it. + String senderPeerAddress = null; + if (byteBuffer.hasRemaining()) { + senderPeerAddress = Serialization.deserializeSizedString(byteBuffer, 255); + } + + return new HelloMessage(id, timestamp, versionString, senderPeerAddress); } @Override @@ -49,6 +61,8 @@ public class HelloMessage extends Message { Serialization.serializeSizedString(bytes, this.versionString); + Serialization.serializeSizedString(bytes, this.senderPeerAddress); + return bytes.toByteArray(); } diff --git a/src/main/java/org/qortal/network/message/Message.java b/src/main/java/org/qortal/network/message/Message.java index cc90fe81..c7657493 100644 --- a/src/main/java/org/qortal/network/message/Message.java +++ b/src/main/java/org/qortal/network/message/Message.java @@ -25,7 +25,7 @@ public abstract class Message { private static final int MAGIC_LENGTH = 4; private static final int CHECKSUM_LENGTH = 4; - private static final int MAX_DATA_SIZE = 1024 * 1024; // 1MB + private static final int MAX_DATA_SIZE = 10 * 1024 * 1024; // 10MB @SuppressWarnings("serial") public static class MessageException extends Exception { @@ -80,7 +80,18 @@ public abstract class Message { GET_ONLINE_ACCOUNTS(81), ARBITRARY_DATA(90), - GET_ARBITRARY_DATA(91); + GET_ARBITRARY_DATA(91), + + BLOCKS(100), + GET_BLOCKS(101), + + ARBITRARY_DATA_FILE(110), + GET_ARBITRARY_DATA_FILE(111), + + ARBITRARY_DATA_FILE_LIST(120), + GET_ARBITRARY_DATA_FILE_LIST(121), + + ARBITRARY_SIGNATURES(130); public final int value; public final Method fromByteBufferMethod; diff --git a/src/main/java/org/qortal/payment/Payment.java b/src/main/java/org/qortal/payment/Payment.java index cd7f1118..8b6070ee 100644 --- a/src/main/java/org/qortal/payment/Payment.java +++ b/src/main/java/org/qortal/payment/Payment.java @@ -40,8 +40,9 @@ public class Payment { public ValidationResult isValid(byte[] senderPublicKey, List payments, long fee, boolean isZeroAmountValid) throws DataException { AssetRepository assetRepository = this.repository.getAssetRepository(); - // Check fee is positive - if (fee <= 0) + // Check fee is positive or zero + // We have already checked that the fee is correct in the Transaction superclass + if (fee < 0) return ValidationResult.NEGATIVE_FEE; // Total up payment amounts by assetId diff --git a/src/main/java/org/qortal/repository/ArbitraryRepository.java b/src/main/java/org/qortal/repository/ArbitraryRepository.java index 80f8c1e3..ba3ba1d8 100644 --- a/src/main/java/org/qortal/repository/ArbitraryRepository.java +++ b/src/main/java/org/qortal/repository/ArbitraryRepository.java @@ -1,6 +1,13 @@ package org.qortal.repository; +import org.qortal.arbitrary.misc.Service; +import org.qortal.data.arbitrary.ArbitraryResourceInfo; +import org.qortal.data.arbitrary.ArbitraryResourceNameInfo; +import org.qortal.data.network.ArbitraryPeerData; import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.ArbitraryTransactionData.*; + +import java.util.List; public interface ArbitraryRepository { @@ -12,4 +19,26 @@ public interface 
ArbitraryRepository { public void delete(ArbitraryTransactionData arbitraryTransactionData) throws DataException; + public List getArbitraryTransactions(String name, Service service, String identifier, long since) throws DataException; + + public ArbitraryTransactionData getLatestTransaction(String name, Service service, Method method, String identifier) throws DataException; + + + public List getArbitraryResources(Service service, String identifier, String name, boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException; + + public List getArbitraryResourceCreatorNames(Service service, String identifier, boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException; + + + public List getArbitraryPeerDataForSignature(byte[] signature) throws DataException; + + public ArbitraryPeerData getArbitraryPeerDataForSignatureAndPeer(byte[] signature, String peerAddress) throws DataException; + + public ArbitraryPeerData getArbitraryPeerDataForSignatureAndHost(byte[] signature, String host) throws DataException; + + public void save(ArbitraryPeerData arbitraryPeerData) throws DataException; + + public void delete(ArbitraryPeerData arbitraryPeerData) throws DataException; + + public void deleteArbitraryPeersWithSignature(byte[] signature) throws DataException; + } diff --git a/src/main/java/org/qortal/repository/TransactionRepository.java b/src/main/java/org/qortal/repository/TransactionRepository.java index 4e5999eb..b0e3a864 100644 --- a/src/main/java/org/qortal/repository/TransactionRepository.java +++ b/src/main/java/org/qortal/repository/TransactionRepository.java @@ -5,6 +5,7 @@ import java.util.List; import java.util.Map; import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.arbitrary.misc.Service; import org.qortal.data.group.GroupApprovalData; import org.qortal.data.transaction.GroupApprovalTransactionData; import org.qortal.data.transaction.TransactionData; @@ -70,8 +71,8 @@ public interface TransactionRepository { * @throws DataException */ public List getSignaturesMatchingCriteria(Integer startBlock, Integer blockLimit, Integer txGroupId, - List txTypes, Integer service, String address, - ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException; + List txTypes, Service service, String name, String address, + ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException; /** * Returns signatures for transactions that match search criteria. 
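The reworked repository interfaces above key arbitrary data by registered name, Service and optional identifier. As a rough sketch of how a caller might resolve the most recent update for a resource using the methods declared above (Service.WEBSITE is only an illustrative enum constant here, and error handling is omitted):

import org.qortal.arbitrary.misc.Service;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;

public class LatestResourceLookupSketch {

    // Fetch the latest ARBITRARY transaction for a name/service/identifier combination.
    // Passing a null Method matches any update method (PUT or PATCH), per the HSQLDB implementation below.
    public static ArbitraryTransactionData latest(String name, String identifier) throws DataException {
        try (final Repository repository = RepositoryManager.getRepository()) {
            return repository.getArbitraryRepository()
                    .getLatestTransaction(name, Service.WEBSITE, null, identifier);
        }
    }
}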
diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBArbitraryRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBArbitraryRepository.java index 3d99bbb3..b0b806b7 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBArbitraryRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBArbitraryRepository.java @@ -1,58 +1,39 @@ package org.qortal.repository.hsqldb; -import java.io.File; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.file.DirectoryNotEmptyException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; - +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.misc.Service; +import org.qortal.data.arbitrary.ArbitraryResourceInfo; import org.qortal.crypto.Crypto; +import org.qortal.data.arbitrary.ArbitraryResourceNameInfo; +import org.qortal.data.network.ArbitraryPeerData; import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.ArbitraryTransactionData.*; +import org.qortal.data.transaction.BaseTransactionData; import org.qortal.data.transaction.TransactionData; -import org.qortal.data.transaction.ArbitraryTransactionData.DataType; import org.qortal.repository.ArbitraryRepository; import org.qortal.repository.DataException; -import org.qortal.settings.Settings; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.transaction.Transaction.ApprovalStatus; import org.qortal.utils.Base58; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + public class HSQLDBArbitraryRepository implements ArbitraryRepository { + private static final Logger LOGGER = LogManager.getLogger(HSQLDBArbitraryRepository.class); + private static final int MAX_RAW_DATA_SIZE = 255; // size of VARBINARY protected HSQLDBRepository repository; - + public HSQLDBArbitraryRepository(HSQLDBRepository repository) { this.repository = repository; } - /** - * Returns pathname for saving arbitrary transaction data payloads. - *
- * Format: arbitrary//.raw - * - * @param arbitraryTransactionData - * @return - */ - public static String buildPathname(ArbitraryTransactionData arbitraryTransactionData) { - String senderAddress = Crypto.toAddress(arbitraryTransactionData.getSenderPublicKey()); - - StringBuilder stringBuilder = new StringBuilder(1024); - - stringBuilder.append(Settings.getInstance().getUserPath()); - stringBuilder.append("arbitrary"); - stringBuilder.append(File.separator); - stringBuilder.append(senderAddress); - stringBuilder.append(File.separator); - stringBuilder.append(arbitraryTransactionData.getService()); - stringBuilder.append(File.separator); - stringBuilder.append(Base58.encode(arbitraryTransactionData.getSignature())); - stringBuilder.append(".raw"); - - return stringBuilder.toString(); - } - private ArbitraryTransactionData getTransactionData(byte[] signature) throws DataException { TransactionData transactionData = this.repository.getTransactionRepository().fromSignature(signature); if (transactionData == null) @@ -64,99 +45,529 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository { @Override public boolean isDataLocal(byte[] signature) throws DataException { ArbitraryTransactionData transactionData = getTransactionData(signature); - if (transactionData == null) + if (transactionData == null) { return false; + } // Raw data is always available - if (transactionData.getDataType() == DataType.RAW_DATA) + if (transactionData.getDataType() == DataType.RAW_DATA) { return true; + } - String dataPathname = buildPathname(transactionData); + // Load hashes + byte[] hash = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); - Path dataPath = Paths.get(dataPathname); - return Files.exists(dataPath); + // Load data file(s) + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + arbitraryDataFile.setMetadataHash(metadataHash); + + // Check if we already have the complete data file or all chunks + if (arbitraryDataFile.allFilesExist()) { + return true; + } + + return false; } @Override - public byte[] fetchData(byte[] signature) throws DataException { - ArbitraryTransactionData transactionData = getTransactionData(signature); - if (transactionData == null) - return null; - - // Raw data is always available - if (transactionData.getDataType() == DataType.RAW_DATA) - return transactionData.getData(); - - String dataPathname = buildPathname(transactionData); - - Path dataPath = Paths.get(dataPathname); + public byte[] fetchData(byte[] signature) { try { - return Files.readAllBytes(dataPath); - } catch (IOException e) { + ArbitraryTransactionData transactionData = getTransactionData(signature); + if (transactionData == null) { + return null; + } + + // Raw data is always available + if (transactionData.getDataType() == DataType.RAW_DATA) { + return transactionData.getData(); + } + + // Load hashes + byte[] digest = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); + + // Load data file(s) + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature); + arbitraryDataFile.setMetadataHash(metadataHash); + + // If we have the complete data file, return it + if (arbitraryDataFile.exists()) { + // Ensure the file's size matches the size reported by the transaction (throws a DataException if not) + arbitraryDataFile.validateFileSize(transactionData.getSize()); + + return arbitraryDataFile.getBytes(); + } + + // Alternatively, if we have all the chunks, combine them into a 
single file + if (arbitraryDataFile.allChunksExist()) { + arbitraryDataFile.join(); + + // Verify that the combined hash matches the expected hash (compare contents, not references) + if (!java.util.Arrays.equals(digest, arbitraryDataFile.digest())) { + LOGGER.info(String.format("Hash mismatch for transaction: %s", Base58.encode(signature))); + return null; + } + + // Ensure the file's size matches the size reported by the transaction + arbitraryDataFile.validateFileSize(transactionData.getSize()); + + return arbitraryDataFile.getBytes(); + } + + } catch (DataException e) { + LOGGER.info("Unable to fetch data for transaction {}: {}", Base58.encode(signature), e.getMessage()); + return null; + } + + return null; + } + + @Override public void save(ArbitraryTransactionData arbitraryTransactionData) throws DataException { // Already hashed? Nothing to do - if (arbitraryTransactionData.getDataType() == DataType.DATA_HASH) + if (arbitraryTransactionData.getDataType() == DataType.DATA_HASH) { return; + } // Trivial-sized payloads can remain in raw form - if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA && arbitraryTransactionData.getData().length <= MAX_RAW_DATA_SIZE) + if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA && arbitraryTransactionData.getData().length <= MAX_RAW_DATA_SIZE) { return; - - // Store non-trivial payloads in filesystem and convert transaction's data to hash form - byte[] rawData = arbitraryTransactionData.getData(); - - // Calculate hash of data and update our transaction to use that - byte[] dataHash = Crypto.digest(rawData); - arbitraryTransactionData.setData(dataHash); - arbitraryTransactionData.setDataType(DataType.DATA_HASH); - - String dataPathname = buildPathname(arbitraryTransactionData); - - Path dataPath = Paths.get(dataPathname); - - // Make sure directory structure exists - try { - Files.createDirectories(dataPath.getParent()); - } catch (IOException e) { - throw new DataException("Unable to create arbitrary transaction directory", e); } - // Output actual transaction data - try (OutputStream dataOut = Files.newOutputStream(dataPath)) { - dataOut.write(rawData); - } catch (IOException e) { - throw new DataException("Unable to store arbitrary transaction data", e); - } + throw new IllegalStateException(String.format("Supplied data is larger than maximum size (%d bytes).
Please use ArbitraryDataWriter.", MAX_RAW_DATA_SIZE)); } @Override public void delete(ArbitraryTransactionData arbitraryTransactionData) throws DataException { // No need to do anything if we still only have raw data, and hence nothing saved in filesystem - if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA) + if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA) { return; + } - String dataPathname = buildPathname(arbitraryTransactionData); - Path dataPath = Paths.get(dataPathname); - try { - Files.deleteIfExists(dataPath); + // Load hashes + byte[] hash = arbitraryTransactionData.getData(); + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); - // Also attempt to delete parent directory if empty - Path servicePath = dataPath.getParent(); - Files.deleteIfExists(servicePath); + // Load data file(s) + byte[] signature = arbitraryTransactionData.getSignature(); + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + arbitraryDataFile.setMetadataHash(metadataHash); - // Also attempt to delete parent directory if empty - Path senderpath = servicePath.getParent(); - Files.deleteIfExists(senderpath); - } catch (DirectoryNotEmptyException e) { - // One of the parent service/sender directories still has data from other transactions - this is OK - } catch (IOException e) { - throw new DataException("Unable to delete arbitrary transaction data", e); + // Delete file and chunks + arbitraryDataFile.deleteAll(); + } + + @Override + public List getArbitraryTransactions(String name, Service service, String identifier, long since) throws DataException { + String sql = "SELECT type, reference, signature, creator, created_when, fee, " + + "tx_group_id, block_height, approval_status, approval_height, " + + "version, nonce, service, size, is_data_raw, data, metadata_hash, " + + "name, identifier, update_method, secret, compression FROM ArbitraryTransactions " + + "JOIN Transactions USING (signature) " + + "WHERE lower(name) = ? AND service = ?" + + "AND (identifier = ? OR (identifier IS NULL AND ? IS NULL))" + + "AND created_when >= ? 
ORDER BY created_when ASC"; + List arbitraryTransactionData = new ArrayList<>(); + + try (ResultSet resultSet = this.repository.checkedExecute(sql, name.toLowerCase(), service.value, identifier, identifier, since)) { + if (resultSet == null) + return null; + + do { + //TransactionType type = TransactionType.valueOf(resultSet.getInt(1)); + + byte[] reference = resultSet.getBytes(2); + byte[] signature = resultSet.getBytes(3); + byte[] creatorPublicKey = resultSet.getBytes(4); + long timestamp = resultSet.getLong(5); + + Long fee = resultSet.getLong(6); + if (fee == 0 && resultSet.wasNull()) + fee = null; + + int txGroupId = resultSet.getInt(7); + + Integer blockHeight = resultSet.getInt(8); + if (blockHeight == 0 && resultSet.wasNull()) + blockHeight = null; + + ApprovalStatus approvalStatus = ApprovalStatus.valueOf(resultSet.getInt(9)); + Integer approvalHeight = resultSet.getInt(10); + if (approvalHeight == 0 && resultSet.wasNull()) + approvalHeight = null; + + BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, creatorPublicKey, fee, approvalStatus, blockHeight, approvalHeight, signature); + + int version = resultSet.getInt(11); + int nonce = resultSet.getInt(12); + Service serviceResult = Service.valueOf(resultSet.getInt(13)); + int size = resultSet.getInt(14); + boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null to false + DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH; + byte[] data = resultSet.getBytes(16); + byte[] metadataHash = resultSet.getBytes(17); + String nameResult = resultSet.getString(18); + String identifierResult = resultSet.getString(19); + Method method = Method.valueOf(resultSet.getInt(20)); + byte[] secret = resultSet.getBytes(21); + Compression compression = Compression.valueOf(resultSet.getInt(22)); + // FUTURE: get payments from signature if needed. Avoiding for now to reduce database calls. + + ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData, + version, serviceResult, nonce, size, nameResult, identifierResult, method, secret, + compression, data, dataType, metadataHash, null); + + arbitraryTransactionData.add(transactionData); + } while (resultSet.next()); + + return arbitraryTransactionData; + } catch (SQLException e) { + throw new DataException("Unable to fetch arbitrary transactions from repository", e); } } + @Override + public ArbitraryTransactionData getLatestTransaction(String name, Service service, Method method, String identifier) throws DataException { + StringBuilder sql = new StringBuilder(1024); + + sql.append("SELECT type, reference, signature, creator, created_when, fee, " + + "tx_group_id, block_height, approval_status, approval_height, " + + "version, nonce, service, size, is_data_raw, data, metadata_hash, " + + "name, identifier, update_method, secret, compression FROM ArbitraryTransactions " + + "JOIN Transactions USING (signature) " + + "WHERE lower(name) = ? AND service = ? " + + "AND (identifier = ? OR (identifier IS NULL AND ? 
IS NULL))"); + + if (method != null) { + sql.append(" AND update_method = "); + sql.append(method.value); + } + + sql.append("ORDER BY created_when DESC LIMIT 1"); + + try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), name.toLowerCase(), service.value, identifier, identifier)) { + if (resultSet == null) + return null; + + //TransactionType type = TransactionType.valueOf(resultSet.getInt(1)); + + byte[] reference = resultSet.getBytes(2); + byte[] signature = resultSet.getBytes(3); + byte[] creatorPublicKey = resultSet.getBytes(4); + long timestamp = resultSet.getLong(5); + + Long fee = resultSet.getLong(6); + if (fee == 0 && resultSet.wasNull()) + fee = null; + + int txGroupId = resultSet.getInt(7); + + Integer blockHeight = resultSet.getInt(8); + if (blockHeight == 0 && resultSet.wasNull()) + blockHeight = null; + + ApprovalStatus approvalStatus = ApprovalStatus.valueOf(resultSet.getInt(9)); + Integer approvalHeight = resultSet.getInt(10); + if (approvalHeight == 0 && resultSet.wasNull()) + approvalHeight = null; + + BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, creatorPublicKey, fee, approvalStatus, blockHeight, approvalHeight, signature); + + int version = resultSet.getInt(11); + int nonce = resultSet.getInt(12); + Service serviceResult = Service.valueOf(resultSet.getInt(13)); + int size = resultSet.getInt(14); + boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null to false + DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH; + byte[] data = resultSet.getBytes(16); + byte[] metadataHash = resultSet.getBytes(17); + String nameResult = resultSet.getString(18); + String identifierResult = resultSet.getString(19); + Method methodResult = Method.valueOf(resultSet.getInt(20)); + byte[] secret = resultSet.getBytes(21); + Compression compression = Compression.valueOf(resultSet.getInt(22)); + // FUTURE: get payments from signature if needed. Avoiding for now to reduce database calls. + + ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData, + version, serviceResult, nonce, size, nameResult, identifierResult, methodResult, secret, + compression, data, dataType, metadataHash, null); + + return transactionData; + } catch (SQLException e) { + throw new DataException("Unable to fetch arbitrary transactions from repository", e); + } + } + + @Override + public List getArbitraryResources(Service service, String identifier, String name, + boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException { + StringBuilder sql = new StringBuilder(512); + List bindParams = new ArrayList<>(); + + sql.append("SELECT name, service, identifier FROM ArbitraryTransactions WHERE 1=1"); + + if (service != null) { + sql.append(" AND service = "); + sql.append(service.value); + } + + if (defaultResource) { + // Default resource requested - use NULL identifier + sql.append(" AND identifier IS NULL"); + } + else { + // Non-default resource requested + // Use an exact match identifier, or list all if supplied identifier is null + sql.append(" AND (identifier = ? OR (? 
IS NULL))"); + bindParams.add(identifier); + bindParams.add(identifier); + } + + if (name != null) { + sql.append(" AND name = ?"); + bindParams.add(name); + } + + sql.append(" GROUP BY name, service, identifier ORDER BY name"); + + if (reverse != null && reverse) { + sql.append(" DESC"); + } + + HSQLDBRepository.limitOffsetSql(sql, limit, offset); + + List arbitraryResources = new ArrayList<>(); + + try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), bindParams.toArray())) { + if (resultSet == null) + return null; + + do { + String nameResult = resultSet.getString(1); + Service serviceResult = Service.valueOf(resultSet.getInt(2)); + String identifierResult = resultSet.getString(3); + + // We should filter out resources without names + if (nameResult == null) { + continue; + } + + ArbitraryResourceInfo arbitraryResourceInfo = new ArbitraryResourceInfo(); + arbitraryResourceInfo.name = nameResult; + arbitraryResourceInfo.service = serviceResult; + arbitraryResourceInfo.identifier = identifierResult; + + arbitraryResources.add(arbitraryResourceInfo); + } while (resultSet.next()); + + return arbitraryResources; + } catch (SQLException e) { + throw new DataException("Unable to fetch arbitrary transactions from repository", e); + } + } + + @Override + public List getArbitraryResourceCreatorNames(Service service, String identifier, + boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException { + StringBuilder sql = new StringBuilder(512); + + sql.append("SELECT name FROM ArbitraryTransactions WHERE 1=1"); + + if (service != null) { + sql.append(" AND service = "); + sql.append(service.value); + } + + if (defaultResource) { + // Default resource requested - use NULL identifier + // The AND ? IS NULL AND ? IS NULL is a hack to make use of the identifier params in checkedExecute() + identifier = null; + sql.append(" AND (identifier IS NULL AND ? IS NULL AND ? IS NULL)"); + } + else { + // Non-default resource requested + // Use an exact match identifier, or list all if supplied identifier is null + sql.append(" AND (identifier = ? OR (? IS NULL))"); + } + + sql.append(" GROUP BY name ORDER BY name"); + + if (reverse != null && reverse) { + sql.append(" DESC"); + } + + HSQLDBRepository.limitOffsetSql(sql, limit, offset); + + List arbitraryResources = new ArrayList<>(); + + try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), identifier, identifier)) { + if (resultSet == null) + return null; + + do { + String name = resultSet.getString(1); + + // We should filter out resources without names + if (name == null) { + continue; + } + + ArbitraryResourceNameInfo arbitraryResourceNameInfo = new ArbitraryResourceNameInfo(); + arbitraryResourceNameInfo.name = name; + + arbitraryResources.add(arbitraryResourceNameInfo); + } while (resultSet.next()); + + return arbitraryResources; + } catch (SQLException e) { + throw new DataException("Unable to fetch arbitrary transactions from repository", e); + } + } + + + // Peer file tracking + + /** + * Fetch a list of peers that have reported to be holding chunks related to + * supplied transaction signature. 
+ * @param signature + * @return a list of ArbitraryPeerData objects, or null if none found + * @throws DataException + */ + @Override + public List getArbitraryPeerDataForSignature(byte[] signature) throws DataException { + // Hash the signature so it fits within 32 bytes + byte[] hashedSignature = Crypto.digest(signature); + + String sql = "SELECT hash, peer_address, successes, failures, last_attempted, last_retrieved " + + "FROM ArbitraryPeers " + + "WHERE hash = ?"; + + List arbitraryPeerData = new ArrayList<>(); + + try (ResultSet resultSet = this.repository.checkedExecute(sql, hashedSignature)) { + if (resultSet == null) + return null; + + do { + byte[] hash = resultSet.getBytes(1); + String peerAddr = resultSet.getString(2); + Integer successes = resultSet.getInt(3); + Integer failures = resultSet.getInt(4); + Long lastAttempted = resultSet.getLong(5); + Long lastRetrieved = resultSet.getLong(6); + + ArbitraryPeerData peerData = new ArbitraryPeerData(hash, peerAddr, successes, failures, + lastAttempted, lastRetrieved); + + arbitraryPeerData.add(peerData); + } while (resultSet.next()); + + return arbitraryPeerData; + } catch (SQLException e) { + throw new DataException("Unable to fetch arbitrary peer data from repository", e); + } + } + + public ArbitraryPeerData getArbitraryPeerDataForSignatureAndPeer(byte[] signature, String peerAddress) throws DataException { + // Hash the signature so it fits within 32 bytes + byte[] hashedSignature = Crypto.digest(signature); + + String sql = "SELECT hash, peer_address, successes, failures, last_attempted, last_retrieved " + + "FROM ArbitraryPeers " + + "WHERE hash = ? AND peer_address = ?"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql, hashedSignature, peerAddress)) { + if (resultSet == null) + return null; + + byte[] hash = resultSet.getBytes(1); + String peerAddr = resultSet.getString(2); + Integer successes = resultSet.getInt(3); + Integer failures = resultSet.getInt(4); + Long lastAttempted = resultSet.getLong(5); + Long lastRetrieved = resultSet.getLong(6); + + ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(hash, peerAddr, successes, failures, + lastAttempted, lastRetrieved); + + return arbitraryPeerData; + } catch (SQLException e) { + throw new DataException("Unable to fetch arbitrary peer data from repository", e); + } + } + + public ArbitraryPeerData getArbitraryPeerDataForSignatureAndHost(byte[] signature, String host) throws DataException { + // Hash the signature so it fits within 32 bytes + byte[] hashedSignature = Crypto.digest(signature); + + // Create a host wildcard string which allows any port + String hostWildcard = String.format("%s:%%", host); + + String sql = "SELECT hash, peer_address, successes, failures, last_attempted, last_retrieved " + + "FROM ArbitraryPeers " + + "WHERE hash = ? 
AND peer_address LIKE ?"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql, hashedSignature, hostWildcard)) { + if (resultSet == null) + return null; + + byte[] hash = resultSet.getBytes(1); + String peerAddr = resultSet.getString(2); + Integer successes = resultSet.getInt(3); + Integer failures = resultSet.getInt(4); + Long lastAttempted = resultSet.getLong(5); + Long lastRetrieved = resultSet.getLong(6); + + ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(hash, peerAddr, successes, failures, + lastAttempted, lastRetrieved); + + return arbitraryPeerData; + } catch (SQLException e) { + throw new DataException("Unable to fetch arbitrary peer data from repository", e); + } + } + + @Override + public void save(ArbitraryPeerData arbitraryPeerData) throws DataException { + HSQLDBSaver saveHelper = new HSQLDBSaver("ArbitraryPeers"); + + saveHelper.bind("hash", arbitraryPeerData.getHash()) + .bind("peer_address", arbitraryPeerData.getPeerAddress()) + .bind("successes", arbitraryPeerData.getSuccesses()) + .bind("failures", arbitraryPeerData.getFailures()) + .bind("last_attempted", arbitraryPeerData.getLastAttempted()) + .bind("last_retrieved", arbitraryPeerData.getLastRetrieved()); + + try { + saveHelper.execute(this.repository); + } catch (SQLException e) { + throw new DataException("Unable to save ArbitraryPeerData into repository", e); + } + } + + @Override + public void delete(ArbitraryPeerData arbitraryPeerData) throws DataException { + try { + // Remove peer/hash combination + this.repository.delete("ArbitraryPeers", "hash = ? AND peer_address = ?", + arbitraryPeerData.getHash(), arbitraryPeerData.getPeerAddress()); + + } catch (SQLException e) { + throw new DataException("Unable to delete arbitrary peer data from repository", e); + } + } + + @Override + public void deleteArbitraryPeersWithSignature(byte[] signature) throws DataException { + byte[] hash = Crypto.digest(signature); + try { + // Remove all records of peers hosting supplied signature + this.repository.delete("ArbitraryPeers", "hash = ?", hash); + + } catch (SQLException e) { + throw new DataException("Unable to delete arbitrary peer data from repository", e); + } + } } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java index e28e9114..065cfd0d 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java @@ -286,7 +286,6 @@ public class HSQLDBDatabaseUpdates { + "service SMALLINT NOT NULL, is_data_raw BOOLEAN NOT NULL, data ArbitraryData NOT NULL, " + TRANSACTION_KEYS + ")"); // NB: Actual data payload stored elsewhere - // For the future: data payload should be encrypted, at the very least with transaction's reference as the seed for the encryption key break; case 8: @@ -899,6 +898,53 @@ public class HSQLDBDatabaseUpdates { stmt.execute("SET TABLE BlockArchive NEW SPACE"); break; + case 37: + // ARBITRARY transaction updates for off-chain data storage + + // We may want to use a nonce rather than a transaction fee on the data chain + stmt.execute("ALTER TABLE ArbitraryTransactions ADD nonce INT NOT NULL DEFAULT 0"); + // We need to know the total size of the data file(s) associated with each transaction + stmt.execute("ALTER TABLE ArbitraryTransactions ADD size INT NOT NULL DEFAULT 0"); + // Larger data files need to be split into chunks, for easier transmission and greater decentralization + // We 
store their hashes (and possibly other things) in a metadata file + stmt.execute("ALTER TABLE ArbitraryTransactions ADD metadata_hash VARBINARY(32)"); + // For finding transactions by file hash + stmt.execute("CREATE INDEX ArbitraryDataIndex ON ArbitraryTransactions (is_data_raw, data)"); + break; + + case 38: + // We need the ability for arbitrary transactions to be associated with a name + stmt.execute("ALTER TABLE ArbitraryTransactions ADD name RegisteredName"); + // A "method" specifies how the data should be applied (e.g. PUT or PATCH) + stmt.execute("ALTER TABLE ArbitraryTransactions ADD update_method INTEGER NOT NULL DEFAULT 0"); + // For public data, the AES shared secret needs to be available. This is more for data obfuscation as opposed to actual encryption. + stmt.execute("ALTER TABLE ArbitraryTransactions ADD secret VARBINARY(32)"); + // We want to support compressed and uncompressed data, as well as different compression algorithms + stmt.execute("ALTER TABLE ArbitraryTransactions ADD compression INTEGER NOT NULL DEFAULT 0"); + // An optional identifier string can be used to allow more than one resource per user/service combo + stmt.execute("ALTER TABLE ArbitraryTransactions ADD identifier VARCHAR(64)"); + // For finding transactions by registered name + stmt.execute("CREATE INDEX ArbitraryNameIndex ON ArbitraryTransactions (name)"); + break; + + case 39: + // Add DHT-style lookup table to track file locations + // This maps ARBITRARY transactions to peer addresses, but also includes additional metadata to + // track the local success rate and reachability. It is keyed by a "hash" column, to keep it + // generic, as this way we aren't limited to transaction signatures only. + // Multiple rows with the same hash are allowed, to allow for metadata. Longer term it could be + // reshaped to one row per hash if this is too verbose. + // Transaction signatures are hashed to 32 bytes using SHA256. In doing this we lose the ability + // to join against transaction tables, but on balance the space savings seem more important.
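For reference, a hedged sketch of how a node might record that a peer claims to hold data for a given transaction, mirroring the SHA256-hashed key convention used by this table and by HSQLDBArbitraryRepository above (the six-argument ArbitraryPeerData constructor is the same one the repository reads back; the success/failure counts and NTP timestamps are illustrative only):

import org.qortal.crypto.Crypto;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.utils.NTP;

public class ArbitraryPeerReportSketch {

    // Record that peerAddress reports holding files for the given transaction signature
    public static void recordHost(byte[] signature, String peerAddress) throws DataException {
        byte[] hashedSignature = Crypto.digest(signature); // SHA256, matching the 32-byte "hash" column
        Long now = NTP.getTime(); // assumes NTP has synced, i.e. returns non-null
        ArbitraryPeerData peerData = new ArbitraryPeerData(hashedSignature, peerAddress, 1, 0, now, now);

        try (final Repository repository = RepositoryManager.getRepository()) {
            repository.getArbitraryRepository().save(peerData);
            repository.saveChanges();
        }
    }
}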
+ stmt.execute("CREATE TABLE ArbitraryPeers (hash VARBINARY(32) NOT NULL, " + + "peer_address VARCHAR(255), successes INTEGER NOT NULL, failures INTEGER NOT NULL, " + + "last_attempted EpochMillis NOT NULL, last_retrieved EpochMillis NOT NULL, " + + "PRIMARY KEY (hash, peer_address))"); + + // For finding peers by data hash + stmt.execute("CREATE INDEX ArbitraryPeersHashIndex ON ArbitraryPeers (hash)"); + break; + default: // nothing to do return false; diff --git a/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBArbitraryTransactionRepository.java b/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBArbitraryTransactionRepository.java index 804b2b10..d7fc27b4 100644 --- a/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBArbitraryTransactionRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBArbitraryTransactionRepository.java @@ -4,6 +4,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.List; +import org.qortal.arbitrary.misc.Service; import org.qortal.data.PaymentData; import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.BaseTransactionData; @@ -20,21 +21,31 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos } TransactionData fromBase(BaseTransactionData baseTransactionData) throws DataException { - String sql = "SELECT version, service, is_data_raw, data from ArbitraryTransactions WHERE signature = ?"; + String sql = "SELECT version, nonce, service, size, is_data_raw, data, metadata_hash, " + + "name, identifier, update_method, secret, compression from ArbitraryTransactions " + + "WHERE signature = ?"; try (ResultSet resultSet = this.repository.checkedExecute(sql, baseTransactionData.getSignature())) { if (resultSet == null) return null; int version = resultSet.getInt(1); - int service = resultSet.getInt(2); - boolean isDataRaw = resultSet.getBoolean(3); // NOT NULL, so no null to false + int nonce = resultSet.getInt(2); + Service service = Service.valueOf(resultSet.getInt(3)); + int size = resultSet.getInt(4); + boolean isDataRaw = resultSet.getBoolean(5); // NOT NULL, so no null to false DataType dataType = isDataRaw ? 
DataType.RAW_DATA : DataType.DATA_HASH; - byte[] data = resultSet.getBytes(4); + byte[] data = resultSet.getBytes(6); + byte[] metadataHash = resultSet.getBytes(7); + String name = resultSet.getString(8); + String identifier = resultSet.getString(9); + ArbitraryTransactionData.Method method = ArbitraryTransactionData.Method.valueOf(resultSet.getInt(10)); + byte[] secret = resultSet.getBytes(11); + ArbitraryTransactionData.Compression compression = ArbitraryTransactionData.Compression.valueOf(resultSet.getInt(12)); List payments = this.getPaymentsFromSignature(baseTransactionData.getSignature()); - - return new ArbitraryTransactionData(baseTransactionData, version, service, data, dataType, payments); + return new ArbitraryTransactionData(baseTransactionData, version, service, nonce, size, name, + identifier, method, secret, compression, data, dataType, metadataHash, payments); } catch (SQLException e) { throw new DataException("Unable to fetch arbitrary transaction from repository", e); } @@ -51,8 +62,12 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos HSQLDBSaver saveHelper = new HSQLDBSaver("ArbitraryTransactions"); saveHelper.bind("signature", arbitraryTransactionData.getSignature()).bind("sender", arbitraryTransactionData.getSenderPublicKey()) - .bind("version", arbitraryTransactionData.getVersion()).bind("service", arbitraryTransactionData.getService()) - .bind("is_data_raw", arbitraryTransactionData.getDataType() == DataType.RAW_DATA).bind("data", arbitraryTransactionData.getData()); + .bind("version", arbitraryTransactionData.getVersion()).bind("service", arbitraryTransactionData.getService().value) + .bind("nonce", arbitraryTransactionData.getNonce()).bind("size", arbitraryTransactionData.getSize()) + .bind("is_data_raw", arbitraryTransactionData.getDataType() == DataType.RAW_DATA).bind("data", arbitraryTransactionData.getData()) + .bind("metadata_hash", arbitraryTransactionData.getMetadataHash()).bind("name", arbitraryTransactionData.getName()) + .bind("identifier", arbitraryTransactionData.getIdentifier()).bind("update_method", arbitraryTransactionData.getMethod().value) + .bind("secret", arbitraryTransactionData.getSecret()).bind("compression", arbitraryTransactionData.getCompression().value); try { saveHelper.execute(this.repository); diff --git a/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBTransactionRepository.java b/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBTransactionRepository.java index a8062e2d..e326b498 100644 --- a/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBTransactionRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/transaction/HSQLDBTransactionRepository.java @@ -16,6 +16,7 @@ import java.util.Map; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.arbitrary.misc.Service; import org.qortal.data.PaymentData; import org.qortal.data.group.GroupApprovalData; import org.qortal.data.transaction.BaseTransactionData; @@ -386,8 +387,8 @@ public class HSQLDBTransactionRepository implements TransactionRepository { @Override public List getSignaturesMatchingCriteria(Integer startBlock, Integer blockLimit, Integer txGroupId, - List txTypes, Integer service, String address, - ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException { + List txTypes, Service service, String name, String address, + 
ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException { List signatures = new ArrayList<>(); boolean hasAddress = address != null && !address.isEmpty(); @@ -412,8 +413,8 @@ public class HSQLDBTransactionRepository implements TransactionRepository { signatureColumn = "TransactionParticipants.signature"; } - if (service != null) { - // This is for ARBITRARY transactions + if (service != null || name != null) { + // These are for ARBITRARY transactions tables.append(" LEFT OUTER JOIN ArbitraryTransactions ON ArbitraryTransactions.signature = Transactions.signature"); } @@ -466,7 +467,12 @@ public class HSQLDBTransactionRepository implements TransactionRepository { if (service != null) { whereClauses.add("ArbitraryTransactions.service = ?"); - bindParams.add(service); + bindParams.add(service.value); + } + + if (name != null) { + whereClauses.add("lower(ArbitraryTransactions.name) = ?"); + bindParams.add(name.toLowerCase()); } if (hasAddress) { diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java index 5f7698b3..a40b14a9 100644 --- a/src/main/java/org/qortal/settings/Settings.java +++ b/src/main/java/org/qortal/settings/Settings.java @@ -5,8 +5,8 @@ import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; import java.io.Reader; -import java.util.List; -import java.util.Locale; +import java.nio.file.Paths; +import java.util.*; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; @@ -22,19 +22,27 @@ import org.eclipse.persistence.exceptions.XMLMarshalException; import org.eclipse.persistence.jaxb.JAXBContextFactory; import org.eclipse.persistence.jaxb.UnmarshallerProperties; import org.qortal.block.BlockChain; +import org.qortal.controller.arbitrary.ArbitraryDataStorageManager.*; import org.qortal.crosschain.Bitcoin.BitcoinNet; import org.qortal.crosschain.Litecoin.LitecoinNet; import org.qortal.crosschain.Dogecoin.DogecoinNet; +import org.qortal.utils.EnumUtils; // All properties to be converted to JSON via JAXB @XmlAccessorType(XmlAccessType.FIELD) public class Settings { - private static final int MAINNET_LISTEN_PORT = 12392; - private static final int TESTNET_LISTEN_PORT = 62392; + private static final int MAINNET_LISTEN_PORT = 12394; + private static final int TESTNET_LISTEN_PORT = 62394; - private static final int MAINNET_API_PORT = 12391; - private static final int TESTNET_API_PORT = 62391; + private static final int MAINNET_API_PORT = 12393; + private static final int TESTNET_API_PORT = 62393; + + private static final int MAINNET_DOMAIN_MAP_PORT = 80; + private static final int TESTNET_DOMAIN_MAP_PORT = 8080; + + private static final int MAINNET_GATEWAY_PORT = 80; + private static final int TESTNET_GATEWAY_PORT = 8080; private static final Logger LOGGER = LogManager.getLogger(Settings.class); private static final String SETTINGS_FILENAME = "settings.json"; @@ -64,20 +72,36 @@ public class Settings { // API-related private boolean apiEnabled = true; private Integer apiPort; + private boolean apiWhitelistEnabled = true; private String[] apiWhitelist = new String[] { "::1", "127.0.0.1" }; - private Boolean apiRestricted; + + /** Legacy API key (deprecated Nov 2021). 
Use /admin/apikey/generate API endpoint instead */ private String apiKey = null; - /** Whether to disable API key or loopback address checking - * IMPORTANT: do not disable for shared nodes or low-security local networks */ - private boolean apiKeyDisabled = false; + /** Storage location for API key generated by API (Nov 2021 onwards) */ + private String apiKeyPath = ""; + /** Whether to allow automatic authentication from localhost (loopback) addresses */ + private boolean localAuthBypassEnabled = false; + + private Boolean apiRestricted; private boolean apiLoggingEnabled = false; private boolean apiDocumentationEnabled = false; // Both of these need to be set for API to use SSL private String sslKeystorePathname = null; private String sslKeystorePassword = null; + // Domain mapping + private Integer domainMapPort; + private boolean domainMapEnabled = false; + private boolean domainMapLoggingEnabled = false; + private List domainMap = null; + + // Gateway + private Integer gatewayPort; + private boolean gatewayEnabled = false; + private boolean gatewayLoggingEnabled = false; + // Specific to this node private boolean wipeUnconfirmedOnStart = false; /** Maximum number of unconfirmed transactions allowed per account */ @@ -182,6 +206,17 @@ public class Settings { /** Maximum time (in seconds) that we should attempt to remain connected to a peer for */ private int maxPeerConnectionTime = 20 * 60; // seconds + /** Whether to sync multiple blocks at once in normal operation */ + private boolean fastSyncEnabled = true; + /** Whether to sync multiple blocks at once when the peer has a different chain */ + private boolean fastSyncEnabledWhenResolvingFork = true; + /** Maximum number of blocks to request at once */ + private int maxBlocksPerRequest = 100; + /** Maximum number of blocks this node will serve in a single response */ + private int maxBlocksPerResponse = 200; + /** Maximum number of untrimmed blocks this node will serve in a single response */ + private int maxUntrimmedBlocksPerResponse = 10; + // Which blockchains this node is running private String blockchainConfig = null; // use default from resources private BitcoinNet bitcoinNet = BitcoinNet.MAIN; @@ -238,6 +273,69 @@ public class Settings { /** Additional offset added to values returned by NTP.getTime() */ private Long testNtpOffset = null; + + // Data storage (QDN) + + /** Data storage enabled/disabled*/ + private boolean qdnEnabled = true; + /** Data storage path. */ + private String dataPath = "data"; + /** Data storage path (for temporary data). Defaults to {dataPath}/_temp */ + private String tempDataPath = null; + + /** Storage policy to indicate which data should be hosted */ + private String storagePolicy = "FOLLOWED_AND_VIEWED"; + + /** Whether to allow data outside of the storage policy to be relayed between other peers */ + private boolean relayModeEnabled = true; + + /** Whether to remember which data was originally uploaded using this node. + * This prevents auto deletion of own files when storage limits are reached. 
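These QDN storage settings are all surfaced through Settings getters (declared further below). A small sketch of the kind of capacity check a storage manager could perform with them; the threshold logic itself is illustrative and not taken from this patch:

import org.qortal.settings.Settings;

public class StorageCapacitySketch {

    // Decide whether this node may host more QDN data, using the new settings getters
    public static boolean hasSpareCapacity(long bytesAlreadyHosted) {
        Settings settings = Settings.getInstance();

        if (!settings.isQdnEnabled())
            return false;

        Long maxCapacity = settings.getMaxStorageCapacity(); // null means unlimited
        if (maxCapacity == null)
            return true;

        return bytesAlreadyHosted < maxCapacity;
    }
}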
*/ + private boolean originalCopyIndicatorFileEnabled = true; + + /** Whether to make connections directly with peers that have the required data */ + private boolean directDataRetrievalEnabled = true; + + /** Expiry time (ms) for (unencrypted) built/cached data */ + private Long builtDataExpiryInterval = 30 * 24 * 60 * 60 * 1000L; // 30 days + + /** Whether to validate every layer when building arbitrary data, or just the final layer */ + private boolean validateAllDataLayers = false; + + /** Whether to allow public (decryptable) data to be stored */ + private boolean publicDataEnabled = true; + /** Whether to allow private (non-decryptable) data to be stored */ + private boolean privateDataEnabled = false; + + /** Maximum total size of hosted data, in bytes. Unlimited if null */ + private Long maxStorageCapacity = null; + + // Domain mapping + public static class DomainMap { + private String domain; + private String name; + + private DomainMap() { // makes JAXB happy; will never be invoked + } + + public String getDomain() { + return domain; + } + + public void setDomain(String domain) { + this.domain = domain; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + // Constructors private Settings() { @@ -362,6 +460,13 @@ public class Settings { if (this.apiKey != null && this.apiKey.trim().length() < 8) throwValidationError("apiKey must be at least 8 characters"); + + try { + StoragePolicy.valueOf(this.storagePolicy); + } catch (IllegalArgumentException ex) { + String possibleValues = EnumUtils.getNames(StoragePolicy.class, ", "); + throwValidationError(String.format("storagePolicy must be one of: %s", possibleValues)); + } } // Getters / setters @@ -398,6 +503,10 @@ public class Settings { } public String[] getApiWhitelist() { + if (!this.apiWhitelistEnabled) { + // Allow all connections if the whitelist is disabled + return new String[] {"0.0.0.0/0", "::/0"}; + } return this.apiWhitelist; } @@ -414,8 +523,12 @@ public class Settings { return this.apiKey; } - public boolean isApiKeyDisabled() { - return this.apiKeyDisabled; + public String getApiKeyPath() { + return this.apiKeyPath; + } + + public boolean isLocalAuthBypassEnabled() { + return this.localAuthBypassEnabled; } public boolean isApiLoggingEnabled() { @@ -434,6 +547,51 @@ public class Settings { return this.sslKeystorePassword; } + public int getDomainMapPort() { + if (this.domainMapPort != null) + return this.domainMapPort; + + return this.isTestNet ? TESTNET_DOMAIN_MAP_PORT : MAINNET_DOMAIN_MAP_PORT; + } + + public boolean isDomainMapEnabled() { + return this.domainMapEnabled; + } + + public boolean isDomainMapLoggingEnabled() { + return this.domainMapLoggingEnabled; + } + + public Map getSimpleDomainMap() { + HashMap map = new HashMap<>(); + for (DomainMap dMap : this.domainMap) { + map.put(dMap.getDomain(), dMap.getName()); + + // If the domain doesn't include a subdomain then add a www. alternative + if (dMap.getDomain().chars().filter(c -> c == '.').count() == 1) { + map.put("www.".concat(dMap.getDomain()), dMap.getName()); + } + } + return map; + } + + + public int getGatewayPort() { + if (this.gatewayPort != null) + return this.gatewayPort; + + return this.isTestNet ? 
TESTNET_GATEWAY_PORT : MAINNET_GATEWAY_PORT; + } + + public boolean isGatewayEnabled() { + return this.gatewayEnabled; + } + + public boolean isGatewayLoggingEnabled() { + return this.gatewayLoggingEnabled; + } + + public boolean getWipeUnconfirmedOnStart() { return this.wipeUnconfirmedOnStart; } @@ -539,6 +697,20 @@ public class Settings { return this.bootstrapFilenamePrefix; } + public boolean isFastSyncEnabled() { + return this.fastSyncEnabled; + } + + public boolean isFastSyncEnabledWhenResolvingFork() { + return this.fastSyncEnabledWhenResolvingFork; + } + + public int getMaxBlocksPerRequest() { return this.maxBlocksPerRequest; } + + public int getMaxBlocksPerResponse() { return this.maxBlocksPerResponse; } + + public int getMaxUntrimmedBlocksPerResponse() { return this.maxUntrimmedBlocksPerResponse; } + public boolean isAutoUpdateEnabled() { return this.autoUpdateEnabled; } @@ -664,4 +836,56 @@ public class Settings { return this.bootstrap; } + + public boolean isQdnEnabled() { + return this.qdnEnabled; + } + + public String getDataPath() { + return this.dataPath; + } + + public String getTempDataPath() { + if (this.tempDataPath != null) { + return this.tempDataPath; + } + // Default the temp path to a "_temp" folder inside the data directory + return Paths.get(this.getDataPath(), "_temp").toString(); + } + + public StoragePolicy getStoragePolicy() { + return StoragePolicy.valueOf(this.storagePolicy); + } + + public boolean isRelayModeEnabled() { + return this.relayModeEnabled; + } + + public boolean isDirectDataRetrievalEnabled() { + return this.directDataRetrievalEnabled; + } + + public boolean isOriginalCopyIndicatorFileEnabled() { + return this.originalCopyIndicatorFileEnabled; + } + + public Long getBuiltDataExpiryInterval() { + return this.builtDataExpiryInterval; + } + + public boolean shouldValidateAllDataLayers() { + return this.validateAllDataLayers; + } + + public boolean isPublicDataEnabled() { + return this.publicDataEnabled; + } + + public boolean isPrivateDataEnabled() { + return this.privateDataEnabled; + } + + public Long getMaxStorageCapacity() { + return this.maxStorageCapacity; + } } diff --git a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java index f75b7f19..d8740351 100644 --- a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java +++ b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java @@ -1,15 +1,30 @@ package org.qortal.transaction; +import java.util.Arrays; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; import org.qortal.account.Account; +import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.controller.arbitrary.ArbitraryDataStorageManager; +import org.qortal.crypto.Crypto; +import org.qortal.crypto.MemoryPoW; import org.qortal.data.PaymentData; +import org.qortal.data.naming.NameData; import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.TransactionData; +import org.qortal.network.Network; +import org.qortal.network.message.ArbitrarySignaturesMessage; +import org.qortal.network.message.Message; import org.qortal.payment.Payment; import org.qortal.repository.DataException; import org.qortal.repository.Repository; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.transform.TransformationException; +import org.qortal.transform.transaction.ArbitraryTransactionTransformer; +import org.qortal.transform.transaction.TransactionTransformer; 
+import org.qortal.utils.ArbitraryTransactionUtils; public class ArbitraryTransaction extends Transaction { @@ -18,6 +33,10 @@ public class ArbitraryTransaction extends Transaction { // Other useful constants public static final int MAX_DATA_SIZE = 4000; + public static final int MAX_METADATA_LENGTH = 32; + public static final int HASH_LENGTH = TransactionTransformer.SHA256_LENGTH; + public static final int POW_BUFFER_SIZE = 8 * 1024 * 1024; // bytes + public static final int MAX_IDENTIFIER_LENGTH = 64; // Constructors @@ -42,17 +61,148 @@ public class ArbitraryTransaction extends Transaction { // Processing + public void computeNonce() throws DataException { + byte[] transactionBytes; + + try { + transactionBytes = TransactionTransformer.toBytesForSigning(this.transactionData); + } catch (TransformationException e) { + throw new RuntimeException("Unable to transform transaction to byte array for verification", e); + } + + // Clear nonce from transactionBytes + ArbitraryTransactionTransformer.clearNonce(transactionBytes); + + // Calculate nonce + int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty(); + this.arbitraryTransactionData.setNonce(MemoryPoW.compute2(transactionBytes, POW_BUFFER_SIZE, difficulty)); + } + + @Override + public ValidationResult isFeeValid() throws DataException { + if (this.transactionData.getFee() < 0) + return ValidationResult.NEGATIVE_FEE; + + return ValidationResult.OK; + } + + @Override + public boolean hasValidReference() throws DataException { + // We shouldn't really get this far, but just in case: + if (this.arbitraryTransactionData.getReference() == null) { + return false; + } + + // If the account current doesn't have a last reference, and the fee is 0, we will allow any value. + // This ensures that the first transaction for an account will be valid whilst still validating + // the last reference from the second transaction onwards. By checking for a zero fee, we ensure + // standard last reference validation when fee > 0. 
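The nonce handling above follows the same pattern as CHAT transactions: the transaction is serialised for signing, the nonce field is cleared, and MemoryPoW fills it in. A sketch of the expected call order when building such a transaction, assuming the existing ArbitraryTransaction(Repository, TransactionData) constructor and Transaction.sign():

import org.qortal.account.PrivateKeyAccount;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.transaction.ArbitraryTransaction;

public class NonceThenSignSketch {

    // Compute the MemoryPoW nonce first, then sign: the signature covers the serialised nonce field
    public static void nonceAndSign(Repository repository, ArbitraryTransactionData transactionData,
            PrivateKeyAccount creator) throws DataException {
        ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData);
        transaction.computeNonce(); // fills in the nonce via MemoryPoW.compute2()
        transaction.sign(creator);
    }
}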
+ Account creator = getCreator(); + Long fee = this.arbitraryTransactionData.getFee(); + if (creator.getLastReference() == null && fee == 0) { + return true; + } + + return super.hasValidReference(); + } + @Override public ValidationResult isValid() throws DataException { - // Check data length - if (arbitraryTransactionData.getData().length < 1 || arbitraryTransactionData.getData().length > MAX_DATA_SIZE) + // Check that some data - or a data hash - has been supplied + if (arbitraryTransactionData.getData() == null) { return ValidationResult.INVALID_DATA_LENGTH; + } + + // Check data length + if (arbitraryTransactionData.getData().length < 1 || arbitraryTransactionData.getData().length > MAX_DATA_SIZE) { + return ValidationResult.INVALID_DATA_LENGTH; + } + + // Check hashes and metadata + if (arbitraryTransactionData.getDataType() == ArbitraryTransactionData.DataType.DATA_HASH) { + // Check length of data hash + if (arbitraryTransactionData.getData().length != HASH_LENGTH) { + return ValidationResult.INVALID_DATA_LENGTH; + } + + // Version 5+ + if (arbitraryTransactionData.getVersion() >= 5) { + byte[] metadata = arbitraryTransactionData.getMetadataHash(); + + // Check maximum length of metadata hash + if (metadata != null && metadata.length > MAX_METADATA_LENGTH) { + return ValidationResult.INVALID_DATA_LENGTH; + } + } + } + + // Check raw data + if (arbitraryTransactionData.getDataType() == ArbitraryTransactionData.DataType.RAW_DATA) { + // Version 5+ + if (arbitraryTransactionData.getVersion() >= 5) { + // Check reported length of the raw data + // We should not download the raw data, so validation of that will be performed later + if (arbitraryTransactionData.getSize() > ArbitraryDataFile.MAX_FILE_SIZE) { + return ValidationResult.INVALID_DATA_LENGTH; + } + } + } + + // Check name if one has been included + if (arbitraryTransactionData.getName() != null) { + NameData nameData = this.repository.getNameRepository().fromName(arbitraryTransactionData.getName()); + + // Check the name is registered + if (nameData == null) { + return ValidationResult.NAME_DOES_NOT_EXIST; + } + + // Check that the transaction signer owns the name + if (!Objects.equals(this.getCreator().getAddress(), nameData.getOwner())) { + return ValidationResult.INVALID_NAME_OWNER; + } + } // Wrap and delegate final payment validity checks to Payment class return new Payment(this.repository).isValid(arbitraryTransactionData.getSenderPublicKey(), arbitraryTransactionData.getPayments(), arbitraryTransactionData.getFee()); } + @Override + public boolean isSignatureValid() { + byte[] signature = this.transactionData.getSignature(); + if (signature == null) { + return false; + } + + byte[] transactionBytes; + + try { + transactionBytes = ArbitraryTransactionTransformer.toBytesForSigning(this.transactionData); + } catch (TransformationException e) { + throw new RuntimeException("Unable to transform transaction to byte array for verification", e); + } + + if (!Crypto.verify(this.transactionData.getCreatorPublicKey(), signature, transactionBytes)) { + return false; + } + + // Nonce wasn't added until version 5+ + if (arbitraryTransactionData.getVersion() >= 5) { + + int nonce = arbitraryTransactionData.getNonce(); + + // Clear nonce from transactionBytes + ArbitraryTransactionTransformer.clearNonce(transactionBytes); + + // Check nonce + int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty(); + return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, difficulty, nonce); + } + + return true; + } + @Override 
public ValidationResult isProcessable() throws DataException { // Wrap and delegate final payment processable checks to Payment class @@ -60,6 +210,30 @@ public class ArbitraryTransaction extends Transaction { arbitraryTransactionData.getFee()); } + @Override + protected void onImportAsUnconfirmed() throws DataException { + // We may need to move files from the misc_ folder + ArbitraryTransactionUtils.checkAndRelocateMiscFiles(arbitraryTransactionData); + + // If the data is local, we need to perform a few actions + if (isDataLocal()) { + + // We have the data for this transaction, so invalidate the cache + if (arbitraryTransactionData.getName() != null) { + ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData); + } + + // We also need to broadcast to the network that we are now hosting files for this transaction, + // but only if these files are in accordance with our storage policy + if (ArbitraryDataStorageManager.getInstance().canStoreData(arbitraryTransactionData)) { + // Use a null peer address to indicate our own + byte[] signature = arbitraryTransactionData.getSignature(); + Message arbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, Arrays.asList(signature)); + Network.getInstance().broadcast(broadcastPeer -> arbitrarySignatureMessage); + } + } + } + @Override public void preProcess() throws DataException { // Nothing to do @@ -100,10 +274,9 @@ public class ArbitraryTransaction extends Transaction { /** Returns arbitrary data payload, fetching from network if needed. Can block for a while! */ public byte[] fetchData() throws DataException { // If local, read from file - if (isDataLocal()) + if (isDataLocal()) { return this.repository.getArbitraryRepository().fetchData(this.transactionData.getSignature()); - - // TODO If not local, attempt to fetch via network? 
+ } return null; } diff --git a/src/main/java/org/qortal/transaction/ChatTransaction.java b/src/main/java/org/qortal/transaction/ChatTransaction.java index 8371b5b7..a486d408 100644 --- a/src/main/java/org/qortal/transaction/ChatTransaction.java +++ b/src/main/java/org/qortal/transaction/ChatTransaction.java @@ -8,6 +8,7 @@ import org.qortal.account.PublicKeyAccount; import org.qortal.asset.Asset; import org.qortal.crypto.Crypto; import org.qortal.crypto.MemoryPoW; +import org.qortal.data.naming.NameData; import org.qortal.data.transaction.ChatTransactionData; import org.qortal.data.transaction.TransactionData; import org.qortal.group.Group; @@ -144,10 +145,22 @@ public class ChatTransaction extends Transaction { public ValidationResult isValid() throws DataException { // Nonce checking is done via isSignatureValid() as that method is only called once per import - // Check for blacklisted author by address + // Check for blocked author by address ResourceListManager listManager = ResourceListManager.getInstance(); - if (listManager.listContains("blacklist", "address", this.chatTransactionData.getSender())) { - return ValidationResult.ADDRESS_IN_BLACKLIST; + if (listManager.listContains("blockedAddresses", this.chatTransactionData.getSender(), true)) { + return ValidationResult.ADDRESS_BLOCKED; + } + + // Check for blocked author by registered name + List names = this.repository.getNameRepository().getNamesByOwner(this.chatTransactionData.getSender()); + if (names != null && names.size() > 0) { + for (NameData nameData : names) { + if (nameData != null && nameData.getName() != null) { + if (listManager.listContains("blockedNames", nameData.getName(), false)) { + return ValidationResult.NAME_BLOCKED; + } + } + } } // If we exist in the repository then we've been imported as unconfirmed, diff --git a/src/main/java/org/qortal/transaction/Transaction.java b/src/main/java/org/qortal/transaction/Transaction.java index 7eb93bc4..43591374 100644 --- a/src/main/java/org/qortal/transaction/Transaction.java +++ b/src/main/java/org/qortal/transaction/Transaction.java @@ -247,7 +247,8 @@ public abstract class Transaction { INVALID_GROUP_BLOCK_DELAY(93), INCORRECT_NONCE(94), INVALID_TIMESTAMP_SIGNATURE(95), - ADDRESS_IN_BLACKLIST(96), + ADDRESS_BLOCKED(96), + NAME_BLOCKED(97), INVALID_BUT_OK(999), NOT_YET_RELEASED(1000); @@ -316,6 +317,10 @@ public abstract class Transaction { return this.transactionData; } + public void setRepository(Repository repository) { + this.repository = repository; + } + // More information public static long getDeadline(TransactionData transactionData) { @@ -345,6 +350,10 @@ public abstract class Transaction { long unitFee = BlockChain.getInstance().getUnitFee(); int maxBytePerUnitFee = BlockChain.getInstance().getMaxBytesPerUnitFee(); + // If the unit fee is zero, any fee is enough to cover the byte-length of the transaction + if (unitFee == 0) { + return true; + } return this.feePerByte() >= maxBytePerUnitFee / unitFee; } @@ -373,7 +382,7 @@ public abstract class Transaction { * @return transaction version number */ public static int getVersionByTimestamp(long timestamp) { - return 4; + return 5; // TODO: hard fork timestamp!! 
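		// Sketch only, not part of this patch: once the hard fork timestamp is agreed, this is expected
		// to become a timestamp check along the lines of:
		//     return timestamp >= BlockChain.getInstance().getTransactionV5Timestamp() ? 5 : 4;
		// where getTransactionV5Timestamp() is a hypothetical BlockChain setting named purely for illustration.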
} /** diff --git a/src/main/java/org/qortal/transform/Transformer.java b/src/main/java/org/qortal/transform/Transformer.java index 341d545b..e78d3284 100644 --- a/src/main/java/org/qortal/transform/Transformer.java +++ b/src/main/java/org/qortal/transform/Transformer.java @@ -20,5 +20,6 @@ public abstract class Transformer { public static final int MD5_LENGTH = 16; public static final int SHA256_LENGTH = 32; + public static final int AES256_LENGTH = 32; } diff --git a/src/main/java/org/qortal/transform/block/BlockTransformer.java b/src/main/java/org/qortal/transform/block/BlockTransformer.java index 8b91fd11..cce3e7d7 100644 --- a/src/main/java/org/qortal/transform/block/BlockTransformer.java +++ b/src/main/java/org/qortal/transform/block/BlockTransformer.java @@ -74,19 +74,30 @@ public class BlockTransformer extends Transformer { } /** - * Extract block data and transaction data from serialized bytes. - * + * Extract block data and transaction data from serialized bytes containing a single block. + * * @param bytes * @return BlockData and a List of transactions. * @throws TransformationException */ public static Triple, List> fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException { + return BlockTransformer.fromByteBuffer(byteBuffer, true); + } + + /** + * Extract block data and transaction data from serialized bytes containing one or more blocks. + * + * @param bytes + * @return the next block's BlockData and a List of transactions. + * @throws TransformationException + */ + public static Triple, List> fromByteBuffer(ByteBuffer byteBuffer, boolean finalBlockInBuffer) throws TransformationException { int version = byteBuffer.getInt(); - if (byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH) + if (finalBlockInBuffer && byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH) throw new TransformationException("Byte data too short for Block"); - if (byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize()) + if (finalBlockInBuffer && byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize()) throw new TransformationException("Byte data too long for Block"); long timestamp = byteBuffer.getLong(); @@ -210,7 +221,8 @@ public class BlockTransformer extends Transformer { byteBuffer.get(onlineAccountsSignatures); } - if (byteBuffer.hasRemaining()) + // We should only complain about excess byte data if we aren't expecting more blocks in this ByteBuffer + if (finalBlockInBuffer && byteBuffer.hasRemaining()) throw new TransformationException("Excess byte data found after parsing Block"); // We don't have a height! 
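The new finalBlockInBuffer flag above lets a single ByteBuffer containing several consecutive serialized blocks be parsed with repeated calls, with the length and excess-data checks applied only to the last block in the buffer. A minimal caller-side sketch follows; the Triple element types are assumed from the surrounding javadoc, and the wrapper class and method names are illustrative only, not part of this change:

import java.nio.ByteBuffer;
import java.util.List;

import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformer;
import org.qortal.utils.Triple;

// Hypothetical helper, not part of this change
public class MultiBlockParsingSketch {

    /** Parses blockCount consecutive blocks from a single byte array. */
    public static void parseBlocks(byte[] multiBlockBytes, int blockCount) throws TransformationException {
        ByteBuffer byteBuffer = ByteBuffer.wrap(multiBlockBytes);

        for (int i = 0; i < blockCount; i++) {
            // Size and excess-data validation only applies to the final block in the buffer
            boolean isFinalBlock = (i == blockCount - 1);

            Triple<BlockData, List<TransactionData>, List<ATStateData>> nextBlock =
                    BlockTransformer.fromByteBuffer(byteBuffer, isFinalBlock);

            // ... hand nextBlock to block sync / import logic as needed
        }
    }
}

Each call advances the buffer's position past one block, so the caller needs to know (or detect) how many blocks the buffer holds.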
diff --git a/src/main/java/org/qortal/transform/transaction/ArbitraryTransactionTransformer.java b/src/main/java/org/qortal/transform/transaction/ArbitraryTransactionTransformer.java index 3402ca66..e1514b4b 100644 --- a/src/main/java/org/qortal/transform/transaction/ArbitraryTransactionTransformer.java +++ b/src/main/java/org/qortal/transform/transaction/ArbitraryTransactionTransformer.java @@ -6,12 +6,15 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import com.google.common.base.Utf8; +import org.qortal.arbitrary.misc.Service; import org.qortal.crypto.Crypto; import org.qortal.data.PaymentData; import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.BaseTransactionData; import org.qortal.data.transaction.TransactionData; import org.qortal.data.transaction.ArbitraryTransactionData.DataType; +import org.qortal.naming.Name; import org.qortal.transaction.ArbitraryTransaction; import org.qortal.transaction.Transaction; import org.qortal.transaction.Transaction.TransactionType; @@ -26,12 +29,23 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { // Property lengths private static final int SERVICE_LENGTH = INT_LENGTH; + private static final int NONCE_LENGTH = INT_LENGTH; private static final int DATA_TYPE_LENGTH = BYTE_LENGTH; private static final int DATA_SIZE_LENGTH = INT_LENGTH; + private static final int RAW_DATA_SIZE_LENGTH = INT_LENGTH; + private static final int METADATA_HASH_SIZE_LENGTH = INT_LENGTH; private static final int NUMBER_PAYMENTS_LENGTH = INT_LENGTH; + private static final int NAME_SIZE_LENGTH = INT_LENGTH; + private static final int IDENTIFIER_SIZE_LENGTH = INT_LENGTH; + private static final int COMPRESSION_LENGTH = INT_LENGTH; + private static final int METHOD_LENGTH = INT_LENGTH; + private static final int SECRET_LENGTH = INT_LENGTH; // int prefix holding the secret's length; the secret bytes themselves are counted separately
private static final int EXTRAS_LENGTH = SERVICE_LENGTH + DATA_TYPE_LENGTH + DATA_SIZE_LENGTH; + private static final int EXTRAS_V5_LENGTH = NONCE_LENGTH + NAME_SIZE_LENGTH + IDENTIFIER_SIZE_LENGTH + + METHOD_LENGTH + SECRET_LENGTH + COMPRESSION_LENGTH + RAW_DATA_SIZE_LENGTH + METADATA_HASH_SIZE_LENGTH; + protected static final TransactionLayout layout; static { @@ -41,8 +55,18 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { layout.add("transaction's groupID", TransformationType.INT); layout.add("reference", TransformationType.SIGNATURE); layout.add("sender's public key", TransformationType.PUBLIC_KEY); - layout.add("number of payments", TransformationType.INT); + layout.add("nonce", TransformationType.INT); // Version 5+ + layout.add("name length", TransformationType.INT); // Version 5+ + layout.add("name", TransformationType.DATA); // Version 5+ + layout.add("identifier length", TransformationType.INT); // Version 5+ + layout.add("identifier", TransformationType.DATA); // Version 5+ + layout.add("method", TransformationType.INT); // Version 5+ + layout.add("secret length", TransformationType.INT); // Version 5+ + layout.add("secret", TransformationType.DATA); // Version 5+ + layout.add("compression", TransformationType.INT); // Version 5+ + + layout.add("number of payments", TransformationType.INT); layout.add("* recipient", TransformationType.ADDRESS); layout.add("* asset ID of payment", TransformationType.LONG); layout.add("* payment amount", TransformationType.AMOUNT); @@ -51,6 +75,11 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { layout.add("is data raw?", TransformationType.BOOLEAN); layout.add("data length", TransformationType.INT); layout.add("data", TransformationType.DATA); + + layout.add("raw data size", TransformationType.INT); // Version 5+ + layout.add("metadata hash length", TransformationType.INT); // Version 5+ + layout.add("metadata hash", TransformationType.DATA); // Version 5+ + layout.add("fee", TransformationType.AMOUNT); layout.add("signature", TransformationType.SIGNATURE); } @@ -67,6 +96,32 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { byte[] senderPublicKey = Serialization.deserializePublicKey(byteBuffer); + int nonce = 0; + String name = null; + String identifier = null; + ArbitraryTransactionData.Method method = null; + byte[] secret = null; + ArbitraryTransactionData.Compression compression = null; + + if (version >= 5) { + nonce = byteBuffer.getInt(); + + name = Serialization.deserializeSizedString(byteBuffer, Name.MAX_NAME_SIZE); + + identifier = Serialization.deserializeSizedString(byteBuffer, ArbitraryTransaction.MAX_IDENTIFIER_LENGTH); + + method = ArbitraryTransactionData.Method.valueOf(byteBuffer.getInt()); + + int secretLength = byteBuffer.getInt(); + + if (secretLength > 0) { + secret = new byte[secretLength]; + byteBuffer.get(secret); + } + + compression = ArbitraryTransactionData.Compression.valueOf(byteBuffer.getInt()); + } + // Always return a list of payments, even if empty List payments = new ArrayList<>(); if (version != 1) { @@ -76,7 +131,7 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { payments.add(PaymentTransformer.fromByteBuffer(byteBuffer)); } - int service = byteBuffer.getInt(); + Service service = Service.valueOf(byteBuffer.getInt()); // We might be receiving hash of data instead of actual raw data boolean isRaw = byteBuffer.get() != 0; @@ -91,6 +146,20 @@ public class ArbitraryTransactionTransformer 
extends TransactionTransformer { byte[] data = new byte[dataSize]; byteBuffer.get(data); + int size = 0; + byte[] metadataHash = null; + + if (version >= 5) { + size = byteBuffer.getInt(); + + int metadataHashLength = byteBuffer.getInt(); + + if (metadataHashLength > 0) { + metadataHash = new byte[metadataHashLength]; + byteBuffer.get(metadataHash); + } + } + long fee = byteBuffer.getLong(); byte[] signature = new byte[SIGNATURE_LENGTH]; @@ -98,13 +167,24 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, senderPublicKey, fee, signature); - return new ArbitraryTransactionData(baseTransactionData, version, service, data, dataType, payments); + return new ArbitraryTransactionData(baseTransactionData, version, service, nonce, size, name, identifier, + method, secret, compression, data, dataType, metadataHash, payments); } public static int getDataLength(TransactionData transactionData) throws TransformationException { ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData; - int length = getBaseLength(transactionData) + EXTRAS_LENGTH + arbitraryTransactionData.getData().length; + int nameLength = (arbitraryTransactionData.getName() != null) ? Utf8.encodedLength(arbitraryTransactionData.getName()) : 0; + int identifierLength = (arbitraryTransactionData.getIdentifier() != null) ? Utf8.encodedLength(arbitraryTransactionData.getIdentifier()) : 0; + int secretLength = (arbitraryTransactionData.getSecret() != null) ? arbitraryTransactionData.getSecret().length : 0; + int dataLength = (arbitraryTransactionData.getData() != null) ? arbitraryTransactionData.getData().length : 0; + int metadataHashLength = (arbitraryTransactionData.getMetadataHash() != null) ? arbitraryTransactionData.getMetadataHash().length : 0; + + int length = getBaseLength(transactionData) + EXTRAS_LENGTH + nameLength + identifierLength + secretLength + dataLength + metadataHashLength; + + if (arbitraryTransactionData.getVersion() >= 5) { + length += EXTRAS_V5_LENGTH; + } // Optional payments length += NUMBER_PAYMENTS_LENGTH + arbitraryTransactionData.getPayments().size() * PaymentTransformer.getDataLength(); @@ -120,19 +200,51 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { transformCommonBytes(transactionData, bytes); + if (arbitraryTransactionData.getVersion() >= 5) { + bytes.write(Ints.toByteArray(arbitraryTransactionData.getNonce())); + + Serialization.serializeSizedString(bytes, arbitraryTransactionData.getName()); + + Serialization.serializeSizedString(bytes, arbitraryTransactionData.getIdentifier()); + + bytes.write(Ints.toByteArray(arbitraryTransactionData.getMethod().value)); + + byte[] secret = arbitraryTransactionData.getSecret(); + int secretLength = (secret != null) ? secret.length : 0; + bytes.write(Ints.toByteArray(secretLength)); + + if (secretLength > 0) { + bytes.write(secret); + } + + bytes.write(Ints.toByteArray(arbitraryTransactionData.getCompression().value)); + } + List payments = arbitraryTransactionData.getPayments(); bytes.write(Ints.toByteArray(payments.size())); for (PaymentData paymentData : payments) bytes.write(PaymentTransformer.toBytes(paymentData)); - bytes.write(Ints.toByteArray(arbitraryTransactionData.getService())); + bytes.write(Ints.toByteArray(arbitraryTransactionData.getService().value)); bytes.write((byte) (arbitraryTransactionData.getDataType() == DataType.RAW_DATA ? 
1 : 0)); bytes.write(Ints.toByteArray(arbitraryTransactionData.getData().length)); bytes.write(arbitraryTransactionData.getData()); + if (arbitraryTransactionData.getVersion() >= 5) { + bytes.write(Ints.toByteArray(arbitraryTransactionData.getSize())); + + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); + int metadataHashLength = (metadataHash != null) ? metadataHash.length : 0; + bytes.write(Ints.toByteArray(metadataHashLength)); + + if (metadataHashLength > 0) { + bytes.write(metadataHash); + } + } + bytes.write(Longs.toByteArray(arbitraryTransactionData.getFee())); if (arbitraryTransactionData.getSignature() != null) @@ -159,6 +271,26 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { transformCommonBytes(arbitraryTransactionData, bytes); + if (arbitraryTransactionData.getVersion() >= 5) { + bytes.write(Ints.toByteArray(arbitraryTransactionData.getNonce())); + + Serialization.serializeSizedString(bytes, arbitraryTransactionData.getName()); + + Serialization.serializeSizedString(bytes, arbitraryTransactionData.getIdentifier()); + + bytes.write(Ints.toByteArray(arbitraryTransactionData.getMethod().value)); + + byte[] secret = arbitraryTransactionData.getSecret(); + int secretLength = (secret != null) ? secret.length : 0; + bytes.write(Ints.toByteArray(secretLength)); + + if (secretLength > 0) { + bytes.write(secret); + } + + bytes.write(Ints.toByteArray(arbitraryTransactionData.getCompression().value)); + } + if (arbitraryTransactionData.getVersion() != 1) { List payments = arbitraryTransactionData.getPayments(); bytes.write(Ints.toByteArray(payments.size())); @@ -167,7 +299,7 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { bytes.write(PaymentTransformer.toBytes(paymentData)); } - bytes.write(Ints.toByteArray(arbitraryTransactionData.getService())); + bytes.write(Ints.toByteArray(arbitraryTransactionData.getService().value)); bytes.write(Ints.toByteArray(arbitraryTransactionData.getData().length)); @@ -182,6 +314,18 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { break; } + if (arbitraryTransactionData.getVersion() >= 5) { + bytes.write(Ints.toByteArray(arbitraryTransactionData.getSize())); + + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); + int metadataHashLength = (metadataHash != null) ? 
metadataHash.length : 0; + bytes.write(Ints.toByteArray(metadataHashLength)); + + if (metadataHashLength > 0) { + bytes.write(metadataHash); + } + } + bytes.write(Longs.toByteArray(arbitraryTransactionData.getFee())); // Never append signature @@ -192,4 +336,13 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer { } } + public static void clearNonce(byte[] transactionBytes) { + int nonceIndex = TYPE_LENGTH + TIMESTAMP_LENGTH + GROUPID_LENGTH + REFERENCE_LENGTH + PUBLIC_KEY_LENGTH; + + transactionBytes[nonceIndex++] = (byte) 0; + transactionBytes[nonceIndex++] = (byte) 0; + transactionBytes[nonceIndex++] = (byte) 0; + transactionBytes[nonceIndex++] = (byte) 0; + } + } diff --git a/src/main/java/org/qortal/utils/ArbitraryTransactionUtils.java b/src/main/java/org/qortal/utils/ArbitraryTransactionUtils.java new file mode 100644 index 00000000..51c2b2d3 --- /dev/null +++ b/src/main/java/org/qortal/utils/ArbitraryTransactionUtils.java @@ -0,0 +1,355 @@ +package org.qortal.utils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataFileChunk; +import org.qortal.arbitrary.misc.Service; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.settings.Settings; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + + +public class ArbitraryTransactionUtils { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryTransactionUtils.class); + + public static ArbitraryTransactionData fetchTransactionData(final Repository repository, final byte[] signature) { + try { + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (!(transactionData instanceof ArbitraryTransactionData)) + return null; + + return (ArbitraryTransactionData) transactionData; + + } catch (DataException e) { + LOGGER.error("Repository issue when fetching arbitrary transaction data", e); + return null; + } + } + + public static ArbitraryTransactionData fetchLatestPut(Repository repository, ArbitraryTransactionData arbitraryTransactionData) { + if (arbitraryTransactionData == null) { + return null; + } + + String name = arbitraryTransactionData.getName(); + Service service = arbitraryTransactionData.getService(); + String identifier = arbitraryTransactionData.getIdentifier(); + + if (name == null || service == null) { + return null; + } + + // Get the most recent PUT for this name and service + ArbitraryTransactionData latestPut; + try { + latestPut = repository.getArbitraryRepository() + .getLatestTransaction(name, service, ArbitraryTransactionData.Method.PUT, identifier); + } catch (DataException e) { + return null; + } + + return latestPut; + } + + public static boolean hasMoreRecentPutTransaction(Repository repository, ArbitraryTransactionData arbitraryTransactionData) { + byte[] signature = arbitraryTransactionData.getSignature(); + if (signature == null) { + // We can't make a sensible decision without a signature + // so it's best to assume there is nothing newer + return false; + } + + ArbitraryTransactionData latestPut = 
ArbitraryTransactionUtils.fetchLatestPut(repository, arbitraryTransactionData); + if (latestPut == null) { + return false; + } + + // If the latest PUT transaction has a newer timestamp, it will override the existing transaction + // Any data relating to the older transaction is no longer needed + boolean hasNewerPut = (latestPut.getTimestamp() > arbitraryTransactionData.getTimestamp()); + return hasNewerPut; + } + + public static boolean completeFileExists(ArbitraryTransactionData transactionData) throws DataException { + if (transactionData == null) { + return false; + } + + byte[] digest = transactionData.getData(); + byte[] signature = transactionData.getSignature(); + + // Load complete file + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature); + return arbitraryDataFile.exists(); + + } + + public static boolean allChunksExist(ArbitraryTransactionData transactionData) throws DataException { + if (transactionData == null) { + return false; + } + + byte[] digest = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); + byte[] signature = transactionData.getSignature(); + + // Load complete file and chunks + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature); + arbitraryDataFile.setMetadataHash(metadataHash); + + return arbitraryDataFile.allChunksExist(); + } + + public static boolean anyChunksExist(ArbitraryTransactionData transactionData) throws DataException { + if (transactionData == null) { + return false; + } + + byte[] digest = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); + byte[] signature = transactionData.getSignature(); + + if (metadataHash == null) { + // This file doesn't have any metadata/chunks, which means none exist + return false; + } + + // Load complete file and chunks + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature); + arbitraryDataFile.setMetadataHash(metadataHash); + + return arbitraryDataFile.anyChunksExist(); + } + + public static int ourChunkCount(ArbitraryTransactionData transactionData) throws DataException { + if (transactionData == null) { + return 0; + } + + byte[] digest = transactionData.getData(); + byte[] metadataHash = transactionData.getMetadataHash(); + byte[] signature = transactionData.getSignature(); + + if (metadataHash == null) { + // This file doesn't have any metadata, therefore it has no chunks + return 0; + } + + // Load complete file and chunks + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature); + arbitraryDataFile.setMetadataHash(metadataHash); + + return arbitraryDataFile.chunkCount(); + } + + public static boolean isFileRecent(Path filePath, long now, long cleanupAfter) { + try { + BasicFileAttributes attr = Files.readAttributes(filePath, BasicFileAttributes.class); + long timeSinceCreated = now - attr.creationTime().toMillis(); + long timeSinceModified = now - attr.lastModifiedTime().toMillis(); + //LOGGER.info(String.format("timeSinceCreated for path %s is %d. 
cleanupAfter: %d", filePath, timeSinceCreated, cleanupAfter)); + + // Check if the file has been created or modified recently + if (timeSinceCreated > cleanupAfter) { + return false; + } + if (timeSinceModified > cleanupAfter) { + return false; + } + + } catch (IOException e) { + // Can't read file attributes, so assume it's recent so that we don't delete something accidentally + } + return true; + } + + public static boolean isFileHashRecent(byte[] hash, byte[] signature, long now, long cleanupAfter) throws DataException { + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + if (arbitraryDataFile == null || !arbitraryDataFile.exists()) { + // No hash, or file doesn't exist, so it's not recent + return false; + } + + Path filePath = arbitraryDataFile.getFilePath(); + return ArbitraryTransactionUtils.isFileRecent(filePath, now, cleanupAfter); + } + + public static void deleteCompleteFile(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) throws DataException { + byte[] completeHash = arbitraryTransactionData.getData(); + byte[] signature = arbitraryTransactionData.getSignature(); + + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature); + + if (!ArbitraryTransactionUtils.isFileHashRecent(completeHash, signature, now, cleanupAfter)) { + LOGGER.info("Deleting file {} because it can be rebuilt from chunks " + + "if needed", Base58.encode(completeHash)); + + arbitraryDataFile.delete(); + } + } + + public static void deleteCompleteFileAndChunks(ArbitraryTransactionData arbitraryTransactionData) throws DataException { + byte[] completeHash = arbitraryTransactionData.getData(); + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); + byte[] signature = arbitraryTransactionData.getSignature(); + + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature); + arbitraryDataFile.setMetadataHash(metadataHash); + arbitraryDataFile.deleteAll(); + } + + public static void convertFileToChunks(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) throws DataException { + byte[] completeHash = arbitraryTransactionData.getData(); + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); + byte[] signature = arbitraryTransactionData.getSignature(); + + // Find the expected chunk hashes + ArbitraryDataFile expectedDataFile = ArbitraryDataFile.fromHash(completeHash, signature); + expectedDataFile.setMetadataHash(metadataHash); + + if (metadataHash == null || !expectedDataFile.getMetadataFile().exists()) { + // We don't have the metadata file, or this transaction doesn't have one - nothing to do + return; + } + + // Split the file into chunks + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature); + int chunkCount = arbitraryDataFile.split(ArbitraryDataFile.CHUNK_SIZE); + if (chunkCount > 1) { + LOGGER.info(String.format("Successfully split %s into %d chunk%s", + Base58.encode(completeHash), chunkCount, (chunkCount == 1 ? 
"" : "s"))); + + // Verify that the chunk hashes match those in the transaction + byte[] chunkHashes = expectedDataFile.chunkHashes(); + if (chunkHashes != null && Arrays.equals(chunkHashes, arbitraryDataFile.chunkHashes())) { + // Ensure they exist on disk + if (arbitraryDataFile.allChunksExist()) { + + // Now delete the original file if it's not recent + if (!ArbitraryTransactionUtils.isFileHashRecent(completeHash, signature, now, cleanupAfter)) { + LOGGER.info("Deleting file {} because it can now be rebuilt from " + + "chunks if needed", Base58.encode(completeHash)); + + ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, cleanupAfter); + } + else { + // File might be in use. It's best to leave it and it it will be cleaned up later. + } + } + } + } + } + + /** + * When first uploaded, files go into a _misc folder as they are not yet associated with a + * transaction signature. Once the transaction is broadcast, they need to be moved to the + * correct location, keyed by the transaction signature. + * @param arbitraryTransactionData + * @return + * @throws DataException + */ + public static int checkAndRelocateMiscFiles(ArbitraryTransactionData arbitraryTransactionData) { + int filesRelocatedCount = 0; + + try { + // Load hashes + byte[] digest = arbitraryTransactionData.getData(); + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); + + // Load signature + byte[] signature = arbitraryTransactionData.getSignature(); + + // Check if any files for this transaction exist in the misc folder + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, null); + arbitraryDataFile.setMetadataHash(metadataHash); + + if (arbitraryDataFile.anyChunksExist()) { + // At least one chunk exists in the misc folder - move them + for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) { + if (chunk.exists()) { + // Determine the correct path by initializing a new ArbitraryDataFile instance with the signature + ArbitraryDataFile newChunk = ArbitraryDataFile.fromHash(chunk.getHash(), signature); + Path oldPath = chunk.getFilePath(); + Path newPath = newChunk.getFilePath(); + + // Ensure parent directories exist, then copy the file + LOGGER.info("Relocating chunk from {} to {}...", oldPath, newPath); + Files.createDirectories(newPath.getParent()); + Files.move(oldPath, newPath, REPLACE_EXISTING); + filesRelocatedCount++; + + // Delete empty parent directories + FilesystemUtils.safeDeleteEmptyParentDirectories(oldPath); + } + } + } + // Also move the complete file if it exists + if (arbitraryDataFile.exists()) { + // Determine the correct path by initializing a new ArbitraryDataFile instance with the signature + ArbitraryDataFile newCompleteFile = ArbitraryDataFile.fromHash(arbitraryDataFile.getHash(), signature); + Path oldPath = arbitraryDataFile.getFilePath(); + Path newPath = newCompleteFile.getFilePath(); + + // Ensure parent directories exist, then copy the file + LOGGER.info("Relocating complete file from {} to {}...", oldPath, newPath); + Files.createDirectories(newPath.getParent()); + Files.move(oldPath, newPath, REPLACE_EXISTING); + filesRelocatedCount++; + + // Delete empty parent directories + FilesystemUtils.safeDeleteEmptyParentDirectories(oldPath); + } + + // Also move the metadata file if it exists + if (arbitraryDataFile.getMetadataFile() != null && arbitraryDataFile.getMetadataFile().exists()) { + // Determine the correct path by initializing a new ArbitraryDataFile instance with the signature + ArbitraryDataFile newCompleteFile 
= ArbitraryDataFile.fromHash(arbitraryDataFile.getMetadataHash(), signature); + Path oldPath = arbitraryDataFile.getMetadataFile().getFilePath(); + Path newPath = newCompleteFile.getFilePath(); + + // Ensure parent directories exist, then copy the file + LOGGER.info("Relocating metadata file from {} to {}...", oldPath, newPath); + Files.createDirectories(newPath.getParent()); + Files.move(oldPath, newPath, REPLACE_EXISTING); + filesRelocatedCount++; + + // Delete empty parent directories + FilesystemUtils.safeDeleteEmptyParentDirectories(oldPath); + } + + // If at least one file was relocated, we can assume that the data from this transaction + // originated from this node + if (filesRelocatedCount > 0) { + if (Settings.getInstance().isOriginalCopyIndicatorFileEnabled()) { + // Create a file in the same directory, to indicate that this is the original copy + LOGGER.info("Creating original copy indicator file..."); + ArbitraryDataFile completeFile = ArbitraryDataFile.fromHash(arbitraryDataFile.getHash(), signature); + Path parentDirectory = completeFile.getFilePath().getParent(); + File file = Paths.get(parentDirectory.toString(), ".original").toFile(); + file.createNewFile(); + } + } + } + catch (DataException | IOException e) { + LOGGER.info("Unable to check and relocate all files for signature {}: {}", + Base58.encode(arbitraryTransactionData.getSignature()), e.getMessage()); + } + + return filesRelocatedCount; + } + +} diff --git a/src/main/java/org/qortal/utils/EnumUtils.java b/src/main/java/org/qortal/utils/EnumUtils.java new file mode 100644 index 00000000..9a486b11 --- /dev/null +++ b/src/main/java/org/qortal/utils/EnumUtils.java @@ -0,0 +1,15 @@ +package org.qortal.utils; + +import java.util.Arrays; + +public class EnumUtils { + + public static String[] getNames(Class> e) { + return Arrays.stream(e.getEnumConstants()).map(Enum::name).toArray(String[]::new); + } + + public static String getNames(Class> e, String delimiter) { + return String.join(delimiter, EnumUtils.getNames(e)); + } + +} diff --git a/src/main/java/org/qortal/utils/FilesystemUtils.java b/src/main/java/org/qortal/utils/FilesystemUtils.java new file mode 100644 index 00000000..44648699 --- /dev/null +++ b/src/main/java/org/qortal/utils/FilesystemUtils.java @@ -0,0 +1,263 @@ +package org.qortal.utils; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.ArrayUtils; +import org.qortal.settings.Settings; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; +import java.nio.file.*; + +public class FilesystemUtils { + + public static boolean isDirectoryEmpty(Path path) throws IOException { + if (Files.isDirectory(path)) { + try (DirectoryStream directory = Files.newDirectoryStream(path)) { + return !directory.iterator().hasNext(); + } + } + + return false; + } + + public static void copyAndReplaceDirectory(String sourceDirectoryLocation, String destinationDirectoryLocation) throws IOException { + // Ensure parent folders exist in the destination + File destFile = new File(destinationDirectoryLocation); + if (destFile != null) { + destFile.mkdirs(); + } + if (destFile == null || !destFile.exists()) { + throw new IOException("Destination directory doesn't exist"); + } + + // If the destination directory isn't empty, delete its contents + if (!FilesystemUtils.isDirectoryEmpty(destFile.toPath())) { + FileUtils.deleteDirectory(destFile); + destFile.mkdirs(); + } + + Files.walk(Paths.get(sourceDirectoryLocation)) + .forEach(source 
-> { + Path destination = Paths.get(destinationDirectoryLocation, source.toString() + .substring(sourceDirectoryLocation.length())); + try { + Files.copy(source, destination, StandardCopyOption.REPLACE_EXISTING); + } catch (IOException e) { + e.printStackTrace(); + } + }); + } + + + /** + * moveFile + * Allows files to be moved between filesystems + * + * @param source + * @param dest + * @param cleanup + * @throws IOException + */ + public static void moveFile(Path source, Path dest, boolean cleanup) throws IOException { + if (source.compareTo(dest) == 0) { + // Source path matches destination path already + return; + } + + File sourceFile = new File(source.toString()); + if (sourceFile == null || !sourceFile.exists()) { + throw new IOException("Source file doesn't exist"); + } + if (!sourceFile.isFile()) { + throw new IOException("Source isn't a file"); + } + + // Ensure parent folders exist in the destination + File destFile = new File(dest.toString()); + File destParentFile = destFile.getParentFile(); + if (destParentFile != null) { + destParentFile.mkdirs(); + } + if (destParentFile == null || !destParentFile.exists()) { + throw new IOException("Destination directory doesn't exist"); + } + + // Copy to destination + Files.copy(source, dest, StandardCopyOption.REPLACE_EXISTING); + + // Delete existing + if (FilesystemUtils.pathInsideDataOrTempPath(source)) { + Files.delete(source); + } + + if (cleanup) { + // ... and delete its parent directory if empty + Path parentDirectory = source.getParent(); + if (FilesystemUtils.pathInsideDataOrTempPath(parentDirectory)) { + Files.deleteIfExists(parentDirectory); + } + } + } + + /** + * moveDirectory + * Allows directories to be moved between filesystems + * + * @param source + * @param dest + * @param cleanup + * @throws IOException + */ + public static void moveDirectory(Path source, Path dest, boolean cleanup) throws IOException { + if (source.compareTo(dest) == 0) { + // Source path matches destination path already + return; + } + + File sourceFile = new File(source.toString()); + File destFile = new File(dest.toString()); + if (sourceFile == null || !sourceFile.exists()) { + throw new IOException("Source directory doesn't exist"); + } + if (!sourceFile.isDirectory()) { + throw new IOException("Source isn't a directory"); + } + + // Ensure parent folders exist in the destination + destFile.mkdirs(); + if (destFile == null || !destFile.exists()) { + throw new IOException("Destination directory doesn't exist"); + } + + // Copy to destination + FilesystemUtils.copyAndReplaceDirectory(source.toString(), dest.toString()); + + // Delete existing + if (FilesystemUtils.pathInsideDataOrTempPath(source)) { + File directory = new File(source.toString()); + System.out.println(String.format("Deleting directory %s", directory.toString())); + FileUtils.deleteDirectory(directory); + } + + if (cleanup) { + // ... 
and delete its parent directory if empty + Path parentDirectory = source.getParent(); + if (FilesystemUtils.pathInsideDataOrTempPath(parentDirectory)) { + Files.deleteIfExists(parentDirectory); + } + } + } + + public static void safeDeleteDirectory(Path path, boolean cleanup) throws IOException { + // Delete path, if it exists in our data/temp directory + if (FilesystemUtils.pathInsideDataOrTempPath(path)) { + File directory = new File(path.toString()); + FileUtils.deleteDirectory(directory); + } + + if (cleanup) { + // Delete the parent directories if they are empty (and exist in our data/temp directory) + FilesystemUtils.safeDeleteEmptyParentDirectories(path); + } + } + + public static void safeDeleteEmptyParentDirectories(Path path) throws IOException { + final Path parentPath = path.toAbsolutePath().getParent(); + if (!parentPath.toFile().isDirectory()) { + return; + } + if (!FilesystemUtils.pathInsideDataOrTempPath(parentPath)) { + return; + } + try { + Files.deleteIfExists(parentPath); + + } catch (DirectoryNotEmptyException e) { + // We've reached the limits of what we can delete + return; + } + + FilesystemUtils.safeDeleteEmptyParentDirectories(parentPath); + } + + public static boolean pathInsideDataOrTempPath(Path path) { + if (path == null) { + return false; + } + Path dataPath = Paths.get(Settings.getInstance().getDataPath()).toAbsolutePath(); + Path tempDataPath = Paths.get(Settings.getInstance().getTempDataPath()).toAbsolutePath(); + Path absolutePath = path.toAbsolutePath(); + if (absolutePath.startsWith(dataPath) || absolutePath.startsWith(tempDataPath)) { + return true; + } + return false; + } + + public static boolean isChild(Path child, Path parent) { + return child.toAbsolutePath().startsWith(parent.toAbsolutePath()); + } + + public static long getDirectorySize(Path path) throws IOException { + if (path == null || !Files.exists(path)) { + return 0L; + } + return Files.walk(path) + .filter(p -> p.toFile().isFile()) + .mapToLong(p -> p.toFile().length()) + .sum(); + } + + + /** + * getSingleFileContents + * Return the content of the file at given path. + * If the path is a directory, the contents will be returned + * only if it contains a single file. 
+ * + * @param path + * @return + * @throws IOException + */ + public static byte[] getSingleFileContents(Path path) throws IOException { + byte[] data = null; + // TODO: limit the file size that can be loaded into memory + + // If the path is a file, read the contents directly + if (path.toFile().isFile()) { + data = Files.readAllBytes(path); + } + + // Or if it's a directory, only load file contents if there is a single file inside it + else if (path.toFile().isDirectory()) { + String[] files = ArrayUtils.removeElement(path.toFile().list(), ".qortal"); + if (files.length == 1) { + Path filePath = Paths.get(path.toString(), files[0]); + data = Files.readAllBytes(filePath); + } + } + + return data; + } + + public static byte[] readFromFile(String filePath, long position, int size) throws IOException { + RandomAccessFile file = new RandomAccessFile(filePath, "r"); + file.seek(position); + byte[] bytes = new byte[size]; + file.read(bytes); + file.close(); + return bytes; + } + + public static String readUtf8StringFromFile(String filePath, long position, int size) throws IOException { + return new String(FilesystemUtils.readFromFile(filePath, position, size), StandardCharsets.UTF_8); + } + + public static boolean fileEndsWithNewline(Path path) throws IOException { + long length = Files.size(path); + String lastCharacter = FilesystemUtils.readUtf8StringFromFile(path.toString(), length-1, 1); + return (lastCharacter.equals("\n") || lastCharacter.equals("\r")); + } + +} diff --git a/src/main/java/org/qortal/utils/LoggingUtils.java b/src/main/java/org/qortal/utils/LoggingUtils.java new file mode 100644 index 00000000..cb8af45f --- /dev/null +++ b/src/main/java/org/qortal/utils/LoggingUtils.java @@ -0,0 +1,31 @@ +package org.qortal.utils; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +public class LoggingUtils { + + public static void fixLegacyLog4j2Properties() { + Path log4j2PropertiesPath = Paths.get("log4j2.properties"); + if (Files.exists(log4j2PropertiesPath)) { + try { + String content = FileUtils.readFileToString(log4j2PropertiesPath.toFile(), "UTF-8"); + if (content.contains("${dirname:-}")) { + content = content.replace("${dirname:-}", "./"); + FileUtils.writeStringToFile(log4j2PropertiesPath.toFile(), content, "UTF-8"); + + // Force reload the log4j2.properties file + ((org.apache.logging.log4j.core.LoggerContext) LogManager.getContext(false)).reconfigure(); + } + } catch (IOException e) { + // Not much we can do here + } + } + } + +} diff --git a/src/main/java/org/qortal/utils/Serialization.java b/src/main/java/org/qortal/utils/Serialization.java index e9bf6e0e..8c3c43ed 100644 --- a/src/main/java/org/qortal/utils/Serialization.java +++ b/src/main/java/org/qortal/utils/Serialization.java @@ -101,9 +101,17 @@ public class Serialization { } public static void serializeSizedString(ByteArrayOutputStream bytes, String string) throws UnsupportedEncodingException, IOException { - byte[] stringBytes = string.getBytes(StandardCharsets.UTF_8); - bytes.write(Ints.toByteArray(stringBytes.length)); - bytes.write(stringBytes); + byte[] stringBytes = null; + int stringBytesLength = 0; + + if (string != null) { + stringBytes = string.getBytes(StandardCharsets.UTF_8); + stringBytesLength = stringBytes.length; + } + bytes.write(Ints.toByteArray(stringBytesLength)); + if (stringBytesLength > 0) { + bytes.write(stringBytes); + } } public static 
String deserializeSizedString(ByteBuffer byteBuffer, int maxSize) throws TransformationException { @@ -114,6 +122,9 @@ public class Serialization { if (size > byteBuffer.remaining()) throw new TransformationException("Byte data too short for serialized string"); + if (size == 0) + return null; + byte[] bytes = new byte[size]; byteBuffer.get(bytes); diff --git a/src/main/java/org/qortal/utils/ZipUtils.java b/src/main/java/org/qortal/utils/ZipUtils.java new file mode 100644 index 00000000..c61723e7 --- /dev/null +++ b/src/main/java/org/qortal/utils/ZipUtils.java @@ -0,0 +1,139 @@ +/* + * MIT License + * + * Copyright (c) 2017 Eugen Paraschiv + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Code modified in 2021 for Qortal Core + * + */ + +package org.qortal.utils; + +import org.qortal.controller.Controller; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Paths; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import java.util.zip.ZipOutputStream; + +public class ZipUtils { + + public static void zip(String sourcePath, String destFilePath, String enclosingFolderName) throws IOException, InterruptedException { + File sourceFile = new File(sourcePath); + boolean isSingleFile = Paths.get(sourcePath).toFile().isFile(); + FileOutputStream fileOutputStream = new FileOutputStream(destFilePath); + ZipOutputStream zipOutputStream = new ZipOutputStream(fileOutputStream); + ZipUtils.zip(sourceFile, enclosingFolderName, zipOutputStream, isSingleFile); + zipOutputStream.close(); + fileOutputStream.close(); + } + + public static void zip(final File fileToZip, final String enclosingFolderName, final ZipOutputStream zipOut, boolean isSingleFile) throws IOException, InterruptedException { + if (Controller.isStopping()) { + throw new InterruptedException("Controller is stopping"); + } + + // Handle single file resources slightly differently + if (isSingleFile) { + // Create enclosing folder + zipOut.putNextEntry(new ZipEntry(enclosingFolderName + "/")); + zipOut.closeEntry(); + // Place the supplied file within the folder + ZipUtils.zip(fileToZip, enclosingFolderName + "/" + fileToZip.getName(), zipOut, false); + return; + } + + if (fileToZip.isDirectory()) { + if (enclosingFolderName.endsWith("/")) { + zipOut.putNextEntry(new ZipEntry(enclosingFolderName)); + zipOut.closeEntry(); + } else { + zipOut.putNextEntry(new ZipEntry(enclosingFolderName + "/")); + 
zipOut.closeEntry(); + } + final File[] children = fileToZip.listFiles(); + for (final File childFile : children) { + ZipUtils.zip(childFile, enclosingFolderName + "/" + childFile.getName(), zipOut, false); + } + return; + } + final FileInputStream fis = new FileInputStream(fileToZip); + final ZipEntry zipEntry = new ZipEntry(enclosingFolderName); + zipOut.putNextEntry(zipEntry); + final byte[] bytes = new byte[1024]; + int length; + while ((length = fis.read(bytes)) >= 0) { + zipOut.write(bytes, 0, length); + } + fis.close(); + } + + public static void unzip(String sourcePath, String destPath) throws IOException { + final File destDir = new File(destPath); + final byte[] buffer = new byte[1024]; + final ZipInputStream zis = new ZipInputStream(new FileInputStream(sourcePath)); + ZipEntry zipEntry = zis.getNextEntry(); + while (zipEntry != null) { + final File newFile = ZipUtils.newFile(destDir, zipEntry); + if (zipEntry.isDirectory()) { + if (!newFile.isDirectory() && !newFile.mkdirs()) { + throw new IOException("Failed to create directory " + newFile); + } + } else { + File parent = newFile.getParentFile(); + if (!parent.isDirectory() && !parent.mkdirs()) { + throw new IOException("Failed to create directory " + parent); + } + + final FileOutputStream fos = new FileOutputStream(newFile); + int len; + while ((len = zis.read(buffer)) > 0) { + fos.write(buffer, 0, len); + } + fos.close(); + } + zipEntry = zis.getNextEntry(); + } + zis.closeEntry(); + zis.close(); + } + + /** + * See: https://snyk.io/research/zip-slip-vulnerability + */ + public static File newFile(File destinationDir, ZipEntry zipEntry) throws IOException { + File destFile = new File(destinationDir, zipEntry.getName()); + + String destDirPath = destinationDir.getCanonicalPath(); + String destFilePath = destFile.getCanonicalPath(); + + if (!destFilePath.startsWith(destDirPath + File.separator)) { + throw new IOException("Entry is outside of the target dir: " + zipEntry.getName()); + } + + return destFile; + } + +} diff --git a/src/main/resources/i18n/ApiError_en.properties b/src/main/resources/i18n/ApiError_en.properties index 6f9b1d01..dfe73eef 100644 --- a/src/main/resources/i18n/ApiError_en.properties +++ b/src/main/resources/i18n/ApiError_en.properties @@ -80,4 +80,8 @@ ORDER_SIZE_TOO_SMALL = order amount too low ### Data ### FILE_NOT_FOUND = file not found -NO_REPLY = peer did not reply with data \ No newline at end of file +ORDER_SIZE_TOO_SMALL = order size too small + +FILE_NOT_FOUND = file not found + +NO_REPLY = peer didn't reply within the allowed time diff --git a/src/main/resources/i18n/TransactionValidity_en.properties b/src/main/resources/i18n/TransactionValidity_en.properties index 17a52647..7c4d18a1 100644 --- a/src/main/resources/i18n/TransactionValidity_en.properties +++ b/src/main/resources/i18n/TransactionValidity_en.properties @@ -180,7 +180,9 @@ INCORRECT_NONCE = incorrect PoW nonce INVALID_TIMESTAMP_SIGNATURE = invalid timestamp signature -ADDRESS_IN_BLACKLIST = this address is in your blacklist +ADDRESS_BLOCKED = this address is blocked + +NAME_BLOCKED = this name is blocked ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit @@ -188,4 +190,4 @@ DUPLICATE_MESSAGE = address sent duplicate message INVALID_BUT_OK = invalid but OK -NOT_YET_RELEASED = feature not yet released \ No newline at end of file +NOT_YET_RELEASED = feature not yet released diff --git a/src/main/resources/i18n/TransactionValidity_fi.properties b/src/main/resources/i18n/TransactionValidity_fi.properties index 
adf7eb35..002ad560 100644 --- a/src/main/resources/i18n/TransactionValidity_fi.properties +++ b/src/main/resources/i18n/TransactionValidity_fi.properties @@ -180,7 +180,9 @@ TRANSACTION_UNKNOWN = tuntematon transaktio TX_GROUP_ID_MISMATCH = transaktion ryhmä-ID:n vastaavuusvirhe -ADDRESS_IN_BLACKLIST = this address is in your blacklist +ADDRESS_BLOCKED = this address is blocked + +NAME_BLOCKED = this name is blocked ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit @@ -188,4 +190,4 @@ DUPLICATE_MESSAGE = address sent duplicate message INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature -INVALID_BUT_OK = Invalid but OK \ No newline at end of file +INVALID_BUT_OK = Invalid but OK diff --git a/src/main/resources/i18n/TransactionValidity_hu.properties b/src/main/resources/i18n/TransactionValidity_hu.properties index 68950971..bb43e18f 100644 --- a/src/main/resources/i18n/TransactionValidity_hu.properties +++ b/src/main/resources/i18n/TransactionValidity_hu.properties @@ -182,7 +182,9 @@ INCORRECT_NONCE = helytelen Proof-of-Work Nonce INVALID_TIMESTAMP_SIGNATURE = érvénytelen időbélyeg aláírás -ADDRESS_IN_BLACKLIST = ez a fiókcím a fekete listádon van +ADDRESS_BLOCKED = this address is blocked + +NAME_BLOCKED = this name is blocked ADDRESS_ABOVE_RATE_LIMIT = ez a cím elérte a megengedett mérték korlátot @@ -190,4 +192,4 @@ DUPLICATE_MESSAGE = ez a cím duplikált üzenetet küldött INVALID_BUT_OK = érvénytelen de elfogadva -NOT_YET_RELEASED = ez a funkció még nem került kiadásra \ No newline at end of file +NOT_YET_RELEASED = ez a funkció még nem került kiadásra diff --git a/src/main/resources/i18n/TransactionValidity_it.properties b/src/main/resources/i18n/TransactionValidity_it.properties index 62d1608b..762f0865 100644 --- a/src/main/resources/i18n/TransactionValidity_it.properties +++ b/src/main/resources/i18n/TransactionValidity_it.properties @@ -182,7 +182,9 @@ TRANSACTION_UNKNOWN = transazione sconosciuta TX_GROUP_ID_MISMATCH = identificazione di gruppo della transazione non corrisponde -ADDRESS_IN_BLACKLIST = this address is in your blacklist +ADDRESS_BLOCKED = this address is blocked + +NAME_BLOCKED = this name is blocked ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit @@ -190,4 +192,4 @@ DUPLICATE_MESSAGE = address sent duplicate message INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature -INVALID_BUT_OK = Invalid but OK \ No newline at end of file +INVALID_BUT_OK = Invalid but OK diff --git a/src/main/resources/i18n/TransactionValidity_nl.properties b/src/main/resources/i18n/TransactionValidity_nl.properties index d6191f86..726af0a9 100644 --- a/src/main/resources/i18n/TransactionValidity_nl.properties +++ b/src/main/resources/i18n/TransactionValidity_nl.properties @@ -180,7 +180,9 @@ TRANSACTION_UNKNOWN = transactie onbekend TX_GROUP_ID_MISMATCH = groep-ID van transactie matcht niet -ADDRESS_IN_BLACKLIST = this address is in your blacklist +ADDRESS_BLOCKED = this address is blocked + +NAME_BLOCKED = this name is blocked ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit @@ -188,4 +190,4 @@ DUPLICATE_MESSAGE = address sent duplicate message INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature -INVALID_BUT_OK = Invalid but OK \ No newline at end of file +INVALID_BUT_OK = Invalid but OK diff --git a/src/main/resources/i18n/TransactionValidity_ru.properties b/src/main/resources/i18n/TransactionValidity_ru.properties index e8761e7b..86e9d37a 100644 --- a/src/main/resources/i18n/TransactionValidity_ru.properties +++ 
b/src/main/resources/i18n/TransactionValidity_ru.properties @@ -174,7 +174,9 @@ TRANSACTION_UNKNOWN = неизвестная транзакция TX_GROUP_ID_MISMATCH = не соответствие идентификатора группы c хэш транзации -ADDRESS_IN_BLACKLIST = this address is in your blacklist +ADDRESS_BLOCKED = this address is blocked + +NAME_BLOCKED = this name is blocked ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit @@ -182,4 +184,4 @@ DUPLICATE_MESSAGE = address sent duplicate message INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature -INVALID_BUT_OK = Invalid but OK \ No newline at end of file +INVALID_BUT_OK = Invalid but OK diff --git a/src/main/resources/loading/index.html b/src/main/resources/loading/index.html new file mode 100644 index 00000000..6f234c45 --- /dev/null +++ b/src/main/resources/loading/index.html @@ -0,0 +1,267 @@ + + + + + Loading... + + + + + + + + + + + + + + +
+ Loading
+ Files are being retrieved from the Qortal Data Network.
+ This page will refresh automatically when the content becomes available.
+ Loading...
+ + + diff --git a/src/test/java/org/qortal/test/CryptoTests.java b/src/test/java/org/qortal/test/CryptoTests.java index 3a76b9f3..6a0133d2 100644 --- a/src/test/java/org/qortal/test/CryptoTests.java +++ b/src/test/java/org/qortal/test/CryptoTests.java @@ -3,6 +3,7 @@ package org.qortal.test; import org.junit.Test; import org.qortal.account.PrivateKeyAccount; import org.qortal.block.BlockChain; +import org.qortal.crypto.AES; import org.qortal.crypto.BouncyCastle25519; import org.qortal.crypto.Crypto; import org.qortal.test.common.Common; @@ -10,11 +11,18 @@ import org.qortal.utils.Base58; import static org.junit.Assert.*; +import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.StandardOpenOption; +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; +import java.util.Arrays; import java.util.Random; import org.bouncycastle.crypto.agreement.X25519Agreement; @@ -25,6 +33,11 @@ import org.bouncycastle.crypto.params.X25519PublicKeyParameters; import com.google.common.hash.HashCode; +import javax.crypto.BadPaddingException; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKey; + public class CryptoTests extends Common { @Test @@ -291,4 +304,68 @@ public class CryptoTests extends Common { assertEquals(expectedProxyPrivateKey, Base58.encode(proxyPrivateKey)); } + + @Test + public void testAESFileEncryption() throws NoSuchAlgorithmException, IOException, IllegalBlockSizeException, + InvalidKeyException, BadPaddingException, InvalidAlgorithmParameterException, NoSuchPaddingException { + + // Create temporary directory and file paths + java.nio.file.Path tempDir = Files.createTempDirectory("qortal-tests"); + String inputFilePath = tempDir.toString() + File.separator + "inputFile"; + String outputFilePath = tempDir.toString() + File.separator + "outputFile"; + String decryptedFilePath = tempDir.toString() + File.separator + "decryptedFile"; + String reencryptedFilePath = tempDir.toString() + File.separator + "reencryptedFile"; + + // Generate some dummy data + byte[] randomBytes = new byte[1024]; + new Random().nextBytes(randomBytes); + + // Write it to the input file + FileOutputStream outputStream = new FileOutputStream(inputFilePath); + outputStream.write(randomBytes); + + // Make sure only the input file exists + assertTrue(Files.exists(Paths.get(inputFilePath))); + assertFalse(Files.exists(Paths.get(outputFilePath))); + + // Encrypt + SecretKey aesKey = AES.generateKey(256); + AES.encryptFile("AES", aesKey, inputFilePath, outputFilePath); + assertTrue(Files.exists(Paths.get(outputFilePath))); + byte[] encryptedBytes = Files.readAllBytes(Paths.get(outputFilePath)); + + // Delete the input file + Files.delete(Paths.get(inputFilePath)); + assertFalse(Files.exists(Paths.get(inputFilePath))); + + // Decrypt + String encryptedFilePath = outputFilePath; + assertFalse(Files.exists(Paths.get(decryptedFilePath))); + AES.decryptFile("AES", aesKey, encryptedFilePath, decryptedFilePath); + assertTrue(Files.exists(Paths.get(decryptedFilePath))); + + // Delete the output file + Files.delete(Paths.get(outputFilePath)); + assertFalse(Files.exists(Paths.get(outputFilePath))); + + // Check that the decrypted file contents matches the original data + byte[] decryptedBytes = 
Files.readAllBytes(Paths.get(decryptedFilePath)); + assertTrue(Arrays.equals(decryptedBytes, randomBytes)); + assertEquals(1024, decryptedBytes.length); + + // Write the original data back to the input file + outputStream = new FileOutputStream(inputFilePath); + outputStream.write(randomBytes); + + // Now encrypt the data one more time using the same key + // This is to ensure the initialization vector produces a different result + AES.encryptFile("AES", aesKey, inputFilePath, reencryptedFilePath); + assertTrue(Files.exists(Paths.get(reencryptedFilePath))); + + // Make sure the ciphertexts do not match + byte[] reencryptedBytes = Files.readAllBytes(Paths.get(reencryptedFilePath)); + assertFalse(Arrays.equals(encryptedBytes, reencryptedBytes)); + + } + } diff --git a/src/test/java/org/qortal/test/SerializationTests.java b/src/test/java/org/qortal/test/SerializationTests.java index 15641331..5a9fffa8 100644 --- a/src/test/java/org/qortal/test/SerializationTests.java +++ b/src/test/java/org/qortal/test/SerializationTests.java @@ -38,7 +38,6 @@ public class SerializationTests extends Common { } @Test - @Ignore(value = "Doesn't work, to be fixed later") public void testTransactions() throws DataException, TransformationException { try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount signingAccount = Common.getTestAccount(repository, "alice"); @@ -174,4 +173,4 @@ public class SerializationTests extends Common { assertEqualBigDecimals("Deserialized BigDecimal has incorrect value", amount, newAmount); } -} \ No newline at end of file +} diff --git a/src/test/java/org/qortal/test/api/AdminApiTests.java b/src/test/java/org/qortal/test/api/AdminApiTests.java index 4aa2ca3b..8fb8bb52 100644 --- a/src/test/java/org/qortal/test/api/AdminApiTests.java +++ b/src/test/java/org/qortal/test/api/AdminApiTests.java @@ -5,12 +5,19 @@ import static org.junit.Assert.*; import org.junit.Before; import org.junit.Test; import org.qortal.api.resource.AdminResource; +import org.qortal.repository.DataException; import org.qortal.test.common.ApiCommon; +import org.qortal.test.common.Common; public class AdminApiTests extends ApiCommon { private AdminResource adminResource; + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + @Before public void buildResource() { this.adminResource = (AdminResource) ApiCommon.buildResource(AdminResource.class); diff --git a/src/test/java/org/qortal/test/api/ArbitraryApiTests.java b/src/test/java/org/qortal/test/api/ArbitraryApiTests.java index 2b80fb51..e4f27db6 100644 --- a/src/test/java/org/qortal/test/api/ArbitraryApiTests.java +++ b/src/test/java/org/qortal/test/api/ArbitraryApiTests.java @@ -6,6 +6,7 @@ import org.junit.Before; import org.junit.Test; import org.qortal.api.resource.ArbitraryResource; import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.arbitrary.misc.Service; import org.qortal.test.common.ApiCommon; public class ArbitraryApiTests extends ApiCommon { @@ -22,22 +23,24 @@ public class ArbitraryApiTests extends ApiCommon { Integer[] startingBlocks = new Integer[] { null, 0, 1, 999999999 }; Integer[] blockLimits = new Integer[] { null, 0, 1, 999999999 }; Integer[] txGroupIds = new Integer[] { null, 0, 1, 999999999 }; - Integer[] services = new Integer[] { null, 0, 1, 999999999 }; + Service[] services = new Service[] { Service.WEBSITE, Service.GIT_REPOSITORY, Service.BLOG_COMMENT }; + String[] names = new String[] { null, "Test" }; String[] addresses = new 
String[] { null, this.aliceAddress }; ConfirmationStatus[] confirmationStatuses = new ConfirmationStatus[] { ConfirmationStatus.UNCONFIRMED, ConfirmationStatus.CONFIRMED, ConfirmationStatus.BOTH }; for (Integer startBlock : startingBlocks) for (Integer blockLimit : blockLimits) for (Integer txGroupId : txGroupIds) - for (Integer service : services) - for (String address : addresses) - for (ConfirmationStatus confirmationStatus : confirmationStatuses) { - if (confirmationStatus != ConfirmationStatus.CONFIRMED && (startBlock != null || blockLimit != null)) - continue; + for (Service service : services) + for (String name : names) + for (String address : addresses) + for (ConfirmationStatus confirmationStatus : confirmationStatuses) { + if (confirmationStatus != ConfirmationStatus.CONFIRMED && (startBlock != null || blockLimit != null)) + continue; - assertNotNull(this.arbitraryResource.searchTransactions(startBlock, blockLimit, txGroupId, service, address, confirmationStatus, 20, null, null)); - assertNotNull(this.arbitraryResource.searchTransactions(startBlock, blockLimit, txGroupId, service, address, confirmationStatus, 1, 1, true)); - } + assertNotNull(this.arbitraryResource.searchTransactions(startBlock, blockLimit, txGroupId, service, name, address, confirmationStatus, 20, null, null)); + assertNotNull(this.arbitraryResource.searchTransactions(startBlock, blockLimit, txGroupId, service, name, address, confirmationStatus, 1, 1, true)); + } } } diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryCompressionTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryCompressionTests.java new file mode 100644 index 00000000..dd482074 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryCompressionTests.java @@ -0,0 +1,182 @@ +package org.qortal.test.arbitrary; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.arbitrary.ArbitraryDataDigest; +import org.qortal.crypto.Crypto; +import org.qortal.repository.DataException; +import org.qortal.test.common.Common; +import org.qortal.utils.ZipUtils; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Random; + +import static org.junit.Assert.*; + +public class ArbitraryCompressionTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testZipSingleFile() throws IOException, InterruptedException { + String enclosingFolderName = "data"; + Path inputFile = Files.createTempFile("inputFile", null); + Path outputDirectory = Files.createTempDirectory("outputDirectory"); + Path outputFile = Paths.get(outputDirectory.toString(), enclosingFolderName); + inputFile.toFile().deleteOnExit(); + outputDirectory.toFile().deleteOnExit(); + + // Write random data to the input file + byte[] data = new byte[1024]; + new Random().nextBytes(data); + Files.write(inputFile, data, StandardOpenOption.CREATE); + + assertTrue(Files.exists(inputFile)); + assertFalse(Files.exists(outputFile)); + + // Zip... 
+ ZipUtils.zip(inputFile.toString(), outputFile.toString(), enclosingFolderName); + + assertTrue(Files.exists(inputFile)); + assertTrue(Files.exists(outputFile)); + + // Ensure zipped file's hash differs from the original + assertFalse(Arrays.equals(Crypto.digest(inputFile.toFile()), Crypto.digest(outputFile.toFile()))); + + // Create paths for unzipping + Path unzippedDirectory = Files.createTempDirectory("unzippedDirectory"); + // Single file data is unzipped directly, without an enclosing folder. Original name is maintained. + Path unzippedFile = Paths.get(unzippedDirectory.toString(), enclosingFolderName, inputFile.getFileName().toString()); + unzippedDirectory.toFile().deleteOnExit(); + assertFalse(Files.exists(unzippedFile)); + + // Now unzip... + ZipUtils.unzip(outputFile.toString(), unzippedDirectory.toString()); + + // Ensure resulting file exists + assertTrue(Files.exists(unzippedFile)); + + // And make sure it matches the original input file + assertTrue(Arrays.equals(Crypto.digest(inputFile.toFile()), Crypto.digest(unzippedFile.toFile()))); + } + + @Test + public void testZipDirectoryWithSingleFile() throws IOException, InterruptedException, DataException { + String enclosingFolderName = "data"; + Path inputDirectory = Files.createTempDirectory("inputDirectory"); + Path outputDirectory = Files.createTempDirectory("outputDirectory"); + Path outputFile = Paths.get(outputDirectory.toString(), enclosingFolderName); + inputDirectory.toFile().deleteOnExit(); + outputDirectory.toFile().deleteOnExit(); + + Path inputFile = Paths.get(inputDirectory.toString(), "file"); + + // Write random data to a file + byte[] data = new byte[1024]; + new Random().nextBytes(data); + Files.write(inputFile, data, StandardOpenOption.CREATE); + + assertTrue(Files.exists(inputDirectory)); + assertTrue(Files.exists(inputFile)); + assertFalse(Files.exists(outputFile)); + + // Zip... + ZipUtils.zip(inputDirectory.toString(), outputFile.toString(), enclosingFolderName); + + assertTrue(Files.exists(inputDirectory)); + assertTrue(Files.exists(outputFile)); + + // Create paths for unzipping + Path unzippedDirectory = Files.createTempDirectory("unzippedDirectory"); + unzippedDirectory.toFile().deleteOnExit(); + Path unzippedFile = Paths.get(unzippedDirectory.toString(), enclosingFolderName, "file"); + assertFalse(Files.exists(unzippedFile)); + + // Now unzip... 
+ ZipUtils.unzip(outputFile.toString(), unzippedDirectory.toString()); + + // Ensure resulting file exists + assertTrue(Files.exists(unzippedFile)); + + // And make sure they match the original input files + assertTrue(Arrays.equals(Crypto.digest(inputFile.toFile()), Crypto.digest(unzippedFile.toFile()))); + + // Unzipped files are placed within a folder named by the supplied enclosingFolderName + Path unzippedInnerDirectory = Paths.get(unzippedDirectory.toString(), enclosingFolderName); + + // Finally, make sure the directory digests match + ArbitraryDataDigest inputDirectoryDigest = new ArbitraryDataDigest(inputDirectory); + inputDirectoryDigest.compute(); + ArbitraryDataDigest unzippedDirectoryDigest = new ArbitraryDataDigest(unzippedInnerDirectory); + unzippedDirectoryDigest.compute(); + assertEquals(inputDirectoryDigest.getHash58(), unzippedDirectoryDigest.getHash58()); + } + + @Test + public void testZipMultipleFiles() throws IOException, InterruptedException, DataException { + String enclosingFolderName = "data"; + Path inputDirectory = Files.createTempDirectory("inputDirectory"); + Path outputDirectory = Files.createTempDirectory("outputDirectory"); + Path outputFile = Paths.get(outputDirectory.toString(), enclosingFolderName); + inputDirectory.toFile().deleteOnExit(); + outputDirectory.toFile().deleteOnExit(); + + Path inputFile1 = Paths.get(inputDirectory.toString(), "file1"); + Path inputFile2 = Paths.get(inputDirectory.toString(), "file2"); + + // Write random data to some files + byte[] data = new byte[1024]; + new Random().nextBytes(data); + Files.write(inputFile1, data, StandardOpenOption.CREATE); + Files.write(inputFile2, data, StandardOpenOption.CREATE); + + assertTrue(Files.exists(inputDirectory)); + assertTrue(Files.exists(inputFile1)); + assertTrue(Files.exists(inputFile2)); + assertFalse(Files.exists(outputFile)); + + // Zip... + ZipUtils.zip(inputDirectory.toString(), outputFile.toString(), enclosingFolderName); + + assertTrue(Files.exists(inputDirectory)); + assertTrue(Files.exists(outputFile)); + + // Create paths for unzipping + Path unzippedDirectory = Files.createTempDirectory("unzippedDirectory"); + unzippedDirectory.toFile().deleteOnExit(); + Path unzippedFile1 = Paths.get(unzippedDirectory.toString(), enclosingFolderName, "file1"); + Path unzippedFile2 = Paths.get(unzippedDirectory.toString(), enclosingFolderName, "file2"); + assertFalse(Files.exists(unzippedFile1)); + assertFalse(Files.exists(unzippedFile2)); + + // Now unzip... 
+ ZipUtils.unzip(outputFile.toString(), unzippedDirectory.toString()); + + // Ensure resulting files exist + assertTrue(Files.exists(unzippedFile1)); + assertTrue(Files.exists(unzippedFile2)); + + // And make sure they match the original input files + assertTrue(Arrays.equals(Crypto.digest(inputFile1.toFile()), Crypto.digest(unzippedFile1.toFile()))); + assertTrue(Arrays.equals(Crypto.digest(inputFile2.toFile()), Crypto.digest(unzippedFile2.toFile()))); + + // Unzipped files are placed within a folder named by the supplied enclosingFolderName + Path unzippedInnerDirectory = Paths.get(unzippedDirectory.toString(), enclosingFolderName); + + // Finally, make sure the directory digests match + ArbitraryDataDigest inputDirectoryDigest = new ArbitraryDataDigest(inputDirectory); + inputDirectoryDigest.compute(); + ArbitraryDataDigest unzippedDirectoryDigest = new ArbitraryDataDigest(unzippedInnerDirectory); + unzippedDirectoryDigest.compute(); + assertEquals(inputDirectoryDigest.getHash58(), unzippedDirectoryDigest.getHash58()); + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataDigestTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataDigestTests.java new file mode 100644 index 00000000..1c8afc2e --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataDigestTests.java @@ -0,0 +1,58 @@ +package org.qortal.test.arbitrary; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.arbitrary.ArbitraryDataDigest; +import org.qortal.repository.DataException; +import org.qortal.test.common.Common; + +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.UUID; + +import static org.junit.Assert.*; + +public class ArbitraryDataDigestTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testDirectoryDigest() throws IOException, DataException { + Path dataPath = Paths.get("src/test/resources/arbitrary/demo1"); + String expectedHash58 = "DKyMuonWKoneJqiVHgw26Vk1ytrZG9PGsE9xfBg3GKDp"; + + // Ensure directory exists + assertTrue(dataPath.toFile().exists()); + assertTrue(dataPath.toFile().isDirectory()); + + // Compute a hash + ArbitraryDataDigest digest = new ArbitraryDataDigest(dataPath); + digest.compute(); + assertEquals(expectedHash58, digest.getHash58()); + + // Write a random file to .qortal/cache to ensure it isn't being included in the digest function + // We exclude all .qortal files from the digest since they can be different with each build, and + // we only care about the actual user files + FileWriter fileWriter = new FileWriter(Paths.get(dataPath.toString(), ".qortal", "cache").toString()); + fileWriter.append(UUID.randomUUID().toString()); + fileWriter.close(); + + // Recompute the hash + digest = new ArbitraryDataDigest(dataPath); + digest.compute(); + assertEquals(expectedHash58, digest.getHash58()); + + // Now compute the hash 100 more times to ensure it's always the same + for (int i=0; i<100; i++) { + digest = new ArbitraryDataDigest(dataPath); + digest.compute(); + assertEquals(expectedHash58, digest.getHash58()); + } + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataFileTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataFileTests.java new file mode 100644 index 00000000..aabbe502 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataFileTests.java @@ -0,0 +1,77 @@ +package org.qortal.test.arbitrary; + 
+import org.junit.Before; +import org.junit.Test; +import org.qortal.repository.DataException; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.test.common.Common; + +import java.util.Random; + +import static org.junit.Assert.*; + +public class ArbitraryDataFileTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testSplitAndJoin() throws DataException { + String dummyDataString = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; + ArbitraryDataFile arbitraryDataFile = new ArbitraryDataFile(dummyDataString.getBytes(), null); + assertTrue(arbitraryDataFile.exists()); + assertEquals(62, arbitraryDataFile.size()); + assertEquals("3eyjYjturyVe61grRX42bprGr3Cvw6ehTy4iknVnosDj", arbitraryDataFile.digest58()); + + // Split into 7 chunks, each 10 bytes long + arbitraryDataFile.split(10); + assertEquals(7, arbitraryDataFile.chunkCount()); + + // Delete the original file + arbitraryDataFile.delete(); + assertFalse(arbitraryDataFile.exists()); + assertEquals(0, arbitraryDataFile.size()); + + // Now rebuild the original file from the chunks + assertEquals(7, arbitraryDataFile.chunkCount()); + arbitraryDataFile.join(); + + // Validate that the original file is intact + assertTrue(arbitraryDataFile.exists()); + assertEquals(62, arbitraryDataFile.size()); + assertEquals("3eyjYjturyVe61grRX42bprGr3Cvw6ehTy4iknVnosDj", arbitraryDataFile.digest58()); + } + + @Test + public void testSplitAndJoinWithLargeFiles() throws DataException { + int fileSize = (int) (5.5f * 1024 * 1024); // 5.5MiB + byte[] randomData = new byte[fileSize]; + new Random().nextBytes(randomData); // No need for SecureRandom here + + ArbitraryDataFile arbitraryDataFile = new ArbitraryDataFile(randomData, null); + assertTrue(arbitraryDataFile.exists()); + assertEquals(fileSize, arbitraryDataFile.size()); + String originalFileDigest = arbitraryDataFile.digest58(); + + // Split into chunks using 1MiB chunk size + arbitraryDataFile.split(1 * 1024 * 1024); + assertEquals(6, arbitraryDataFile.chunkCount()); + + // Delete the original file + arbitraryDataFile.delete(); + assertFalse(arbitraryDataFile.exists()); + assertEquals(0, arbitraryDataFile.size()); + + // Now rebuild the original file from the chunks + assertEquals(6, arbitraryDataFile.chunkCount()); + arbitraryDataFile.join(); + + // Validate that the original file is intact + assertTrue(arbitraryDataFile.exists()); + assertEquals(fileSize, arbitraryDataFile.size()); + assertEquals(originalFileDigest, arbitraryDataFile.digest58()); + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataMergeTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataMergeTests.java new file mode 100644 index 00000000..a366ef12 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataMergeTests.java @@ -0,0 +1,445 @@ +package org.qortal.test.arbitrary; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.arbitrary.ArbitraryDataCombiner; +import org.qortal.arbitrary.ArbitraryDataCreatePatch; +import org.qortal.arbitrary.ArbitraryDataDigest; +import org.qortal.crypto.Crypto; +import org.qortal.repository.DataException; +import org.qortal.test.common.ArbitraryUtils; +import org.qortal.test.common.Common; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; 
+import java.util.Arrays; +import java.util.Objects; +import java.util.Random; + +import static org.junit.Assert.*; + +public class ArbitraryDataMergeTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testCreateAndMergePatch() throws IOException, DataException { + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + Path path2 = Paths.get("src/test/resources/arbitrary/demo2"); + + // Generate random signature for the purposes of validation + byte[] signature = new byte[32]; + new Random().nextBytes(signature); + + // Create a patch using the differences in path2 compared with path1 + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(path1, path2, signature); + patch.create(); + Path patchPath = patch.getFinalPath(); + assertTrue(Files.exists(patchPath)); + + // Check that lorem1, 2, 4, and 5 exist + assertTrue(Files.exists(Paths.get(patchPath.toString(), "lorem1.txt"))); + assertTrue(Files.exists(Paths.get(patchPath.toString(), "lorem2.txt"))); + assertTrue(Files.exists(Paths.get(patchPath.toString(), "dir1", "lorem4.txt"))); + assertTrue(Files.exists(Paths.get(patchPath.toString(), "dir1", "dir2", "lorem5.txt"))); + + // Ensure that lorem3 doesn't exist, as this file is identical in the original paths + assertFalse(Files.exists(Paths.get(patchPath.toString(), "lorem3.txt"))); + + // Ensure that the patch files differ from the first path (except for lorem3, which is missing) + assertFalse(Arrays.equals( + Crypto.digest(Paths.get(path1.toString(), "lorem1.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "lorem1.txt").toFile()) + )); + assertFalse(Arrays.equals( + Crypto.digest(Paths.get(path1.toString(), "lorem2.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "lorem2.txt").toFile()) + )); + assertFalse(Arrays.equals( + Crypto.digest(Paths.get(path1.toString(), "dir1", "lorem4.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "dir1", "lorem4.txt").toFile()) + )); + assertFalse(Arrays.equals( + Crypto.digest(Paths.get(path1.toString(), "dir1", "dir2", "lorem5.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "dir1", "dir2", "lorem5.txt").toFile()) + )); + + // Ensure that patch files 1 and 4 differ from the original files + assertFalse(Arrays.equals( + Crypto.digest(Paths.get(path2.toString(), "lorem1.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "lorem1.txt").toFile()) + )); + assertFalse(Arrays.equals( + Crypto.digest(Paths.get(path2.toString(), "dir1", "lorem4.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "dir1", "lorem4.txt").toFile()) + )); + + // Files 2 and 5 should match the original files, because their contents were + // too small to create a patch file smaller than the original file + assertArrayEquals( + Crypto.digest(Paths.get(path2.toString(), "lorem2.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "lorem2.txt").toFile()) + ); + assertArrayEquals( + Crypto.digest(Paths.get(path2.toString(), "dir1", "dir2", "lorem5.txt").toFile()), + Crypto.digest(Paths.get(patchPath.toString(), "dir1", "dir2", "lorem5.txt").toFile()) + ); + + // Now merge the patch with the original path + ArbitraryDataCombiner combiner = new ArbitraryDataCombiner(path1, patchPath, signature); + combiner.setShouldValidateHashes(true); + combiner.combine(); + Path finalPath = combiner.getFinalPath(); + + // Ensure that all files exist in the final path (including lorem3) + 
assertTrue(Files.exists(Paths.get(finalPath.toString(), "lorem1.txt"))); + assertTrue(Files.exists(Paths.get(finalPath.toString(), "lorem2.txt"))); + assertTrue(Files.exists(Paths.get(finalPath.toString(), "lorem3.txt"))); + assertTrue(Files.exists(Paths.get(finalPath.toString(), "dir1", "lorem4.txt"))); + assertTrue(Files.exists(Paths.get(finalPath.toString(), "dir1", "dir2", "lorem5.txt"))); + + // Ensure that the files match those in path2 exactly + assertArrayEquals( + Crypto.digest(Paths.get(finalPath.toString(), "lorem1.txt").toFile()), + Crypto.digest(Paths.get(path2.toString(), "lorem1.txt").toFile()) + ); + assertArrayEquals( + Crypto.digest(Paths.get(finalPath.toString(), "lorem2.txt").toFile()), + Crypto.digest(Paths.get(path2.toString(), "lorem2.txt").toFile()) + ); + assertArrayEquals( + Crypto.digest(Paths.get(finalPath.toString(), "lorem3.txt").toFile()), + Crypto.digest(Paths.get(path2.toString(), "lorem3.txt").toFile()) + ); + assertArrayEquals( + Crypto.digest(Paths.get(finalPath.toString(), "dir1", "lorem4.txt").toFile()), + Crypto.digest(Paths.get(path2.toString(), "dir1", "lorem4.txt").toFile()) + ); + assertArrayEquals( + Crypto.digest(Paths.get(finalPath.toString(), "dir1", "dir2", "lorem5.txt").toFile()), + Crypto.digest(Paths.get(path2.toString(), "dir1", "dir2", "lorem5.txt").toFile()) + ); + + // Also check that the directory digests match + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(path2); + path2Digest.compute(); + ArbitraryDataDigest finalPathDigest = new ArbitraryDataDigest(finalPath); + finalPathDigest.compute(); + assertEquals(path2Digest.getHash58(), finalPathDigest.getHash58()); + } + + @Test + public void testIdenticalPaths() throws IOException, DataException { + Path path = Paths.get("src/test/resources/arbitrary/demo1"); + + // Create a patch from two identical paths + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(path, path, new byte[16]); + + // Ensure that an exception is thrown due to matching states + try { + patch.create(); + fail("Creating patch should fail due to matching states"); + + } catch (DataException expectedException) { + assertEquals("Current state matches previous state. 
Nothing to do.", expectedException.getMessage()); + } + + } + + @Test + public void testMergeBinaryFiles() throws IOException, DataException { + // Create two files in random temp directories + Path tempDir1 = Files.createTempDirectory("testMergeBinaryFiles1"); + Path tempDir2 = Files.createTempDirectory("testMergeBinaryFiles2"); + File file1 = new File(Paths.get(tempDir1.toString(), "file.bin").toString()); + File file2 = new File(Paths.get(tempDir2.toString(), "file.bin").toString()); + file1.deleteOnExit(); + file2.deleteOnExit(); + + // Write random data to the first file + byte[] initialData = new byte[1024]; + new Random().nextBytes(initialData); + Files.write(file1.toPath(), initialData); + byte[] file1Digest = Crypto.digest(file1); + + // Write slightly modified data to the second file (bytes 100-116 are zeroed out) + byte[] updatedData = Arrays.copyOf(initialData, initialData.length); + final ByteBuffer byteBuffer = ByteBuffer.wrap(updatedData); + byteBuffer.position(100); + byteBuffer.put(new byte[16]); + updatedData = byteBuffer.array(); + Files.write(file2.toPath(), updatedData); + byte[] file2Digest = Crypto.digest(file2); + + // Make sure the two arrays are different + assertFalse(Arrays.equals(initialData, updatedData)); + + // And double check that they are both 1024 bytes long + assertEquals(1024, initialData.length); + assertEquals(1024, updatedData.length); + + // Ensure both files exist + assertTrue(Files.exists(file1.toPath())); + assertTrue(Files.exists(file2.toPath())); + + // Generate random signature for the purposes of validation + byte[] signature = new byte[32]; + new Random().nextBytes(signature); + + // Create a patch from the two paths + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(tempDir1, tempDir2, signature); + patch.create(); + Path patchPath = patch.getFinalPath(); + assertTrue(Files.exists(patchPath)); + + // Check that the patch file exists + Path patchFilePath = Paths.get(patchPath.toString(), "file.bin"); + assertTrue(Files.exists(patchFilePath)); + byte[] patchDigest = Crypto.digest(patchFilePath.toFile()); + + // Ensure that the patch file matches file2 exactly + // This is because binary files cannot currently be patched, and so the complete file + // is included instead + assertArrayEquals(patchDigest, file2Digest); + + // Make sure that the patch file is different from file1 + assertFalse(Arrays.equals(patchDigest, file1Digest)); + + // Now merge the patch with the original path + ArbitraryDataCombiner combiner = new ArbitraryDataCombiner(tempDir1, patchPath, signature); + combiner.setShouldValidateHashes(true); + combiner.combine(); + Path finalPath = combiner.getFinalPath(); + + // Check that the directory digests match + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(tempDir2); + path2Digest.compute(); + ArbitraryDataDigest finalPathDigest = new ArbitraryDataDigest(finalPath); + finalPathDigest.compute(); + assertEquals(path2Digest.getHash58(), finalPathDigest.getHash58()); + } + + @Test + public void testMergeRandomStrings() throws IOException, DataException { + // Create two files in random temp directories + Path tempDir1 = Files.createTempDirectory("testMergeRandomStrings"); + Path tempDir2 = Files.createTempDirectory("testMergeRandomStrings"); + File file1 = new File(Paths.get(tempDir1.toString(), "file.txt").toString()); + File file2 = new File(Paths.get(tempDir2.toString(), "file.txt").toString()); + file1.deleteOnExit(); + file2.deleteOnExit(); + + // Write a random string to the first file + 
BufferedWriter file1Writer = new BufferedWriter(new FileWriter(file1)); + String initialString = ArbitraryUtils.generateRandomString(1024); + // Add a newline every 50 chars + initialString = initialString.replaceAll("(.{50})", "$1\n"); + file1Writer.write(initialString); + file1Writer.newLine(); + file1Writer.close(); + byte[] file1Digest = Crypto.digest(file1); + + // Write a slightly modified string to the second file + BufferedWriter file2Writer = new BufferedWriter(new FileWriter(file2)); + String updatedString = initialString.concat("-edit"); + file2Writer.write(updatedString); + file2Writer.newLine(); + file2Writer.close(); + byte[] file2Digest = Crypto.digest(file2); + + // Make sure the two strings are different + assertFalse(Objects.equals(initialString, updatedString)); + + // Ensure both files exist + assertTrue(Files.exists(file1.toPath())); + assertTrue(Files.exists(file2.toPath())); + + // Generate random signature for the purposes of validation + byte[] signature = new byte[32]; + new Random().nextBytes(signature); + + // Create a patch from the two paths + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(tempDir1, tempDir2, signature); + patch.create(); + Path patchPath = patch.getFinalPath(); + assertTrue(Files.exists(patchPath)); + + // Check that the patch file exists + Path patchFilePath = Paths.get(patchPath.toString(), "file.txt"); + assertTrue(Files.exists(patchFilePath)); + byte[] patchDigest = Crypto.digest(patchFilePath.toFile()); + + // Make sure that the patch file is different from file1 and file2 + assertFalse(Arrays.equals(patchDigest, file1Digest)); + assertFalse(Arrays.equals(patchDigest, file2Digest)); + + // Now merge the patch with the original path + ArbitraryDataCombiner combiner = new ArbitraryDataCombiner(tempDir1, patchPath, signature); + combiner.setShouldValidateHashes(true); + combiner.combine(); + Path finalPath = combiner.getFinalPath(); + + // Check that the directory digests match + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(tempDir2); + path2Digest.compute(); + ArbitraryDataDigest finalPathDigest = new ArbitraryDataDigest(finalPath); + finalPathDigest.compute(); + assertEquals(path2Digest.getHash58(), finalPathDigest.getHash58()); + + } + + @Test + public void testMergeRandomStringsWithoutTrailingNewlines() throws IOException, DataException { + // Create two files in random temp directories + Path tempDir1 = Files.createTempDirectory("testMergeRandomStrings"); + Path tempDir2 = Files.createTempDirectory("testMergeRandomStrings"); + File file1 = new File(Paths.get(tempDir1.toString(), "file.txt").toString()); + File file2 = new File(Paths.get(tempDir2.toString(), "file.txt").toString()); + file1.deleteOnExit(); + file2.deleteOnExit(); + + // Write a random string to the first file + BufferedWriter file1Writer = new BufferedWriter(new FileWriter(file1)); + String initialString = ArbitraryUtils.generateRandomString(1024); + // Add a newline every 50 chars + initialString = initialString.replaceAll("(.{50})", "$1\n"); + // Remove newline at end of string + initialString = initialString.stripTrailing(); + file1Writer.write(initialString); + // No newline + file1Writer.close(); + byte[] file1Digest = Crypto.digest(file1); + + // Write a slightly modified string to the second file + BufferedWriter file2Writer = new BufferedWriter(new FileWriter(file2)); + String updatedString = initialString.concat("-edit"); + file2Writer.write(updatedString); + // No newline + file2Writer.close(); + byte[] file2Digest = 
Crypto.digest(file2); + + // Make sure the two strings are different + assertFalse(Objects.equals(initialString, updatedString)); + + // Ensure both files exist + assertTrue(Files.exists(file1.toPath())); + assertTrue(Files.exists(file2.toPath())); + + // Generate random signature for the purposes of validation + byte[] signature = new byte[32]; + new Random().nextBytes(signature); + + // Create a patch from the two paths + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(tempDir1, tempDir2, signature); + patch.create(); + Path patchPath = patch.getFinalPath(); + assertTrue(Files.exists(patchPath)); + + // Check that the patch file exists + Path patchFilePath = Paths.get(patchPath.toString(), "file.txt"); + assertTrue(Files.exists(patchFilePath)); + byte[] patchDigest = Crypto.digest(patchFilePath.toFile()); + + // Make sure that the patch file is different from file1 + assertFalse(Arrays.equals(patchDigest, file1Digest)); + + // Make sure that the patch file is different from file2 + assertFalse(Arrays.equals(patchDigest, file2Digest)); + + // Now merge the patch with the original path + ArbitraryDataCombiner combiner = new ArbitraryDataCombiner(tempDir1, patchPath, signature); + combiner.setShouldValidateHashes(true); + combiner.combine(); + Path finalPath = combiner.getFinalPath(); + + // Check that the directory digests match + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(tempDir2); + path2Digest.compute(); + ArbitraryDataDigest finalPathDigest = new ArbitraryDataDigest(finalPath); + finalPathDigest.compute(); + assertEquals(path2Digest.getHash58(), finalPathDigest.getHash58()); + + } + + @Test + public void testMergeRandomLargeStrings() throws IOException, DataException { + // Create two files in random temp directories + Path tempDir1 = Files.createTempDirectory("testMergeRandomStrings"); + Path tempDir2 = Files.createTempDirectory("testMergeRandomStrings"); + File file1 = new File(Paths.get(tempDir1.toString(), "file.txt").toString()); + File file2 = new File(Paths.get(tempDir2.toString(), "file.txt").toString()); + file1.deleteOnExit(); + file2.deleteOnExit(); + + // Write a random string to the first file + BufferedWriter file1Writer = new BufferedWriter(new FileWriter(file1)); + String initialString = ArbitraryUtils.generateRandomString(110 * 1024); + // Add a newline every 50 chars + initialString = initialString.replaceAll("(.{50})", "$1\n"); + file1Writer.write(initialString); + file1Writer.newLine(); + file1Writer.close(); + byte[] file1Digest = Crypto.digest(file1); + + // Write a slightly modified string to the second file + BufferedWriter file2Writer = new BufferedWriter(new FileWriter(file2)); + String updatedString = initialString.concat("-edit"); + file2Writer.write(updatedString); + file2Writer.newLine(); + file2Writer.close(); + byte[] file2Digest = Crypto.digest(file2); + + // Make sure the two strings are different + assertFalse(Objects.equals(initialString, updatedString)); + + // Ensure both files exist + assertTrue(Files.exists(file1.toPath())); + assertTrue(Files.exists(file2.toPath())); + + // Generate random signature for the purposes of validation + byte[] signature = new byte[32]; + new Random().nextBytes(signature); + + // Create a patch from the two paths + ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(tempDir1, tempDir2, signature); + patch.create(); + Path patchPath = patch.getFinalPath(); + assertTrue(Files.exists(patchPath)); + + // Check that the patch file exists + Path patchFilePath = 
Paths.get(patchPath.toString(), "file.txt"); + assertTrue(Files.exists(patchFilePath)); + byte[] patchDigest = Crypto.digest(patchFilePath.toFile()); + + // The patch file should be identical to file2 because the source files + // were over the maximum size limit for creating patches + assertArrayEquals(patchDigest, file2Digest); + + // Make sure that the patch file is different from file1 + assertFalse(Arrays.equals(patchDigest, file1Digest)); + + // Now merge the patch with the original path + ArbitraryDataCombiner combiner = new ArbitraryDataCombiner(tempDir1, patchPath, signature); + combiner.setShouldValidateHashes(true); + combiner.combine(); + Path finalPath = combiner.getFinalPath(); + + // Check that the directory digests match + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(tempDir2); + path2Digest.compute(); + ArbitraryDataDigest finalPathDigest = new ArbitraryDataDigest(finalPath); + finalPathDigest.compute(); + assertEquals(path2Digest.getHash58(), finalPathDigest.getHash58()); + + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStorageCapacityTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStorageCapacityTests.java new file mode 100644 index 00000000..c38327c3 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStorageCapacityTests.java @@ -0,0 +1,199 @@ +package org.qortal.test.arbitrary; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataCleanupManager; +import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.controller.arbitrary.ArbitraryDataStorageManager; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.RegisterNameTransactionData; +import org.qortal.list.ResourceListManager; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.test.common.ArbitraryUtils; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.transaction.TestTransaction; +import org.qortal.utils.Base58; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.junit.Assert.*; + +public class ArbitraryDataStorageCapacityTests extends Common { + + @Before + public void beforeTest() throws DataException, InterruptedException, IllegalAccessException { + Common.useDefaultSettings(); + this.deleteDataDirectories(); + this.deleteListsDirectory(); + + // Set difficulty to 1 to speed up the tests + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + } + + @After + public void afterTest() throws DataException { + this.deleteDataDirectories(); + this.deleteListsDirectory(); + ArbitraryDataStorageManager.getInstance().shutdown(); + } + + + @Test + public void testCalculateTotalStorageCapacity() { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + double storageFullThreshold = 0.9; // 90% + Long now = NTP.getTime(); + assertNotNull("NTP time must be synced", now); + long expectedTotalStorageCapacity = 
Settings.getInstance().getMaxStorageCapacity(); + + // Capacity isn't initially calculated + assertNull(storageManager.getStorageCapacity()); + assertEquals(0L, storageManager.getTotalDirectorySize()); + assertFalse(storageManager.isStorageCapacityCalculated()); + + // We need to calculate the directory size because we haven't yet + assertTrue(storageManager.shouldCalculateDirectorySize(now)); + storageManager.calculateDirectorySize(now); + assertTrue(storageManager.isStorageCapacityCalculated()); + + // Storage capacity should equal the value specified in settings + assertNotNull(storageManager.getStorageCapacity()); + assertEquals(expectedTotalStorageCapacity, storageManager.getStorageCapacity().longValue()); + + // We shouldn't calculate storage capacity again so soon + now += 9 * 60 * 1000L; + assertFalse(storageManager.shouldCalculateDirectorySize(now)); + + // ... but after 10 minutes we should recalculate + now += 1 * 60 * 1000L + 1L; + assertTrue(storageManager.shouldCalculateDirectorySize(now)); + } + + @Test + public void testCalculateStorageCapacityPerName() { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + ResourceListManager resourceListManager = ResourceListManager.getInstance(); + double storageFullThreshold = 0.9; // 90% + Long now = NTP.getTime(); + assertNotNull("NTP time must be synced", now); + + // Capacity isn't initially calculated + assertNull(storageManager.getStorageCapacity()); + assertEquals(0L, storageManager.getTotalDirectorySize()); + assertFalse(storageManager.isStorageCapacityCalculated()); + + // We need to calculate the total directory size because we haven't yet + assertTrue(storageManager.shouldCalculateDirectorySize(now)); + storageManager.calculateDirectorySize(now); + assertTrue(storageManager.isStorageCapacityCalculated()); + + // Storage capacity should initially equal the total + assertEquals(0, resourceListManager.getItemCountForList("followedNames")); + long totalStorageCapacity = storageManager.getStorageCapacityIncludingThreshold(storageFullThreshold); + assertEquals(totalStorageCapacity, storageManager.storageCapacityPerName(storageFullThreshold)); + + // Follow some names + assertTrue(resourceListManager.addToList("followedNames", "Test1", false)); + assertTrue(resourceListManager.addToList("followedNames", "Test2", false)); + assertTrue(resourceListManager.addToList("followedNames", "Test3", false)); + assertTrue(resourceListManager.addToList("followedNames", "Test4", false)); + + // Ensure the followed name count is correct + assertEquals(4, resourceListManager.getItemCountForList("followedNames")); + + // Storage space per name should be the total storage capacity divided by the number of names + long expectedStorageCapacityPerName = (long)(totalStorageCapacity / 4.0f); + assertEquals(expectedStorageCapacityPerName, storageManager.storageCapacityPerName(storageFullThreshold)); + } + + + private void deleteDataDirectories() { + // Delete data directory if exists + Path dataPath = Paths.get(Settings.getInstance().getDataPath()); + try { + FileUtils.deleteDirectory(dataPath.toFile()); + } catch (IOException e) { + + } + + // Delete temp data directory if exists + Path tempDataPath = Paths.get(Settings.getInstance().getTempDataPath()); + try { + FileUtils.deleteDirectory(tempDataPath.toFile()); + } catch (IOException e) { + + } + } + + @Test + public void testDeleteRandomFilesForName() throws DataException, IOException, InterruptedException, IllegalAccessException { + try (final Repository 
repository = RepositoryManager.getRepository()) { + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Set originalCopyIndicatorFileEnabled to false, otherwise nothing will be deleted as it all originates from this node + FieldUtils.writeField(Settings.getInstance(), "originalCopyIndicatorFileEnabled", false, true); + + // Alice hosts some data (with 10 chunks) + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String aliceName = "alice"; + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), aliceName, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + Path alicePath = ArbitraryUtils.generateRandomDataPath(dataLength); + ArbitraryDataFile aliceArbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, Base58.encode(alice.getPublicKey()), alicePath, aliceName, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize); + + // Bob hosts some data too (also with 10 chunks) + PrivateKeyAccount bob = Common.getTestAccount(repository, "bob"); + String bobName = "bob"; + transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(bob), bobName, ""); + TransactionUtils.signAndMint(repository, transactionData, bob); + Path bobPath = ArbitraryUtils.generateRandomDataPath(dataLength); + ArbitraryDataFile bobArbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, Base58.encode(bob.getPublicKey()), bobPath, bobName, identifier, ArbitraryTransactionData.Method.PUT, service, bob, chunkSize); + + // All 20 chunks should exist + assertEquals(10, aliceArbitraryDataFile.chunkCount()); + assertTrue(aliceArbitraryDataFile.allChunksExist()); + assertEquals(10, bobArbitraryDataFile.chunkCount()); + assertTrue(bobArbitraryDataFile.allChunksExist()); + + // Now pretend that Bob has reached his storage limit - this should delete random files + // Run it 10 times to remove the likelihood of the randomizer always picking Alice's files + for (int i=0; i<10; i++) { + ArbitraryDataCleanupManager.getInstance().storageLimitReachedForName(repository, bobName); + } + + // Alice should still have all chunks + assertTrue(aliceArbitraryDataFile.allChunksExist()); + + // Bob should be missing some chunks + assertFalse(bobArbitraryDataFile.allChunksExist()); + + } + } + + private void deleteListsDirectory() { + // Delete lists directory if exists + Path listsPath = Paths.get(Settings.getInstance().getListsPath()); + try { + FileUtils.deleteDirectory(listsPath.toFile()); + } catch (IOException e) { + + } + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java new file mode 100644 index 00000000..5c88956e --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java @@ -0,0 +1,273 @@ +package org.qortal.test.arbitrary; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.arbitrary.ArbitraryDataTransactionBuilder; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataStorageManager; +import 
org.qortal.controller.arbitrary.ArbitraryDataStorageManager.StoragePolicy; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.ArbitraryTransactionData.Method; +import org.qortal.data.transaction.RegisterNameTransactionData; +import org.qortal.list.ResourceListManager; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.transaction.TestTransaction; +import org.qortal.utils.Base58; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.junit.Assert.*; + +public class ArbitraryDataStoragePolicyTests extends Common { + + @Before + public void beforeTest() throws DataException, InterruptedException { + Common.useDefaultSettings(); + this.deleteDataDirectories(); + this.deleteListsDirectory(); + ArbitraryDataStorageManager.getInstance().start(); + + // Wait for storage space to be calculated + while (!ArbitraryDataStorageManager.getInstance().isStorageCapacityCalculated()) { + Thread.sleep(100L); + } + } + + @After + public void afterTest() throws DataException { + this.deleteDataDirectories(); + this.deleteListsDirectory(); + ArbitraryDataStorageManager.getInstance().shutdown(); + } + + @Test + public void testFollowedAndViewed() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "Test"; + + // Register the name to Alice + TransactionUtils.signAndMint(repository, new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""), alice); + + // Create transaction + ArbitraryTransactionData transactionData = this.createTxnWithName(repository, alice, name); + + // Add name to followed list + assertTrue(ResourceListManager.getInstance().addToList("followedNames", name, false)); + + // We should store and pre-fetch data for this transaction + assertEquals(StoragePolicy.FOLLOWED_AND_VIEWED, Settings.getInstance().getStoragePolicy()); + assertTrue(storageManager.canStoreData(transactionData)); + assertTrue(storageManager.shouldPreFetchData(repository, transactionData)); + + // Now unfollow the name + assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); + + // We should store but not pre-fetch data for this transaction + assertTrue(storageManager.canStoreData(transactionData)); + assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); + } + } + + @Test + public void testFollowedOnly() throws DataException, IllegalAccessException { + try (final Repository repository = RepositoryManager.getRepository()) { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "Test"; + + // Set the storage policy to "FOLLOWED" + FieldUtils.writeField(Settings.getInstance(), "storagePolicy", "FOLLOWED", true); + + // Register the name to Alice + TransactionUtils.signAndMint(repository, new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""), alice); + + // Create transaction + ArbitraryTransactionData transactionData = 
this.createTxnWithName(repository, alice, name); + + // Add name to followed list + assertTrue(ResourceListManager.getInstance().addToList("followedNames", name, false)); + + // We should store and pre-fetch data for this transaction + assertEquals(StoragePolicy.FOLLOWED, Settings.getInstance().getStoragePolicy()); + assertTrue(storageManager.canStoreData(transactionData)); + assertTrue(storageManager.shouldPreFetchData(repository, transactionData)); + + // Now unfollow the name + assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); + + // We shouldn't store or pre-fetch data for this transaction + assertFalse(storageManager.canStoreData(transactionData)); + assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); + } + } + + @Test + public void testViewedOnly() throws DataException, IllegalAccessException { + try (final Repository repository = RepositoryManager.getRepository()) { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "Test"; + + // Set the storage policy to "VIEWED" + FieldUtils.writeField(Settings.getInstance(), "storagePolicy", "VIEWED", true); + + // Register the name to Alice + TransactionUtils.signAndMint(repository, new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""), alice); + + // Create transaction + ArbitraryTransactionData transactionData = this.createTxnWithName(repository, alice, name); + + // Add name to followed list + assertTrue(ResourceListManager.getInstance().addToList("followedNames", name, false)); + + // We should store but not pre-fetch data for this transaction + assertEquals(StoragePolicy.VIEWED, Settings.getInstance().getStoragePolicy()); + assertTrue(storageManager.canStoreData(transactionData)); + assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); + + // Now unfollow the name + assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); + + // We should store but not pre-fetch data for this transaction + assertTrue(storageManager.canStoreData(transactionData)); + assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); + } + } + + @Test + public void testAll() throws DataException, IllegalAccessException { + try (final Repository repository = RepositoryManager.getRepository()) { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "Test"; + + // Set the storage policy to "ALL" + FieldUtils.writeField(Settings.getInstance(), "storagePolicy", "ALL", true); + + // Register the name to Alice + TransactionUtils.signAndMint(repository, new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""), alice); + + // Create transaction + ArbitraryTransactionData transactionData = this.createTxnWithName(repository, alice, name); + + // Add name to followed list + assertTrue(ResourceListManager.getInstance().addToList("followedNames", name, false)); + + // We should store and pre-fetch data for this transaction + assertEquals(StoragePolicy.ALL, Settings.getInstance().getStoragePolicy()); + assertTrue(storageManager.canStoreData(transactionData)); + assertTrue(storageManager.shouldPreFetchData(repository, transactionData)); + + // Now unfollow the name + assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, 
false)); + + // We should store and pre-fetch data for this transaction + assertTrue(storageManager.canStoreData(transactionData)); + assertTrue(storageManager.shouldPreFetchData(repository, transactionData)); + } + } + + @Test + public void testNone() throws DataException, IllegalAccessException { + try (final Repository repository = RepositoryManager.getRepository()) { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "Test"; + + // Set the storage policy to "NONE" + FieldUtils.writeField(Settings.getInstance(), "storagePolicy", "NONE", true); + + // Register the name to Alice + TransactionUtils.signAndMint(repository, new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""), alice); + + // Create transaction + ArbitraryTransactionData transactionData = this.createTxnWithName(repository, alice, name); + + // Add name to followed list + assertTrue(ResourceListManager.getInstance().addToList("followedNames", name, false)); + + // We shouldn't store or pre-fetch data for this transaction + assertEquals(StoragePolicy.NONE, Settings.getInstance().getStoragePolicy()); + assertFalse(storageManager.canStoreData(transactionData)); + assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); + + // Now unfollow the name + assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); + + // We shouldn't store or pre-fetch data for this transaction + assertFalse(storageManager.canStoreData(transactionData)); + assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); + } + } + + @Test + public void testTransactionWithoutName() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance(); + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = null; + + // Create transaction + ArbitraryTransactionData transactionData = this.createTxnWithName(repository, alice, name); + + // We should store but not pre-fetch data for this transaction + assertTrue(storageManager.canStoreData(transactionData)); + assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); + } + } + + private ArbitraryTransactionData createTxnWithName(Repository repository, PrivateKeyAccount acc, String name) throws DataException { + String publicKey58 = Base58.encode(acc.getPublicKey()); + Path path = Paths.get("src/test/resources/arbitrary/demo1"); + + ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder( + repository, publicKey58, path, name, Method.PUT, Service.ARBITRARY_DATA, null); + + txnBuilder.build(); + ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData(); + + return transactionData; + } + + private void deleteDataDirectories() { + // Delete data directory if exists + Path dataPath = Paths.get(Settings.getInstance().getDataPath()); + try { + FileUtils.deleteDirectory(dataPath.toFile()); + } catch (IOException e) { + + } + + // Delete temp data directory if exists + Path tempDataPath = Paths.get(Settings.getInstance().getTempDataPath()); + try { + FileUtils.deleteDirectory(tempDataPath.toFile()); + } catch (IOException e) { + + } + } + + private void deleteListsDirectory() { + // Delete lists directory if exists + Path listsPath = Paths.get(Settings.getInstance().getListsPath()); + try { 
+ FileUtils.deleteDirectory(listsPath.toFile()); + } catch (IOException e) { + + } + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataTests.java new file mode 100644 index 00000000..e8e4a288 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataTests.java @@ -0,0 +1,471 @@ +package org.qortal.test.arbitrary; + +import org.apache.commons.lang3.reflect.FieldUtils; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.arbitrary.ArbitraryDataDigest; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataFile.*; +import org.qortal.arbitrary.ArbitraryDataReader; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.crypto.Crypto; +import org.qortal.data.transaction.ArbitraryTransactionData.*; +import org.qortal.data.transaction.RegisterNameTransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.test.common.ArbitraryUtils; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.transaction.TestTransaction; +import org.qortal.utils.Base58; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Objects; + +import static org.junit.Assert.*; + +public class ArbitraryDataTests extends Common { + + @Before + public void beforeTest() throws DataException, IllegalAccessException { + Common.useDefaultSettings(); + + // Set difficulty to 1 to speed up the tests + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + } + + @Test + public void testCombineMultipleLayers() throws DataException, IOException, MissingDataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + + // Register the name to Alice + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice); + + // Create PATCH transaction + Path path2 = Paths.get("src/test/resources/arbitrary/demo2"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path2, name, identifier, Method.PATCH, service, alice); + + // Create another PATCH transaction + Path path3 = Paths.get("src/test/resources/arbitrary/demo3"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path3, name, identifier, Method.PATCH, service, alice); + + // Now build the latest data state for this name + ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, 
ResourceIdType.NAME, service, identifier); + arbitraryDataReader.loadSynchronously(true); + Path finalPath = arbitraryDataReader.getFilePath(); + + // Ensure it exists + assertTrue(Files.exists(finalPath)); + + // Its directory hash should match the hash of demo3 + ArbitraryDataDigest path3Digest = new ArbitraryDataDigest(path3); + path3Digest.compute(); + ArbitraryDataDigest finalPathDigest = new ArbitraryDataDigest(finalPath); + finalPathDigest.compute(); + assertEquals(path3Digest.getHash58(), finalPathDigest.getHash58()); + + // .. and its directory hash should also match the one included in the metadata + ArbitraryDataMetadataPatch patchMetadata = new ArbitraryDataMetadataPatch(finalPath); + patchMetadata.read(); + assertArrayEquals(patchMetadata.getCurrentHash(), path3Digest.getHash()); + + } + } + + @Test + public void testPatchBeforePut() throws DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + + // Create PATCH transaction, ensuring that an exception is thrown + try { + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PATCH, service, alice); + fail("Creating transaction should fail due to nonexistent PUT transaction"); + + } catch (DataException expectedException) { + assertEquals(String.format("Couldn't find PUT transaction for " + + "name %s, service %s and identifier ", name, service), expectedException.getMessage()); + } + + } + } + + @Test + public void testNameDoesNotExist() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Create PUT transaction, ensuring that an exception is thrown + try { + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice); + fail("Creating transaction should fail due to the name being unregistered"); + + } catch (DataException expectedException) { + assertEquals("Arbitrary transaction invalid: NAME_DOES_NOT_EXIST", expectedException.getMessage()); + } + } + } + + @Test + public void testUpdateResourceOwnedByAnotherCreator() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + + // Register the name to Alice + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + 
ArbitraryUtils.createAndMintTxn(repository, Base58.encode(alice.getPublicKey()), path1, name, identifier, Method.PUT, service, alice); + + // Bob attempts to update Alice's data + PrivateKeyAccount bob = Common.getTestAccount(repository, "bob"); + + // Create PATCH transaction, ensuring that an exception is thrown + try { + Path path2 = Paths.get("src/test/resources/arbitrary/demo2"); + ArbitraryUtils.createAndMintTxn(repository, Base58.encode(bob.getPublicKey()), path2, name, identifier, Method.PATCH, service, bob); + fail("Creating transaction should fail due to the name being registered to Alice instead of Bob"); + + } catch (DataException expectedException) { + assertEquals("Arbitrary transaction invalid: INVALID_NAME_OWNER", expectedException.getMessage()); + } + } + } + + @Test + public void testUpdateResource() throws DataException, IOException, MissingDataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + + // Register the name to Alice + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice); + + // Now build the latest data state for this name + ArbitraryDataReader arbitraryDataReader1 = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier); + arbitraryDataReader1.loadSynchronously(true); + Path initialLayerPath = arbitraryDataReader1.getFilePath(); + ArbitraryDataDigest initialLayerDigest = new ArbitraryDataDigest(initialLayerPath); + initialLayerDigest.compute(); + + // Create PATCH transaction + Path path2 = Paths.get("src/test/resources/arbitrary/demo2"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path2, name, identifier, Method.PATCH, service, alice); + + // Rebuild the latest state + ArbitraryDataReader arbitraryDataReader2 = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier); + arbitraryDataReader2.loadSynchronously(false); + Path secondLayerPath = arbitraryDataReader2.getFilePath(); + ArbitraryDataDigest secondLayerDigest = new ArbitraryDataDigest(secondLayerPath); + secondLayerDigest.compute(); + + // Ensure that the second state is different to the first state + assertFalse(Arrays.equals(initialLayerDigest.getHash(), secondLayerDigest.getHash())); + + // Its directory hash should match the hash of demo2 + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(path2); + path2Digest.compute(); + assertEquals(path2Digest.getHash58(), secondLayerDigest.getHash58()); + } + } + + @Test + public void testIdentifier() throws DataException, IOException, MissingDataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = "test_identifier"; + Service service = Service.ARBITRARY_DATA; + + // Register the name to Alice + 
RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice); + + // Build the latest data state for this name, with a null identifier, ensuring that it fails + ArbitraryDataReader arbitraryDataReader1a = new ArbitraryDataReader(name, ResourceIdType.NAME, service, null); + try { + arbitraryDataReader1a.loadSynchronously(true); + fail("Loading data with null identifier should fail due to nonexistent PUT transaction"); + + } catch (DataException expectedException) { + assertEquals(String.format("Couldn't find PUT transaction for name %s, service %s " + + "and identifier ", name.toLowerCase(), service), expectedException.getMessage()); + } + + // Build the latest data state for this name, with a different identifier, ensuring that it fails + String differentIdentifier = "different_identifier"; + ArbitraryDataReader arbitraryDataReader1b = new ArbitraryDataReader(name, ResourceIdType.NAME, service, differentIdentifier); + try { + arbitraryDataReader1b.loadSynchronously(true); + fail("Loading data with incorrect identifier should fail due to nonexistent PUT transaction"); + + } catch (DataException expectedException) { + assertEquals(String.format("Couldn't find PUT transaction for name %s, service %s " + + "and identifier %s", name.toLowerCase(), service, differentIdentifier), expectedException.getMessage()); + } + + // Now build the latest data state for this name, with the correct identifier + ArbitraryDataReader arbitraryDataReader1c = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier); + arbitraryDataReader1c.loadSynchronously(true); + Path initialLayerPath = arbitraryDataReader1c.getFilePath(); + ArbitraryDataDigest initialLayerDigest = new ArbitraryDataDigest(initialLayerPath); + initialLayerDigest.compute(); + + // Create PATCH transaction + Path path2 = Paths.get("src/test/resources/arbitrary/demo2"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path2, name, identifier, Method.PATCH, service, alice); + + // Rebuild the latest state + ArbitraryDataReader arbitraryDataReader2 = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier); + arbitraryDataReader2.loadSynchronously(false); + Path secondLayerPath = arbitraryDataReader2.getFilePath(); + ArbitraryDataDigest secondLayerDigest = new ArbitraryDataDigest(secondLayerPath); + secondLayerDigest.compute(); + + // Ensure that the second state is different to the first state + assertFalse(Arrays.equals(initialLayerDigest.getHash(), secondLayerDigest.getHash())); + + // Its directory hash should match the hash of demo2 + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(path2); + path2Digest.compute(); + assertEquals(path2Digest.getHash58(), secondLayerDigest.getHash58()); + } + } + + @Test + public void testBlankIdentifier() throws DataException, IOException, MissingDataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = ""; // Blank, not null + Service service = Service.ARBITRARY_DATA; + + // Register the name 
to Alice + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + ArbitraryDataDigest path1Digest = new ArbitraryDataDigest(path1); + path1Digest.compute(); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice); + + // Now build the latest data state for this name with a null identifier, ensuring that it succeeds and the data matches + ArbitraryDataReader arbitraryDataReader1a = new ArbitraryDataReader(name, ResourceIdType.NAME, service, null); + arbitraryDataReader1a.loadSynchronously(true); + Path initialLayerPath1a = arbitraryDataReader1a.getFilePath(); + ArbitraryDataDigest initialLayerDigest1a = new ArbitraryDataDigest(initialLayerPath1a); + initialLayerDigest1a.compute(); + assertEquals(path1Digest.getHash58(), initialLayerDigest1a.getHash58()); + + // It should also be accessible via a blank string, as we treat null and blank as the same thing + ArbitraryDataReader arbitraryDataReader1b = new ArbitraryDataReader(name, ResourceIdType.NAME, service, ""); + arbitraryDataReader1b.loadSynchronously(true); + Path initialLayerPath1b = arbitraryDataReader1b.getFilePath(); + ArbitraryDataDigest initialLayerDigest1b = new ArbitraryDataDigest(initialLayerPath1b); + initialLayerDigest1b.compute(); + assertEquals(path1Digest.getHash58(), initialLayerDigest1b.getHash58()); + + // Build the latest data state for this name, with a different identifier, ensuring that it fails + String differentIdentifier = "different_identifier"; + ArbitraryDataReader arbitraryDataReader1c = new ArbitraryDataReader(name, ResourceIdType.NAME, service, differentIdentifier); + try { + arbitraryDataReader1c.loadSynchronously(true); + fail("Loading data with incorrect identifier should fail due to nonexistent PUT transaction"); + + } catch (DataException expectedException) { + assertEquals(String.format("Couldn't find PUT transaction for name %s, service %s " + + "and identifier %s", name.toLowerCase(), service, differentIdentifier), expectedException.getMessage()); + } + } + } + + @Test + public void testSingleFile() throws DataException, IOException, MissingDataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = "test1"; // Blank, not null + Service service = Service.DOCUMENT; // Can be anything for this test + + // Register the name to Alice + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = Paths.get("src/test/resources/arbitrary/demo1/lorem1.txt"); + byte[] path1FileDigest = Crypto.digest(path1.toFile()); + ArbitraryDataDigest path1DirectoryDigest = new ArbitraryDataDigest(path1.getParent()); + path1DirectoryDigest.compute(); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice); + + // Now build the latest data state for this name + ArbitraryDataReader arbitraryDataReader1 = new ArbitraryDataReader(name, ResourceIdType.NAME, service, 
identifier);
+ arbitraryDataReader1.loadSynchronously(true);
+ Path builtFilePath = Paths.get(arbitraryDataReader1.getFilePath().toString(), path1.getFileName().toString());
+ byte[] builtFileDigest = Crypto.digest(builtFilePath.toFile());
+
+ // Compare it against the hash of the original file
+ assertArrayEquals(builtFileDigest, path1FileDigest);
+
+ // The directory digest won't match because the file is renamed to "data"
+ // We may need to find a way to retain the filename
+ ArbitraryDataDigest builtDirectoryDigest = new ArbitraryDataDigest(arbitraryDataReader1.getFilePath());
+ builtDirectoryDigest.compute();
+ assertFalse(Objects.equals(path1DirectoryDigest.getHash58(), builtDirectoryDigest.getHash58()));
+ }
+ }
+
+ @Test
+ public void testOriginalCopyIndicatorFile() throws DataException, IOException {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
+ String publicKey58 = Base58.encode(alice.getPublicKey());
+ String name = "TEST"; // Can be anything for this test
+ String identifier = "test1"; // Can be anything for this test
+ Service service = Service.DOCUMENT; // Can be anything for this test
+
+ // Register the name to Alice
+ RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
+ TransactionUtils.signAndMint(repository, transactionData, alice);
+
+ // Create PUT transaction
+ Path path1 = Paths.get("src/test/resources/arbitrary/demo1/lorem1.txt");
+ ArbitraryDataDigest path1DirectoryDigest = new ArbitraryDataDigest(path1.getParent());
+ path1DirectoryDigest.compute();
+ ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice);
+
+ // Ensure that an ".original" file exists
+ Path parentPath = arbitraryDataFile.getFilePath().getParent();
+ Path originalCopyIndicatorFile = Paths.get(parentPath.toString(), ".original");
+ assertTrue(Files.exists(originalCopyIndicatorFile));
+ }
+ }
+
+ @Test
+ public void testOriginalCopyIndicatorFileDisabled() throws DataException, IOException, IllegalAccessException {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
+ String publicKey58 = Base58.encode(alice.getPublicKey());
+ String name = "TEST"; // Can be anything for this test
+ String identifier = "test1"; // Can be anything for this test
+ Service service = Service.DOCUMENT; // Can be anything for this test
+
+ // Set originalCopyIndicatorFileEnabled to false
+ FieldUtils.writeField(Settings.getInstance(), "originalCopyIndicatorFileEnabled", false, true);
+
+ // Register the name to Alice
+ RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
+ TransactionUtils.signAndMint(repository, transactionData, alice);
+
+ // Create PUT transaction
+ Path path1 = Paths.get("src/test/resources/arbitrary/demo1/lorem1.txt");
+ ArbitraryDataDigest path1DirectoryDigest = new ArbitraryDataDigest(path1.getParent());
+ path1DirectoryDigest.compute();
+ ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice);
+
+ // Ensure that an ".original" file does not exist, since the indicator file is disabled
+ Path parentPath = arbitraryDataFile.getFilePath().getParent();
+ Path originalCopyIndicatorFile = Paths.get(parentPath.toString(), ".original");
+ 
assertFalse(Files.exists(originalCopyIndicatorFile)); + } + } + + @Test + public void testNameWithSpace() throws DataException, IOException, MissingDataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "Test Name"; + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + + // Register the name to Alice + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = Paths.get("src/test/resources/arbitrary/demo1"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice); + + // Create PATCH transaction + Path path2 = Paths.get("src/test/resources/arbitrary/demo2"); + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path2, name, identifier, Method.PATCH, service, alice); + + // Now build the latest data state for this name + ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier); + arbitraryDataReader.loadSynchronously(true); + Path finalPath = arbitraryDataReader.getFilePath(); + + // Ensure it exists + assertTrue(Files.exists(finalPath)); + + // Its directory hash should match the hash of demo2 + ArbitraryDataDigest path2Digest = new ArbitraryDataDigest(path2); + path2Digest.compute(); + ArbitraryDataDigest finalPathDigest = new ArbitraryDataDigest(finalPath); + finalPathDigest.compute(); + assertEquals(path2Digest.getHash58(), finalPathDigest.getHash58()); + + // .. 
and its directory hash should also match the one included in the metadata + ArbitraryDataMetadataPatch patchMetadata = new ArbitraryDataMetadataPatch(finalPath); + patchMetadata.read(); + assertArrayEquals(patchMetadata.getCurrentHash(), path2Digest.getHash()); + + } + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryPeerTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryPeerTests.java new file mode 100644 index 00000000..ed7caa70 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryPeerTests.java @@ -0,0 +1,155 @@ +package org.qortal.test.arbitrary; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.crypto.Crypto; +import org.qortal.data.network.ArbitraryPeerData; +import org.qortal.data.network.PeerData; +import org.qortal.network.Peer; +import org.qortal.network.PeerAddress; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.Common; +import org.qortal.utils.NTP; + +import java.util.Random; + +import static org.junit.Assert.*; + +public class ArbitraryPeerTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testSaveArbitraryPeerData() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + + String peerAddress = "127.0.0.1:12392"; + String host = peerAddress.split(":")[0]; + + // Create random bytes to represent a signature + byte[] signature = new byte[64]; + new Random().nextBytes(signature); + + // Make sure we don't have an entry for this hash/peer combination + assertNull(repository.getArbitraryRepository().getArbitraryPeerDataForSignatureAndHost(signature, host)); + + // Now add this mapping to the db + Peer peer = new Peer(new PeerData(PeerAddress.fromString(peerAddress))); + ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer); + assertTrue(arbitraryPeerData.isPeerAddressValid()); + repository.getArbitraryRepository().save(arbitraryPeerData); + + // We should now have an entry for this hash/peer combination + ArbitraryPeerData retrievedArbitraryPeerData = repository.getArbitraryRepository() + .getArbitraryPeerDataForSignatureAndHost(signature, host); + assertNotNull(retrievedArbitraryPeerData); + + // .. 
and its data should match what was saved + assertArrayEquals(Crypto.digest(signature), retrievedArbitraryPeerData.getHash()); + assertEquals(peerAddress, retrievedArbitraryPeerData.getPeerAddress()); + + } + } + + @Test + public void testUpdateArbitraryPeerData() throws DataException, InterruptedException { + try (final Repository repository = RepositoryManager.getRepository()) { + + String peerAddress = "127.0.0.1:12392"; + String host = peerAddress.split(":")[0]; + + // Create random bytes to represent a signature + byte[] signature = new byte[64]; + new Random().nextBytes(signature); + + // Make sure we don't have an entry for this hash/peer combination + assertNull(repository.getArbitraryRepository().getArbitraryPeerDataForSignatureAndHost(signature, host)); + + // Now add this mapping to the db + Peer peer = new Peer(new PeerData(PeerAddress.fromString(peerAddress))); + ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer); + assertTrue(arbitraryPeerData.isPeerAddressValid()); + repository.getArbitraryRepository().save(arbitraryPeerData); + + // We should now have an entry for this hash/peer combination + ArbitraryPeerData retrievedArbitraryPeerData = repository.getArbitraryRepository() + .getArbitraryPeerDataForSignatureAndHost(signature, host); + assertNotNull(retrievedArbitraryPeerData); + + // .. and its data should match what was saved + assertArrayEquals(Crypto.digest(signature), retrievedArbitraryPeerData.getHash()); + assertEquals(peerAddress, retrievedArbitraryPeerData.getPeerAddress()); + + // All stats should be zero + assertEquals(Integer.valueOf(0), retrievedArbitraryPeerData.getSuccesses()); + assertEquals(Integer.valueOf(0), retrievedArbitraryPeerData.getFailures()); + assertEquals(Long.valueOf(0), retrievedArbitraryPeerData.getLastAttempted()); + assertEquals(Long.valueOf(0), retrievedArbitraryPeerData.getLastRetrieved()); + + // Now modify some values and re-save + retrievedArbitraryPeerData.incrementSuccesses(); retrievedArbitraryPeerData.incrementSuccesses(); // Twice + retrievedArbitraryPeerData.incrementFailures(); // Once + retrievedArbitraryPeerData.markAsAttempted(); + Thread.sleep(100); + retrievedArbitraryPeerData.markAsRetrieved(); + assertTrue(arbitraryPeerData.isPeerAddressValid()); + repository.getArbitraryRepository().save(retrievedArbitraryPeerData); + + // Retrieve data once again + ArbitraryPeerData updatedArbitraryPeerData = repository.getArbitraryRepository() + .getArbitraryPeerDataForSignatureAndHost(signature, host); + assertNotNull(updatedArbitraryPeerData); + + // Check the values + assertArrayEquals(Crypto.digest(signature), updatedArbitraryPeerData.getHash()); + assertEquals(peerAddress, updatedArbitraryPeerData.getPeerAddress()); + assertEquals(Integer.valueOf(2), updatedArbitraryPeerData.getSuccesses()); + assertEquals(Integer.valueOf(1), updatedArbitraryPeerData.getFailures()); + assertTrue(updatedArbitraryPeerData.getLastRetrieved().longValue() > 0L); + assertTrue(updatedArbitraryPeerData.getLastAttempted().longValue() > 0L); + assertTrue(updatedArbitraryPeerData.getLastRetrieved() > updatedArbitraryPeerData.getLastAttempted()); + assertTrue(NTP.getTime() - updatedArbitraryPeerData.getLastRetrieved() < 1000); + assertTrue(NTP.getTime() - updatedArbitraryPeerData.getLastAttempted() < 1000); + } + } + + @Test + public void testDuplicatePeerHost() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + + String peerAddress1 = "127.0.0.1:12392"; + String peerAddress2 = 
"127.0.0.1:62392"; + String host1 = peerAddress1.split(":")[0]; + String host2 = peerAddress2.split(":")[0]; + + // Create random bytes to represent a signature + byte[] signature = new byte[64]; + new Random().nextBytes(signature); + + // Make sure we don't have an entry for these hash/peer combinations + assertNull(repository.getArbitraryRepository().getArbitraryPeerDataForSignatureAndHost(signature, host1)); + assertNull(repository.getArbitraryRepository().getArbitraryPeerDataForSignatureAndHost(signature, host2)); + + // Now add this mapping to the db + Peer peer = new Peer(new PeerData(PeerAddress.fromString(peerAddress1))); + ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer); + assertTrue(arbitraryPeerData.isPeerAddressValid()); + repository.getArbitraryRepository().save(arbitraryPeerData); + + // We should now have an entry for this hash/peer combination + ArbitraryPeerData retrievedArbitraryPeerData = repository.getArbitraryRepository() + .getArbitraryPeerDataForSignatureAndHost(signature, host1); + assertNotNull(retrievedArbitraryPeerData); + + // And we should also have an entry for the similar peerAddress string with a matching host + ArbitraryPeerData retrievedArbitraryPeerData2 = repository.getArbitraryRepository() + .getArbitraryPeerDataForSignatureAndHost(signature, host2); + assertNotNull(retrievedArbitraryPeerData2); + } + } +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryServiceTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryServiceTests.java new file mode 100644 index 00000000..4db8bdc7 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryServiceTests.java @@ -0,0 +1,178 @@ +package org.qortal.test.arbitrary; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.arbitrary.misc.Service; +import org.qortal.arbitrary.misc.Service.ValidationResult; +import org.qortal.repository.DataException; +import org.qortal.test.common.Common; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.Random; + +import static org.junit.Assert.*; + +public class ArbitraryServiceTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testDefaultValidation() throws IOException { + // We don't validate the ARBITRARY_DATA service specifically, so we can use it to test the default validation method + byte[] data = new byte[1024]; + new Random().nextBytes(data); + + // Write to temp path + Path path = Files.createTempFile("testDefaultValidation", null); + path.toFile().deleteOnExit(); + Files.write(path, data, StandardOpenOption.CREATE); + + Service service = Service.ARBITRARY_DATA; + assertFalse(service.isValidationRequired()); + // Test validation anyway to ensure that no exception is thrown + assertEquals(ValidationResult.OK, service.validate(path)); + } + + @Test + public void testValidateWebsite() throws IOException { + // Generate some random data + byte[] data = new byte[1024]; + new Random().nextBytes(data); + + // Write the data to several files in a temp path + Path path = Files.createTempDirectory("testValidateWebsite"); + path.toFile().deleteOnExit(); + Files.write(Paths.get(path.toString(), "index.html"), data, StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "data2"), data, StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "data3"), data, 
StandardOpenOption.CREATE); + + Service service = Service.WEBSITE; + assertTrue(service.isValidationRequired()); + + // There is an index file in the root + assertEquals(ValidationResult.OK, service.validate(path)); + } + + @Test + public void testValidateWebsiteWithoutIndexFile() throws IOException { + // Generate some random data + byte[] data = new byte[1024]; + new Random().nextBytes(data); + + // Write the data to several files in a temp path + Path path = Files.createTempDirectory("testValidateWebsiteWithoutIndexFile"); + path.toFile().deleteOnExit(); + Files.write(Paths.get(path.toString(), "data1.html"), data, StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "data2"), data, StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "data3"), data, StandardOpenOption.CREATE); + + Service service = Service.WEBSITE; + assertTrue(service.isValidationRequired()); + + // There is no index file in the root + assertEquals(ValidationResult.MISSING_INDEX_FILE, service.validate(path)); + } + + @Test + public void testValidateWebsiteWithoutIndexFileInRoot() throws IOException { + // Generate some random data + byte[] data = new byte[1024]; + new Random().nextBytes(data); + + // Write the data to several files in a temp path + Path path = Files.createTempDirectory("testValidateWebsiteWithoutIndexFileInRoot"); + path.toFile().deleteOnExit(); + Files.createDirectories(Paths.get(path.toString(), "directory")); + Files.write(Paths.get(path.toString(), "directory", "index.html"), data, StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "data2"), data, StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "data3"), data, StandardOpenOption.CREATE); + + Service service = Service.WEBSITE; + assertTrue(service.isValidationRequired()); + + // There is no index file in the root + assertEquals(ValidationResult.MISSING_INDEX_FILE, service.validate(path)); + } + + @Test + public void testValidQortalMetadata() throws IOException { + // Metadata is to describe an arbitrary resource (title, description, tags, etc) + String dataString = "{\"title\":\"Test Title\", \"description\":\"Test description\", \"tags\":[\"test\"]}"; + + // Write to temp path + Path path = Files.createTempFile("testValidQortalMetadata", null); + path.toFile().deleteOnExit(); + Files.write(path, dataString.getBytes(), StandardOpenOption.CREATE); + + Service service = Service.QORTAL_METADATA; + assertTrue(service.isValidationRequired()); + assertEquals(ValidationResult.OK, service.validate(path)); + } + + @Test + public void testQortalMetadataMissingKeys() throws IOException { + // Metadata is to describe an arbitrary resource (title, description, tags, etc) + String dataString = "{\"description\":\"Test description\", \"tags\":[\"test\"]}"; + + // Write to temp path + Path path = Files.createTempFile("testQortalMetadataMissingKeys", null); + path.toFile().deleteOnExit(); + Files.write(path, dataString.getBytes(), StandardOpenOption.CREATE); + + Service service = Service.QORTAL_METADATA; + assertTrue(service.isValidationRequired()); + assertEquals(ValidationResult.MISSING_KEYS, service.validate(path)); + } + + @Test + public void testQortalMetadataTooLarge() throws IOException { + // Metadata is to describe an arbitrary resource (title, description, tags, etc) + String dataString = "{\"title\":\"Test Title\", \"description\":\"Test description\", \"tags\":[\"test\"]}"; + + // Generate some large data to go along with it + int largeDataSize = 11*1024; // Larger than allowed 10kiB 
+ byte[] largeData = new byte[largeDataSize]; + new Random().nextBytes(largeData); + + // Write to temp path + Path path = Files.createTempDirectory("testQortalMetadataTooLarge"); + path.toFile().deleteOnExit(); + Files.write(Paths.get(path.toString(), "data"), dataString.getBytes(), StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "large_data"), largeData, StandardOpenOption.CREATE); + + Service service = Service.QORTAL_METADATA; + assertTrue(service.isValidationRequired()); + assertEquals(ValidationResult.EXCEEDS_SIZE_LIMIT, service.validate(path)); + } + + @Test + public void testMultipleFileMetadata() throws IOException { + // Metadata is to describe an arbitrary resource (title, description, tags, etc) + String dataString = "{\"title\":\"Test Title\", \"description\":\"Test description\", \"tags\":[\"test\"]}"; + + // Generate some large data to go along with it + int otherDataSize = 1024; // Smaller than 10kiB limit + byte[] otherData = new byte[otherDataSize]; + new Random().nextBytes(otherData); + + // Write to temp path + Path path = Files.createTempDirectory("testMultipleFileMetadata"); + path.toFile().deleteOnExit(); + Files.write(Paths.get(path.toString(), "data"), dataString.getBytes(), StandardOpenOption.CREATE); + Files.write(Paths.get(path.toString(), "other_data"), otherData, StandardOpenOption.CREATE); + + Service service = Service.QORTAL_METADATA; + assertTrue(service.isValidationRequired()); + + // There are multiple files, so we don't know which one to parse as JSON + assertEquals(ValidationResult.MISSING_KEYS, service.validate(path)); + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java new file mode 100644 index 00000000..5f76c9c0 --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java @@ -0,0 +1,76 @@ +package org.qortal.test.arbitrary; + +import org.apache.commons.lang3.reflect.FieldUtils; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.arbitrary.ArbitraryDataDigest; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataFile.*; +import org.qortal.arbitrary.ArbitraryDataReader; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.RegisterNameTransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.ArbitraryUtils; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.transaction.TestTransaction; +import org.qortal.utils.Base58; + +import java.io.IOException; +import java.nio.file.Path; + +import static org.junit.Assert.*; + +public class ArbitraryTransactionMetadataTests extends Common { + + @Before + public void beforeTest() throws DataException, IllegalAccessException { + Common.useDefaultSettings(); + + // Set difficulty to 1 to speed up the tests + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + } + + @Test + public void testMultipleChunks() throws DataException, IOException, MissingDataException { + try (final Repository repository = 
RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Create PUT transaction + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize); + + // Check the chunk count is correct + assertEquals(10, arbitraryDataFile.chunkCount()); + + // Now build the latest data state for this name + ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier); + arbitraryDataReader.loadSynchronously(true); + Path initialLayerPath = arbitraryDataReader.getFilePath(); + ArbitraryDataDigest initialLayerDigest = new ArbitraryDataDigest(initialLayerPath); + initialLayerDigest.compute(); + + // Its directory hash should match the original directory hash + ArbitraryDataDigest path1Digest = new ArbitraryDataDigest(path1); + path1Digest.compute(); + assertEquals(path1Digest.getHash58(), initialLayerDigest.getHash58()); + } + } + +} diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java new file mode 100644 index 00000000..150038ca --- /dev/null +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java @@ -0,0 +1,81 @@ +package org.qortal.test.arbitrary; + +import org.apache.commons.lang3.reflect.FieldUtils; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.exception.MissingDataException; +import org.qortal.arbitrary.misc.Service; +import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.RegisterNameTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.ArbitraryUtils; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.transaction.TestTransaction; +import org.qortal.transaction.ArbitraryTransaction; +import org.qortal.utils.Base58; + +import java.io.IOException; +import java.nio.file.Path; + +import static org.junit.Assert.*; + +public class ArbitraryTransactionTests extends Common { + + @Before + public void beforeTest() throws DataException, IllegalAccessException { + Common.useDefaultSettings(); + } + + @Test + public void testDifficultyTooLow() throws IllegalAccessException, DataException, IOException, MissingDataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 
= Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize); + + // Check that nonce validation succeeds + byte[] signature = arbitraryDataFile.getSignature(); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); + assertTrue(transaction.isSignatureValid()); + + // Increase difficulty to 15 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true); + + // Make sure the nonce validation fails + // Note: there is a very tiny chance this could succeed due to being extremely lucky + // and finding a high difficulty nonce in the first couple of cycles. It will be rare + // enough that we shouldn't need to account for it. + assertFalse(transaction.isSignatureValid()); + + // Reduce difficulty back to 1, to double check + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + assertTrue(transaction.isSignatureValid()); + + } + + } + +} diff --git a/src/test/java/org/qortal/test/common/ArbitraryUtils.java b/src/test/java/org/qortal/test/common/ArbitraryUtils.java new file mode 100644 index 00000000..5a67ccae --- /dev/null +++ b/src/test/java/org/qortal/test/common/ArbitraryUtils.java @@ -0,0 +1,89 @@ +package org.qortal.test.common; + +import org.qortal.account.PrivateKeyAccount; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataTransactionBuilder; +import org.qortal.arbitrary.misc.Service; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.transaction.Transaction; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Random; + +import static org.junit.Assert.assertEquals; + +public class ArbitraryUtils { + + public static ArbitraryDataFile createAndMintTxn(Repository repository, String publicKey58, Path path, String name, String identifier, + ArbitraryTransactionData.Method method, Service service, PrivateKeyAccount account, + int chunkSize) throws DataException { + + ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder( + repository, publicKey58, path, name, method, service, identifier); + + txnBuilder.setChunkSize(chunkSize); + txnBuilder.build(); + txnBuilder.computeNonce(); + ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData(); + 
Transaction.ValidationResult result = TransactionUtils.signAndImport(repository, transactionData, account); + assertEquals(Transaction.ValidationResult.OK, result); + BlockUtils.mintBlock(repository); + + // We need a new ArbitraryDataFile instance because the files will have been moved to the signature's folder + byte[] hash = txnBuilder.getArbitraryDataFile().getHash(); + byte[] signature = transactionData.getSignature(); + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + arbitraryDataFile.setMetadataHash(transactionData.getMetadataHash()); + + return arbitraryDataFile; + } + + public static ArbitraryDataFile createAndMintTxn(Repository repository, String publicKey58, Path path, String name, String identifier, + ArbitraryTransactionData.Method method, Service service, PrivateKeyAccount account) throws DataException { + + // Use default chunk size + int chunkSize = ArbitraryDataFile.CHUNK_SIZE; + return ArbitraryUtils.createAndMintTxn(repository, publicKey58, path, name, identifier, method, service, account, chunkSize); + } + + public static Path generateRandomDataPath(int length) throws IOException { + // Create a file in a random temp directory + Path tempDir = Files.createTempDirectory("generateRandomDataPath"); + File file = new File(Paths.get(tempDir.toString(), "file.txt").toString()); + file.deleteOnExit(); + + // Write a random string to the file + BufferedWriter file1Writer = new BufferedWriter(new FileWriter(file)); + String initialString = ArbitraryUtils.generateRandomString(length - 1); // -1 due to newline at EOF + + // Add a newline every 50 chars + // initialString = initialString.replaceAll("(.{50})", "$1\n"); + + file1Writer.write(initialString); + file1Writer.newLine(); + file1Writer.close(); + + return tempDir; + } + + public static String generateRandomString(int length) { + int leftLimit = 48; // numeral '0' + int rightLimit = 122; // letter 'z' + Random random = new Random(); + + return random.ints(leftLimit, rightLimit + 1) + .filter(i -> (i <= 57 || i >= 65) && (i <= 90 || i >= 97)) + .limit(length) + .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) + .toString(); + } + +} diff --git a/src/test/java/org/qortal/test/common/transaction/ArbitraryTestTransaction.java b/src/test/java/org/qortal/test/common/transaction/ArbitraryTestTransaction.java index 0b48748d..d831eaf1 100644 --- a/src/test/java/org/qortal/test/common/transaction/ArbitraryTestTransaction.java +++ b/src/test/java/org/qortal/test/common/transaction/ArbitraryTestTransaction.java @@ -4,6 +4,7 @@ import java.util.ArrayList; import java.util.List; import org.qortal.account.PrivateKeyAccount; +import org.qortal.arbitrary.misc.Service; import org.qortal.asset.Asset; import org.qortal.data.PaymentData; import org.qortal.data.transaction.ArbitraryTransactionData; @@ -16,8 +17,21 @@ import org.qortal.utils.Amounts; public class ArbitraryTestTransaction extends TestTransaction { public static TransactionData randomTransaction(Repository repository, PrivateKeyAccount account, boolean wantValid) throws DataException { - final int version = 4; - final int service = 123; + final int version = 5; + final Service service = Service.ARBITRARY_DATA; + final int nonce = 0; + final int size = 4 * 1024 * 1024; + final String name = "TEST"; + final String identifier = "qortal_avatar"; + final ArbitraryTransactionData.Method method = ArbitraryTransactionData.Method.PUT; + + final byte[] secret = new byte[32]; + random.nextBytes(secret); + + final 
ArbitraryTransactionData.Compression compression = ArbitraryTransactionData.Compression.ZIP; + + final byte[] metadataHash = new byte[32]; + random.nextBytes(metadataHash); byte[] data = new byte[1024]; random.nextBytes(data); @@ -31,7 +45,8 @@ public class ArbitraryTestTransaction extends TestTransaction { List payments = new ArrayList<>(); payments.add(new PaymentData(recipient, assetId, amount)); - return new ArbitraryTransactionData(generateBase(account), version, service, data, dataType, payments); + return new ArbitraryTransactionData(generateBase(account), version, service, nonce, size,name, identifier, + method, secret, compression, data, dataType, metadataHash, payments); } } diff --git a/src/test/java/org/qortal/test/common/transaction/PresenceTestTransaction.java b/src/test/java/org/qortal/test/common/transaction/PresenceTestTransaction.java new file mode 100644 index 00000000..64df87f4 --- /dev/null +++ b/src/test/java/org/qortal/test/common/transaction/PresenceTestTransaction.java @@ -0,0 +1,25 @@ +package org.qortal.test.common.transaction; + +import com.google.common.primitives.Longs; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.data.transaction.PresenceTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.transaction.PresenceTransaction.PresenceType; +import org.qortal.utils.NTP; + +public class PresenceTestTransaction extends TestTransaction { + + public static TransactionData randomTransaction(Repository repository, PrivateKeyAccount account, boolean wantValid) throws DataException { + final int nonce = 0; + + byte[] tradePrivateKey = new byte[32]; + PrivateKeyAccount tradeNativeAccount = new PrivateKeyAccount(repository, tradePrivateKey); + long timestamp = NTP.getTime(); + byte[] timestampSignature = tradeNativeAccount.sign(Longs.toByteArray(timestamp)); + + return new PresenceTransactionData(generateBase(account), nonce, PresenceType.TRADE_BOT, timestampSignature); + } + +} diff --git a/src/test/resources/arbitrary/demo1/dir1/dir2/lorem5.txt b/src/test/resources/arbitrary/demo1/dir1/dir2/lorem5.txt new file mode 100644 index 00000000..ef07da1f --- /dev/null +++ b/src/test/resources/arbitrary/demo1/dir1/dir2/lorem5.txt @@ -0,0 +1 @@ +Pellentesque laoreet laoreet dui ut volutpat. diff --git a/src/test/resources/arbitrary/demo1/dir1/lorem4.txt b/src/test/resources/arbitrary/demo1/dir1/lorem4.txt new file mode 100644 index 00000000..6ac4bdd8 --- /dev/null +++ b/src/test/resources/arbitrary/demo1/dir1/lorem4.txt @@ -0,0 +1,10 @@ +Pellentesque mollis risus laoreet neque lobortis, ut euismod nisl gravida. +Nullam sit amet scelerisque sapien, id aliquet elit. Suspendisse eu +accumsan eros. Nullam non nunc ut risus facilisis posuere sed sed ipsum. +Pellentesque habitant morbi tristique senectus et netus et malesuada fames +ac turpis egestas. Nullam magna felis, vehicula a accumsan luctus, vulputate +vitae justo. Integer mollis lacus eu nisi iaculis, ac ultrices sem aliquam. +Sed ac lacus eget nibh posuere sodales. Phasellus sodales, augue ac +tincidunt scelerisque, mi erat varius mauris, sed blandit ex nisl ut +justo. Etiam ac nisl venenatis, malesuada odio vitae, blandit velit. +Phasellus congue leo a porttitor hendrerit. 
diff --git a/src/test/resources/arbitrary/demo1/lorem1.txt b/src/test/resources/arbitrary/demo1/lorem1.txt new file mode 100644 index 00000000..5721466c --- /dev/null +++ b/src/test/resources/arbitrary/demo1/lorem1.txt @@ -0,0 +1,10 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. +Ut ligula felis, imperdiet nec placerat at, placerat +quis diam. Praesent a ultricies lacus. +Aenean luctus blandit dui. Quisque vel augue +diam. Nulla libero libero, condimentum sed +accumsan eu, elementum sit amet turpis. +In semper risus ac libero lobortis, +ut consectetur urna euismod. +Donec ut erat quis mi eleifend tincidunt +aliquet vitae lacus. diff --git a/src/test/resources/arbitrary/demo1/lorem2.txt b/src/test/resources/arbitrary/demo1/lorem2.txt new file mode 100644 index 00000000..8a9c4367 --- /dev/null +++ b/src/test/resources/arbitrary/demo1/lorem2.txt @@ -0,0 +1 @@ +Quisque viverra neque quis eros dapibus diff --git a/src/test/resources/arbitrary/demo1/lorem3.txt b/src/test/resources/arbitrary/demo1/lorem3.txt new file mode 100644 index 00000000..5db7e985 --- /dev/null +++ b/src/test/resources/arbitrary/demo1/lorem3.txt @@ -0,0 +1 @@ +Sed ac magna pretium, suscipit mauris sed, ultrices nunc. diff --git a/src/test/resources/arbitrary/demo2/dir1/dir2/lorem5.txt b/src/test/resources/arbitrary/demo2/dir1/dir2/lorem5.txt new file mode 100644 index 00000000..73b058e4 --- /dev/null +++ b/src/test/resources/arbitrary/demo2/dir1/dir2/lorem5.txt @@ -0,0 +1 @@ +Pellentesque laoreet laoreet dui ut volutpat. Sentence added. diff --git a/src/test/resources/arbitrary/demo2/dir1/lorem4.txt b/src/test/resources/arbitrary/demo2/dir1/lorem4.txt new file mode 100644 index 00000000..30c17943 --- /dev/null +++ b/src/test/resources/arbitrary/demo2/dir1/lorem4.txt @@ -0,0 +1,11 @@ +Pellentesque mollis risus laoreet neque lobortis, ut euismod nisl gravida. +Nullam sit amet scelerisque sapien, id aliquet elit. Suspendisse eu +accumsan eros. Nullam non nunc ut risus facilisis posuere sed sed ipsum. +Pellentesque habitant morbi tristique senectus et netus et malesuada fames +ac turpis egestas. Nullam magna felis; vehicula a accumsan luctus, vulputate +vitae justo. Integer mollis lacus eu nisi iaculis, ac ultrices sem aliquam. +Sed ac lacus eget nibh posuere sodales. Phasellus sodales, augue ac +tincidunt scelerisque, mi erat Varius mauris, sed blandit ex nisl ut +justo. Etiam ac nisl venenatis, malesuada odio vitae, blandit velit. +Phasellus congue leo a porttitor hendrerit. +Line added. diff --git a/src/test/resources/arbitrary/demo2/lorem1.txt b/src/test/resources/arbitrary/demo2/lorem1.txt new file mode 100644 index 00000000..4df35553 --- /dev/null +++ b/src/test/resources/arbitrary/demo2/lorem1.txt @@ -0,0 +1,10 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. +Ut ligula felis, imperdiet nec placerat at, placerat +quis diam. Praesent a ultricies lacus. +Aenean luctus blandit dui. Quisque vel augue +diam. Nulla libero libero; condimentum sed +accumsan eu, elementum sit amet turpis. +In semper risus ac libero lobortis, +ut consectetur urna euismod. +Donec ut erat quis mi eleifend tincidunt +aliquet vitae lacus. 
diff --git a/src/test/resources/arbitrary/demo2/lorem2.txt b/src/test/resources/arbitrary/demo2/lorem2.txt new file mode 100644 index 00000000..de4628f2 --- /dev/null +++ b/src/test/resources/arbitrary/demo2/lorem2.txt @@ -0,0 +1,2 @@ +Quisque viverra neque +quis eros dapibus diff --git a/src/test/resources/arbitrary/demo2/lorem3.txt b/src/test/resources/arbitrary/demo2/lorem3.txt new file mode 100644 index 00000000..5db7e985 --- /dev/null +++ b/src/test/resources/arbitrary/demo2/lorem3.txt @@ -0,0 +1 @@ +Sed ac magna pretium, suscipit mauris sed, ultrices nunc. diff --git a/src/test/resources/arbitrary/demo3/dir1/dir2/lorem5.txt b/src/test/resources/arbitrary/demo3/dir1/dir2/lorem5.txt new file mode 100644 index 00000000..74d0fda9 --- /dev/null +++ b/src/test/resources/arbitrary/demo3/dir1/dir2/lorem5.txt @@ -0,0 +1 @@ +Pellentesque laoreet laoreet dui ut volutpat. Sentence modified. diff --git a/src/test/resources/arbitrary/demo3/dir1/lorem4.txt b/src/test/resources/arbitrary/demo3/dir1/lorem4.txt new file mode 100644 index 00000000..d2cd1ea4 --- /dev/null +++ b/src/test/resources/arbitrary/demo3/dir1/lorem4.txt @@ -0,0 +1,11 @@ +Pellentesque mollis risus laoreet neque lobortis, ut euismod nisl gravida. +Nullam sit amet scelerisque sapien, id aliquet elit. Suspendisse eu +accumsan eros. Nullam non nunc ut risus facilisis posuere sed sed ipsum. +Pellentesque habitant morbi tristique senectus et netus et malesuada fames +ac turpis egestas. Nullam magna felis; vehicula a accumsan luctus, vulputate +vitae justo. Integer mollis lacus eu nisi iaculis, ac ultrices sem aliquam. +Sed ac lacus eget nibh posuere sodales. Phasellus sodales, augue ac +tincidunt scelerisque, mi erat Varius mauris, sed blandit ex nisl ut +justo. Etiam ac nisl venenatis, malesuada odio vitae, blandit velit. +Phasellus congue leo a porttitor hendrerit. +Line modified. diff --git a/src/test/resources/arbitrary/demo3/lorem1.txt b/src/test/resources/arbitrary/demo3/lorem1.txt new file mode 100644 index 00000000..0def0e23 --- /dev/null +++ b/src/test/resources/arbitrary/demo3/lorem1.txt @@ -0,0 +1,10 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. +Ut ligula felis, imperdiet nec placerat at, placerat +quis diam. Praesent a ultricies lacus. +Aenean luctus blandit dui. Quisque vel augue +diam. Nulla libero libero; condimentum sed +accumsan eu, elementum sit amet turpis. +In semper risus ac libero lobortis, +ut consectetur urna euismod! +Donec ut erat quis mi eleifend tincidunt +aliquet vitae lacus. diff --git a/src/test/resources/arbitrary/demo3/lorem2.txt b/src/test/resources/arbitrary/demo3/lorem2.txt new file mode 100644 index 00000000..47158e17 --- /dev/null +++ b/src/test/resources/arbitrary/demo3/lorem2.txt @@ -0,0 +1,10 @@ +Quisque viverra neque +quis eros dapibus +Quisque viverra neque +quis eros dapibus +Quisque viverra neque +quis eros dapibus +Quisque viverra neque +quis eros dapibus +Quisque viverra neque +quis eros dapibus diff --git a/src/test/resources/arbitrary/demo3/lorem3.txt b/src/test/resources/arbitrary/demo3/lorem3.txt new file mode 100644 index 00000000..5db7e985 --- /dev/null +++ b/src/test/resources/arbitrary/demo3/lorem3.txt @@ -0,0 +1 @@ +Sed ac magna pretium, suscipit mauris sed, ultrices nunc. 
diff --git a/src/test/resources/test-settings-v2.json b/src/test/resources/test-settings-v2.json
index 83bdf197..d9f6cb0c 100644
--- a/src/test/resources/test-settings-v2.json
+++ b/src/test/resources/test-settings-v2.json
@@ -10,5 +10,11 @@
 "testNtpOffset": 0,
 "minPeers": 0,
 "pruneBlockLimit": 100,
- "bootstrapFilenamePrefix": "test-"
+ "bootstrapFilenamePrefix": "test-",
+ "dataPath": "data-test",
+ "tempDataPath": "data-test/_temp",
+ "listsPath": "lists-test",
+ "storagePolicy": "FOLLOWED_AND_VIEWED",
+ "maxStorageCapacity": 104857600,
+ "localAuthBypassEnabled": true
 }
diff --git a/tools/block-timings.sh b/tools/block-timings.sh
index 5324209b..88d8d643 100755
--- a/tools/block-timings.sh
+++ b/tools/block-timings.sh
@@ -69,11 +69,13 @@ function fetch_and_process_blocks {
 online_accounts_count=$(echo "${block_minting_info}" | jq -r .onlineAccountsCount)
 key_distance_ratio=$(echo "${block_minting_info}" | jq -r .keyDistanceRatio)
 time_delta=$(echo "${block_minting_info}" | jq -r .timeDelta)
+ timestamp=$(echo "${block_minting_info}" | jq -r .timestamp)
 time_offset=$(calculate_time_offset "${key_distance_ratio}")
 block_time=$((target-deviation+time_offset))
 echo "=== BLOCK ${height} ==="
+ echo "Timestamp: ${timestamp}"
 echo "Minter level: ${minter_level}"
 echo "Online accounts: ${online_accounts_count}"
 echo "Key distance ratio: ${key_distance_ratio}"
diff --git a/tools/publish-auto-update-v5.pl b/tools/publish-auto-update-v5.pl
new file mode 100755
index 00000000..aad49d4e
--- /dev/null
+++ b/tools/publish-auto-update-v5.pl
@@ -0,0 +1,160 @@
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+use POSIX;
+use Getopt::Std;
+
+sub usage() {
+ die("usage: $0 [-p api-port] dev-private-key [short-commit-hash]\n");
+}
+
+my %opt;
+getopts('p:', \%opt);
+
+usage() if @ARGV < 1 || @ARGV > 2;
+
+my $port = $opt{p} || 12391;
+my $privkey = shift @ARGV;
+my $commit_hash = shift @ARGV;
+
+my $git_dir = `git rev-parse --show-toplevel`;
+die("Cannot determine git top level dir\n") unless $git_dir;
+
+chomp $git_dir;
+chdir($git_dir) || die("Can't change directory to $git_dir: $!\n");
+
+open(POM, '<', 'pom.xml') || die ("Can't open 'pom.xml': $!\n");
+my $project;
+while (<POM>) {
+ if (m/<artifactId>(\w+)<.artifactId>/o) {
+ $project = $1;
+ last;
+ }
+}
+close(POM);
+
+# Do we need to determine commit hash?
+unless ($commit_hash) {
+ # determine git branch
+ my $branch_name = ` git symbolic-ref -q HEAD `;
+ chomp $branch_name;
+ $branch_name =~ s|^refs/heads/||; # ${branch_name##refs/heads/}
+
+ # short-form commit hash on base branch (non-auto-update)
+ $commit_hash ||= `git show --no-patch --format=%h`;
+ die("Can't find commit hash\n") if ! defined $commit_hash;
+ chomp $commit_hash;
+ printf "Commit hash on '%s' branch: %s\n", $branch_name, $commit_hash;
+} else {
+ printf "Using given commit hash: %s\n", $commit_hash;
+}
+
+# build timestamp / commit timestamp on base branch
+my $timestamp = `git show --no-patch --format=%ct ${commit_hash}`;
+die("Can't determine commit timestamp\n") if !
defined $timestamp; +$timestamp *= 1000; # Convert to milliseconds + +# locate sha256 utility +my $SHA256 = `which sha256sum || which sha256`; +chomp $SHA256; +die("Can't find sha256sum or sha256\n") unless length($SHA256) > 0; + +# SHA256 of actual update file +my $sha256 = `git show auto-update-${commit_hash}:${project}.update | ${SHA256} | head -c 64`; +die("Can't calculate SHA256 of ${project}.update\n") unless $sha256 =~ m/(\S{64})/; +chomp $sha256; + +# long-form commit hash of HEAD on auto-update branch +my $update_hash = `git rev-parse refs/heads/auto-update-${commit_hash}`; +die("Can't find commit hash for HEAD on auto-update-${commit_hash} branch\n") if ! defined $update_hash; +chomp $update_hash; + +printf "Build timestamp (ms): %d / 0x%016x\n", $timestamp, $timestamp; +printf "Auto-update commit hash: %s\n", $update_hash; +printf "SHA256 of ${project}.update: %s\n", $sha256; + +my $tx_type = 10; +my $tx_timestamp = time() * 1000; +my $tx_group_id = 1; +my $service = 1; +printf "\nARBITRARY(%d) transaction with timestamp %d, txGroupID %d and service %d\n", $tx_type, $tx_timestamp, $tx_group_id, $service; + +my $data_hex = sprintf "%016x%s%s", $timestamp, $update_hash, $sha256; +printf "\nARBITRARY transaction data payload: %s\n", $data_hex; + +my $n_payments = 0; +my $data_type = 1; # RAW_DATA +my $data_length = length($data_hex) / 2; # two hex chars per byte +my $fee = 0; +my $nonce = 0; +my $name_length = 0; +my $identifier_length = 0; +my $method = 0; # PUT +my $secret_length = 0; +my $compression = 0; # None +my $metadata_hash_length = 0; + +die("Something's wrong: data length is not 60 bytes!\n") if $data_length != 60; + +my $pubkey = `curl --silent --url http://localhost:${port}/utils/publickey --data ${privkey}`; +die("Can't convert private key to public key:\n$pubkey\n") unless $pubkey =~ m/^\w{44}$/; +printf "\nPublic key: %s\n", $pubkey; + +my $pubkey_hex = `curl --silent --url http://localhost:${port}/utils/frombase58 --data ${pubkey}`; +die("Can't convert base58 public key to hex:\n$pubkey_hex\n") unless $pubkey_hex =~ m/^[A-Za-z0-9]{64}$/; +printf "Public key hex: %s\n", $pubkey_hex; + +my $address = `curl --silent --url http://localhost:${port}/addresses/convert/${pubkey}`; +die("Can't convert base58 public key to address:\n$address\n") unless $address =~ m/^\w{33,34}$/; +printf "Address: %s\n", $address; + +my $reference = `curl --silent --url http://localhost:${port}/addresses/lastreference/${address}`; +die("Can't fetch last reference for $address:\n$reference\n") unless $reference =~ m/^\w{87,88}$/; +printf "Last reference: %s\n", $reference; + +my $reference_hex = `curl --silent --url http://localhost:${port}/utils/frombase58 --data ${reference}`; +die("Can't convert base58 reference to hex:\n$reference_hex\n") unless $reference_hex =~ m/^[A-Za-z0-9]{128}$/; +printf "Last reference hex: %s\n", $reference_hex; + +my $raw_tx_hex = sprintf("%08x%016x%08x%s%s%08x%08x%08x%08x%08x%08x%08x%08x%02x%08x%s%08x%08x%016x", $tx_type, $tx_timestamp, $tx_group_id, $reference_hex, $pubkey_hex, $nonce, $name_length, $identifier_length, $method, $secret_length, $compression, $n_payments, $service, $data_type, $data_length, $data_hex, $data_length, $metadata_hash_length, $fee); +printf "\nRaw transaction hex:\n%s\n", $raw_tx_hex; + +my $raw_tx = `curl --silent --url http://localhost:${port}/utils/tobase58/${raw_tx_hex}`; +die("Can't convert raw transaction hex to base58:\n$raw_tx\n") unless $raw_tx =~ m/^\w{300,320}$/; # Roughly 305 to 320 base58 chars +printf "\nRaw transaction 
+
+my $computed_tx = `curl --silent -X POST --url http://localhost:${port}/arbitrary/compute -d "${raw_tx}"`;
+die("Can't compute nonce for transaction:\n$computed_tx\n") unless $computed_tx =~ m/^\w{300,320}$/; # Roughly 300 to 320 base58 chars
+printf "\nRaw computed transaction (base58):\n%s\n", $computed_tx;
+
+my $sign_data = qq|' { "privateKey": "${privkey}", "transactionBytes": "${computed_tx}" } '|;
+my $signed_tx = `curl --silent -H "accept: text/plain" -H "Content-Type: application/json" --url http://localhost:${port}/transactions/sign --data ${sign_data}`;
+die("Can't sign raw transaction:\n$signed_tx\n") unless $signed_tx =~ m/^\w{390,410}$/; # +90ish longer than $raw_tx
+printf "\nSigned transaction:\n%s\n", $signed_tx;
+
+# Check we can actually fetch update
+my $origin = `git remote get-url origin`;
+die("Unable to get GitHub URL for 'origin'\n") unless $origin && $origin =~ m/:(.*)\.git$/;
+my $repo = $1;
+my $update_url = "https://github.com/${repo}/raw/${update_hash}/${project}.update";
+
+my $fetch_result = `curl --silent -o /dev/null --location --range 0-1 --head --write-out '%{http_code}' --url ${update_url}`;
+die("\nUnable to fetch update from ${update_url}\n") if $fetch_result ne '200';
+printf "\nUpdate fetchable from ${update_url}\n";
+
+# Flush STDOUT after every output
+$| = 1;
+print "\n";
+for (my $delay = 5; $delay > 0; --$delay) {
+  printf "\rSubmitting transaction in %d second%s... CTRL-C to abort ", $delay, ($delay != 1 ? 's' : '');
+  sleep 1;
+}
+
+printf "\rSubmitting transaction NOW... \n";
+my $result = `curl --silent --url http://localhost:${port}/transactions/process --data ${signed_tx}`;
+chomp $result;
+die("Transaction wasn't accepted:\n$result\n") unless $result eq 'true';
+
+my $decoded_tx = `curl --silent -H "Content-Type: application/json" --url http://localhost:${port}/transactions/decode --data ${signed_tx}`;
+printf "\nTransaction accepted:\n$decoded_tx\n";
diff --git a/tools/qdata b/tools/qdata
new file mode 100755
index 00000000..b1e9720b
--- /dev/null
+++ b/tools/qdata
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+# Qortal defaults
+host="localhost"
+port=12393
+
+if [ -z "$*" ]; then
+  echo "Usage:"
+  echo
+  echo "Host/update data:"
+  echo "qdata POST [service] [name] PATH [dirpath] [identifier]"
+  echo "qdata POST [service] [name] STRING [data-string] [identifier]"
+  echo
+  echo "Fetch data:"
+  echo "qdata GET [service] [name] [identifier] [filepath] [rebuild]"
+  echo
+  echo "Notes:"
+  echo "- When requesting a resource, please use 'default' to indicate a file with no identifier."
+  echo "- The same applies when specifying the relative path to a file within the data structure; use 'default'"
+  echo "  to indicate a single file resource."
+  echo
+  exit
+fi
+
+method=$1
+service=$2
+name=$3
+
+if [ -z "${method}" ]; then
+  echo "Error: missing method"; exit
+fi
+if [ -z "${service}" ]; then
+  echo "Error: missing service"; exit
+fi
+if [ -z "${name}" ]; then
+  echo "Error: missing name"; exit
+fi
+
+
+if [[ "${method}" == "POST" ]]; then
+  type=$4
+  data=$5
+  identifier=$6
+
+  if [ -z "${data}" ]; then
+    if [[ "${type}" == "PATH" ]]; then
+      echo "Error: missing directory"; exit
+    elif [[ "${type}" == "STRING" ]]; then
+      echo "Error: missing data string"; exit
+    else
+      echo "Error: unrecognized type"; exit
+    fi
+  fi
+  if [ -z "${QORTAL_PRIVKEY}" ]; then
+    echo "Error: missing private key. Set it by running: export QORTAL_PRIVKEY=privkeyhere"; exit
+  fi
+
+  if [ -z "${identifier}" ]; then
+    identifier="default"
+  fi
+
+  # Create type component in URL
+  if [[ "${type}" == "PATH" ]]; then
+    type_component=""
+  elif [[ "${type}" == "STRING" ]]; then
+    type_component="/string"
+  fi
+
+  echo "Creating transaction - this can take a while..."
+  tx_data=$(curl --silent --insecure -X ${method} "http://${host}:${port}/arbitrary/${service}/${name}/${identifier}${type_component}" -d "${data}")
+
+  if [[ "${tx_data}" == *"error"* || "${tx_data}" == *"ERROR"* ]]; then
+    echo "${tx_data}"; exit
+  elif [ -z "${tx_data}" ]; then
+    echo "Error: no transaction data returned"; exit
+  fi
+
+  echo "Computing nonce..."
+  computed_tx_data=$(curl --silent --insecure -X POST "http://${host}:${port}/arbitrary/compute" -H "Content-Type: application/json" -d "${tx_data}")
+  if [[ "${computed_tx_data}" == *"error"* || "${computed_tx_data}" == *"ERROR"* ]]; then
+    echo "${computed_tx_data}"; exit
+  fi
+
+  echo "Signing..."
+  signed_tx_data=$(curl --silent --insecure -X POST "http://${host}:${port}/transactions/sign" -H "Content-Type: application/json" -d "{\"privateKey\":\"${QORTAL_PRIVKEY}\",\"transactionBytes\":\"${computed_tx_data}\"}")
+  if [[ "${signed_tx_data}" == *"error"* || "${signed_tx_data}" == *"ERROR"* ]]; then
+    echo "${signed_tx_data}"; exit
+  fi
+
+  echo "Broadcasting..."
+  success=$(curl --silent --insecure -X POST "http://${host}:${port}/transactions/process" -H "Content-Type: text/plain" -d "${signed_tx_data}")
+  if [[ "${success}" == "true" ]]; then
+    echo "Transaction broadcast successfully"
+  else
+    echo "Error when broadcasting transaction. Please try again."
+    echo "Response: ${success}"
+  fi
+
+elif [[ "${method}" == "GET" ]]; then
+  identifier=$4
+  filepath=$5
+  rebuild=$6
+
+  if [ -z "${rebuild}" ]; then
+    rebuild="false"
+  fi
+
+  # Handle default
+  if [[ "${filepath}" == "default" ]]; then
+    filepath=""
+  fi
+
+  # We use a different API depending on whether or not an identifier is supplied
+  if [ -n "${identifier}" ]; then
+    response=$(curl --silent --insecure -X GET "http://${host}:${port}/arbitrary/${service}/${name}/${identifier}?rebuild=${rebuild}&filepath=${filepath}")
+  else
+    response=$(curl --silent --insecure -X GET "http://${host}:${port}/arbitrary/${service}/${name}?rebuild=${rebuild}&filepath=${filepath}")
+  fi
+
+  if [ -z "${response}" ]; then
+    echo "Empty response from ${host}:${port}"
+  fi
+  if [[ "${response}" == *"error"* || "${response}" == *"ERROR"* ]]; then
+    echo "${response}"; exit
+  fi
+
+  echo "${response}"
+
+fi
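+
+# Example invocations (a minimal sketch only; the service name, Qortal name and
+# paths below are illustrative assumptions, not values defined by this script):
+#
+#   export QORTAL_PRIVKEY=yourprivatekeybase58
+#   qdata POST WEBSITE mysite PATH /home/user/mysite        # host a directory (identifier defaults to 'default')
+#   qdata GET WEBSITE mysite default index.html             # fetch one file from the hosted data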