Merge pull request #210 from crowetic/master

Added kenny+alpha+phil changes to create what should be the release candidate. Merging upon verification from alpha+kenny.
crowetic 2024-11-07 10:46:40 -08:00 committed by GitHub
commit ea10759bd3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
36 changed files with 2113 additions and 406 deletions

View File

@@ -22,12 +22,12 @@
<dagger.version>1.2.2</dagger.version>
<extendedset.version>0.12.3</extendedset.version>
<git-commit-id-plugin.version>4.9.10</git-commit-id-plugin.version>
<grpc.version>1.66.0</grpc.version>
<guava.version>33.3.0-jre</guava.version>
<grpc.version>1.68.1</grpc.version>
<guava.version>33.3.1-jre</guava.version>
<hamcrest-library.version>2.2</hamcrest-library.version>
<homoglyph.version>1.2.1</homoglyph.version>
<hsqldb.version>2.5.1</hsqldb.version>
<icu4j.version>75.1</icu4j.version>
<icu4j.version>76.1</icu4j.version>
<java-diff-utils.version>4.12</java-diff-utils.version>
<javax.servlet-api.version>4.0.1</javax.servlet-api.version>
<jaxb-runtime.version>2.3.9</jaxb-runtime.version>
@@ -49,7 +49,7 @@
<maven-reproducible-build-plugin.version>0.17</maven-reproducible-build-plugin.version>
<maven-resources-plugin.version>3.3.1</maven-resources-plugin.version>
<maven-shade-plugin.version>3.6.0</maven-shade-plugin.version>
<maven-surefire-plugin.version>3.5.0</maven-surefire-plugin.version>
<maven-surefire-plugin.version>3.5.2</maven-surefire-plugin.version>
<protobuf.version>3.25.3</protobuf.version>
<replacer.version>1.5.3</replacer.version>
<simplemagic.version>1.17</simplemagic.version>

View File

@@ -1,14 +1,17 @@
package org.qortal;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
import org.qortal.api.ApiKey;
import org.qortal.api.ApiRequest;
import org.qortal.controller.Controller;
import org.qortal.controller.RestartNode;
import org.qortal.settings.Settings;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.nio.file.Files;
@@ -16,6 +19,8 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.Security;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import static org.qortal.controller.RestartNode.AGENTLIB_JVM_HOLDER_ARG;
@@ -57,15 +62,32 @@ public class ApplyRestart {
if (!shutdownNode())
return;
// Restart node
restartNode(args);
try {
// Give some time for shutdown
TimeUnit.SECONDS.sleep(30);
LOGGER.info("Restarting...");
// Remove blockchain lock if it exists
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
if (blockchainLock.isLocked())
blockchainLock.unlock();
// Remove blockchain lock file if it exists
TimeUnit.SECONDS.sleep(60);
deleteLock();
// Restart node
TimeUnit.SECONDS.sleep(30);
restartNode(args);
LOGGER.info("Restarting...");
} catch (InterruptedException e) {
LOGGER.error("Unable to restart", e);
}
}
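One caveat in the unlock logic above: ReentrantLock.unlock() may only be called from the thread that currently holds the lock, whereas isLocked() merely reports that some thread holds it. A minimal, self-contained sketch (class and thread names are illustrative, not from this PR) of what happens when a non-owning thread calls unlock():

import java.util.concurrent.locks.ReentrantLock;

public class UnlockOwnershipDemo {
    public static void main(String[] args) throws InterruptedException {
        ReentrantLock lock = new ReentrantLock();

        Thread holder = new Thread(lock::lock, "holder");
        holder.start();
        holder.join(); // "holder" exits still owning the lock; it is never auto-released

        System.out.println(lock.isLocked());              // true
        System.out.println(lock.isHeldByCurrentThread()); // false: main is not the owner

        try {
            lock.unlock(); // calling thread does not hold the lock
        } catch (IllegalMonitorStateException e) {
            System.out.println("unlock() from a non-owning thread throws: " + e);
        }
    }
}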
private static boolean shutdownNode() {
String baseUri = "http://localhost:" + Settings.getInstance().getApiPort() + "/";
LOGGER.info(() -> String.format("Shutting down node using API via %s", baseUri));
LOGGER.debug(() -> String.format("Shutting down node using API via %s", baseUri));
// The /admin/stop endpoint requires an API key, which may or may not be already generated
boolean apiKeyNewlyGenerated = false;
@@ -134,7 +156,22 @@ public class ApplyRestart {
apiKey.delete();
} catch (IOException e) {
LOGGER.info("Error loading or deleting API key: {}", e.getMessage());
LOGGER.error("Error loading or deleting API key: {}", e.getMessage());
}
}
private static void deleteLock() {
// Get the repository path from settings
String repositoryPath = Settings.getInstance().getRepositoryPath();
LOGGER.debug(String.format("Repository path is: %s", repositoryPath));
try {
Path root = Paths.get(repositoryPath);
File lockFile = new File(root.resolve("blockchain.lck").toUri());
LOGGER.debug("Lockfile is: {}", lockFile);
FileUtils.forceDelete(FileUtils.getFile(lockFile));
} catch (IOException e) {
LOGGER.error("Error deleting blockchain lock file: {}", e.getMessage());
}
}
@@ -150,9 +187,10 @@ public class ApplyRestart {
List<String> javaCmd;
if (Files.exists(exeLauncher)) {
javaCmd = Arrays.asList(exeLauncher.toString());
javaCmd = List.of(exeLauncher.toString());
} else {
javaCmd = new ArrayList<>();
// Java runtime binary itself
javaCmd.add(javaBinary.toString());

View File

@@ -64,6 +64,7 @@ public class BlockMinter extends Thread {
@Override
public void run() {
Thread.currentThread().setName("BlockMinter");
Thread.currentThread().setPriority(MAX_PRIORITY);
if (Settings.getInstance().isTopOnly() || Settings.getInstance().isLite()) {
// Top only and lite nodes do not sign blocks
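For context, MAX_PRIORITY can be referenced unqualified here because BlockMinter extends Thread and inherits the constant. A tiny sketch of the standard priority range (illustrative class name; the actual scheduling effect of priorities is platform-dependent):

public class PriorityDemo {
    public static void main(String[] args) {
        Thread worker = new Thread(() -> {}, "worker");

        System.out.println(Thread.MIN_PRIORITY);  // 1
        System.out.println(Thread.NORM_PRIORITY); // 5, the default
        System.out.println(Thread.MAX_PRIORITY);  // 10

        // A hint to the scheduler, clamped to the thread group's maximum; not a guarantee.
        worker.setPriority(Thread.MAX_PRIORITY);
        System.out.println(worker.getPriority()); // 10
    }
}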

View File

@@ -13,6 +13,7 @@ import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.controller.arbitrary.*;
import org.qortal.controller.hsqldb.HSQLDBDataCacheManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.repository.PruneManager;
import org.qortal.controller.tradebot.TradeBot;
@@ -35,6 +36,7 @@ import org.qortal.network.Peer;
import org.qortal.network.PeerAddress;
import org.qortal.network.message.*;
import org.qortal.repository.*;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
@@ -99,7 +101,7 @@ public class Controller extends Thread {
private final long buildTimestamp; // seconds
private final String[] savedArgs;
private ExecutorService callbackExecutor = Executors.newFixedThreadPool(3);
private ExecutorService callbackExecutor = Executors.newFixedThreadPool(4);
private volatile boolean notifyGroupMembershipChange = false;
/** Latest blocks on our chain. Note: tail/last is the latest block. */
@@ -406,8 +408,17 @@
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
try (final Repository repository = RepositoryManager.getRepository()) {
RepositoryManager.rebuildTransactionSequences(repository);
// RepositoryManager.rebuildTransactionSequences(repository);
ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, false);
if( Settings.getInstance().isDbCacheEnabled() ) {
LOGGER.info("Db Cache Starting ...");
HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager((HSQLDBRepository) repositoryFactory.getRepository());
hsqldbDataCacheManager.start();
}
else {
LOGGER.info("Db Cache Disabled");
}
}
} catch (DataException e) {
// If exception has no cause or message then repository is in use by some other process.
@@ -489,7 +500,6 @@ public class Controller extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Shutdown hook");
Controller.getInstance().shutdown();
}
});
@@ -569,10 +579,31 @@
// If GUI is enabled, we're no longer starting up but actually running now
Gui.getInstance().notifyRunning();
// Check every 10 minutes to see if the block minter is running
Timer timer = new Timer();
// Check every 10 minutes if we have enough connected peers
Timer checkConnectedPeers = new Timer();
timer.schedule(new TimerTask() {
checkConnectedPeers.schedule(new TimerTask() {
@Override
public void run() {
// Get the connected peers
int myConnectedPeers = Network.getInstance().getImmutableHandshakedPeers().size();
LOGGER.debug("Node have {} connected peers", myConnectedPeers);
if (myConnectedPeers == 0) {
// Restart node if we have 0 peers
LOGGER.info("Node have no connected peers, restarting node");
try {
RestartNode.attemptToRestart();
} catch (Exception e) {
LOGGER.error("Unable to restart the node", e);
}
}
}
}, 10*60*1000, 10*60*1000);
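The schedule(task, delay, period) call above runs the check on a single background timer thread, first after ten minutes and then every ten minutes; the try/catch around attemptToRestart() matters because an uncaught exception in a TimerTask kills the whole Timer. A stripped-down sketch of the same pattern, with the peer lookup stubbed out (stub names are hypothetical):

import java.util.Timer;
import java.util.TimerTask;

public class PeerCheckDemo {
    private static final long TEN_MINUTES = 10 * 60 * 1000L;

    public static void main(String[] args) {
        Timer checkConnectedPeers = new Timer("check-connected-peers");

        checkConnectedPeers.schedule(new TimerTask() {
            @Override
            public void run() {
                // Stand-in for Network.getInstance().getImmutableHandshakedPeers().size()
                int connectedPeers = countConnectedPeers();
                if (connectedPeers == 0) {
                    System.out.println("No connected peers; a restart would be attempted here");
                }
            }
        }, TEN_MINUTES, TEN_MINUTES); // initial delay, then repeat period
    }

    private static int countConnectedPeers() { return 0; } // hypothetical stub
}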
// Check every 10 minutes to see if the block minter is running
Timer checkBlockMinter = new Timer();
checkBlockMinter.schedule(new TimerTask() {
@Override
public void run() {
if (blockMinter.isAlive()) {

View File

@@ -80,7 +80,7 @@ public class OnlineAccountsManager {
// one for the transition period.
private static long[] POW_VERIFY_WORK_BUFFER = new long[getPoWBufferSize() / 8];
private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts"));
private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts", Thread.NORM_PRIORITY));
private volatile boolean isStopping = false;
private final Set<OnlineAccountData> onlineAccountsImportQueue = ConcurrentHashMap.newKeySet();

View File

@@ -65,6 +65,7 @@ public class PirateChainWalletController extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Pirate Chain Wallet Controller");
Thread.currentThread().setPriority(MIN_PRIORITY);
try {
while (running && !Controller.isStopping()) {

View File

@@ -118,8 +118,12 @@ public class Synchronizer extends Thread {
}
public static Synchronizer getInstance() {
if (instance == null)
if (instance == null) {
instance = new Synchronizer();
instance.setPriority(Settings.getInstance().getSynchronizerThreadPriority());
LOGGER.info("thread priority = " + instance.getPriority());
}
return instance;
}
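Note that this lazy getInstance() is unsynchronized, so it relies on the first call happening before concurrent access begins. For comparison, a sketch of the initialization-on-demand holder idiom, which gives the same configure-on-first-use behaviour thread-safely (an illustrative alternative, not what this PR does):

public class SynchronizerHolderSketch {
    private static class Holder {
        // The JVM runs this initializer at most once, on first access, thread-safely.
        static final Synchronizer INSTANCE = create();

        private static Synchronizer create() {
            Synchronizer instance = new Synchronizer();
            instance.setPriority(Settings.getInstance().getSynchronizerThreadPriority());
            return instance;
        }
    }

    public static Synchronizer getInstance() {
        return Holder.INSTANCE;
    }
}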

View File

@@ -14,6 +14,7 @@ import java.io.IOException;
import java.util.Comparator;
import java.util.Map;
import static java.lang.Thread.NORM_PRIORITY;
import static org.qortal.data.arbitrary.ArbitraryResourceStatus.Status.NOT_PUBLISHED;
@@ -28,6 +29,7 @@ public class ArbitraryDataBuilderThread implements Runnable {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Builder Thread");
Thread.currentThread().setPriority(NORM_PRIORITY);
ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance();
while (!Controller.isStopping()) {

View File

@@ -41,6 +41,7 @@ public class ArbitraryDataCacheManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Cache Manager");
Thread.currentThread().setPriority(NORM_PRIORITY);
try {
while (!Controller.isStopping()) {

View File

@@ -71,6 +71,7 @@ public class ArbitraryDataCleanupManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Cleanup Manager");
Thread.currentThread().setPriority(NORM_PRIORITY);
// Paginate queries when fetching arbitrary transactions
final int limit = 100;

View File

@@ -17,6 +17,8 @@ import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import static java.lang.Thread.NORM_PRIORITY;
public class ArbitraryDataFileRequestThread implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileRequestThread.class);
@@ -28,6 +30,7 @@ public class ArbitraryDataFileRequestThread implements Runnable {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data File Request Thread");
Thread.currentThread().setPriority(NORM_PRIORITY);
try {
while (!Controller.isStopping()) {

View File

@@ -91,6 +91,7 @@ public class ArbitraryDataManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Manager");
Thread.currentThread().setPriority(NORM_PRIORITY);
// Create data directory in case it doesn't exist yet
this.createDataDirectory();

View File

@@ -36,6 +36,7 @@ public class ArbitraryDataRenderManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Render Manager");
Thread.currentThread().setPriority(NORM_PRIORITY);
try {
while (!isStopping) {

View File

@@ -72,6 +72,8 @@ public class ArbitraryDataStorageManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Storage Manager");
Thread.currentThread().setPriority(NORM_PRIORITY);
try {
while (!isStopping) {
Thread.sleep(1000);

View File

@@ -0,0 +1,29 @@
package org.qortal.controller.hsqldb;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.repository.RepositoryManager;
import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.settings.Settings;
public class HSQLDBDataCacheManager extends Thread {
private ArbitraryResourceCache cache = ArbitraryResourceCache.getInstance();
private HSQLDBRepository repository;
public HSQLDBDataCacheManager(HSQLDBRepository repository) {
this.repository = repository;
}
@Override
public void run() {
Thread.currentThread().setName("HSQLDB Data Cache Manager");
this.cache
= HSQLDBCacheUtils.startCaching(
Settings.getInstance().getDbCacheThreadPriority(),
Settings.getInstance().getDbCacheFrequency(),
this.repository
);
}
}

View File

@@ -11,6 +11,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
import static java.lang.Thread.MIN_PRIORITY;
public class AtStatesPruner implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(AtStatesPruner.class);
@@ -46,72 +48,81 @@ public class AtStatesPruner implements Runnable {
repository.saveChanges();
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getAtStatesPruneInterval());
Thread.sleep(Settings.getInstance().getAtStatesPruneInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Prune AT states for all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// Prune AT states for all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
// TODO: validate that the actual archived data exists before pruning it?
}
// TODO: validate that the actual archived data exists before pruning it?
}
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
if (pruneStartHeight >= upperPruneHeight)
continue;
if (pruneStartHeight >= upperPruneHeight)
continue;
LOGGER.debug(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
LOGGER.info(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates(
pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.debug(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
finalPruneStartHeight, upperPruneHeight));
} else {
// Can we move onto next batch?
if (upperPrunableHeight > upperBatchHeight) {
pruneStartHeight = upperBatchHeight;
repository.getATRepository().setAtPruneHeight(pruneStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates(
pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.debug(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
LOGGER.info(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
finalPruneStartHeight, upperPruneHeight));
} else {
// Can we move onto next batch?
if (upperPrunableHeight > upperBatchHeight) {
pruneStartHeight = upperBatchHeight;
repository.getATRepository().setAtPruneHeight(pruneStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(5*60*1000L);
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("AT States Pruning Shutting Down");
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(5*60*1000L);
LOGGER.warn("AT States Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("AT States Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to prune AT states: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}
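The restructuring here, repeated in the trimmer, archiver and pruner classes below, is one move: the try/catch migrates inside the while loop, so a failing iteration logs and retries instead of unwinding the loop and silently ending the maintenance thread, and InterruptedException is split into clean shutdown (Controller.isStopping()) versus unexpected interruption. A self-contained skeleton of the pattern, with the repository work stubbed out:

import java.util.concurrent.atomic.AtomicBoolean;

public class ResilientWorker implements Runnable {
    private final AtomicBoolean stopping = new AtomicBoolean(false); // stands in for Controller.isStopping()

    @Override
    public void run() {
        while (!stopping.get()) {
            try {
                Thread.sleep(1000L); // stands in for the configured prune/trim interval

                doOneBatch(); // stands in for one batch of pruning/trimming work
            } catch (InterruptedException e) {
                if (stopping.get()) {
                    System.out.println("Worker shutting down");
                } else {
                    System.out.println("Worker interrupted unexpectedly; continuing: " + e);
                }
            } catch (Exception e) {
                // Before this PR, an exception here escaped the loop and ended the thread.
                System.out.println("Worker iteration failed; will retry: " + e);
            }
        }
    }

    private void doOneBatch() { /* hypothetical work */ }

    public void requestStop() { stopping.set(true); }
}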

View File

@@ -11,6 +11,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
import static java.lang.Thread.MIN_PRIORITY;
public class AtStatesTrimmer implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(AtStatesTrimmer.class);
@@ -33,57 +35,66 @@ public class AtStatesTrimmer implements Runnable {
repository.saveChanges();
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getAtStatesTrimInterval());
Thread.sleep(Settings.getInstance().getAtStatesTrimInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
// We want to keep AT states near the tip of our copy of blockchain so we can process/orphan nearby blocks
long chainTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime();
long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
// We want to keep AT states near the tip of our copy of blockchain so we can process/orphan nearby blocks
long chainTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime();
long upperTrimmableTimestamp = Math.min(currentTrimmableTimestamp, chainTrimmableTimestamp);
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
long upperTrimmableTimestamp = Math.min(currentTrimmableTimestamp, chainTrimmableTimestamp);
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getAtStatesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getAtStatesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
if (trimStartHeight >= upperTrimHeight)
continue;
if (trimStartHeight >= upperTrimHeight)
continue;
int numAtStatesTrimmed = repository.getATRepository().trimAtStates(trimStartHeight, upperTrimHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Trimmed %d AT state%s between blocks %d and %d",
numAtStatesTrimmed, (numAtStatesTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getATRepository().setAtTrimHeight(trimStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
int numAtStatesTrimmed = repository.getATRepository().trimAtStates(trimStartHeight, upperTrimHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Bumping AT state base trim height to %d", finalTrimStartHeight));
LOGGER.info(() -> String.format("Trimmed %d AT state%s between blocks %d and %d",
numAtStatesTrimmed, (numAtStatesTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getATRepository().setAtTrimHeight(trimStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
final int finalTrimStartHeight = trimStartHeight;
LOGGER.info(() -> String.format("Bumping AT state base trim height to %d", finalTrimStartHeight));
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("AT States Trimming Shutting Down");
}
else {
LOGGER.warn("AT States Trimming interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("AT States Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to trim AT states: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -15,11 +15,13 @@ import org.qortal.utils.NTP;
import java.io.IOException;
import static java.lang.Thread.NORM_PRIORITY;
public class BlockArchiver implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(BlockArchiver.class);
private static final long INITIAL_SLEEP_PERIOD = 5 * 60 * 1000L + 1234L; // ms
private static final long INITIAL_SLEEP_PERIOD = 15 * 60 * 1000L; // ms
public void run() {
Thread.currentThread().setName("Block archiver");
@@ -45,71 +47,78 @@ public class BlockArchiver implements Runnable {
LOGGER.info("Starting block archiver from height {}...", startHeight);
while (!Controller.isStopping()) {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getArchiveInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null) {
continue;
}
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't attempt to archive if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Build cache of blocks
try {
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight += writer.getWrittenCount();
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
repository.saveChanges();
break;
repository.discardChanges();
case STOPPING:
return;
Thread.sleep(Settings.getInstance().getArchiveInterval());
// We've reached the limit of the blocks we can archive
// Sleep for a while to allow more to become available
case NOT_ENOUGH_BLOCKS:
// We didn't reach our file size target, so that must mean that we don't have enough blocks
// yet or something went wrong. Sleep for a while and then try again.
repository.discardChanges();
Thread.sleep(60 * 60 * 1000L); // 1 hour
break;
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed. Try again every minute until then.
LOGGER.info("Error: block not found when building archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
repository.discardChanges();
Thread.sleep( 60 * 1000L); // 1 minute
break;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null) {
continue;
}
} catch (IOException | TransformationException e) {
LOGGER.info("Caught exception when creating block cache", e);
}
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't attempt to archive if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Build cache of blocks
try {
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight += writer.getWrittenCount();
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
repository.saveChanges();
break;
case STOPPING:
return;
// We've reached the limit of the blocks we can archive
// Sleep for a while to allow more to become available
case NOT_ENOUGH_BLOCKS:
// We didn't reach our file size target, so that must mean that we don't have enough blocks
// yet or something went wrong. Sleep for a while and then try again.
repository.discardChanges();
Thread.sleep(2 * 60 * 60 * 1000L); // 2 hours
break;
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed. Try again every minute until then.
LOGGER.info("Error: block not found when building archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
repository.discardChanges();
Thread.sleep(60 * 1000L); // 1 minute
break;
}
} catch (IOException | TransformationException e) {
LOGGER.info("Caught exception when creating block cache", e);
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("Block Archiving Shutting Down");
}
else {
LOGGER.warn("Block Archiving interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("Block Archiving stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.info("Caught exception when creating block cache", e);
} catch (InterruptedException e) {
// Do nothing
} catch (Exception e) {
LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -11,6 +11,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
import static java.lang.Thread.NORM_PRIORITY;
public class BlockPruner implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(BlockPruner.class);
@@ -48,72 +50,81 @@ public class BlockPruner implements Runnable {
}
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getBlockPruneInterval());
Thread.sleep(Settings.getInstance().getBlockPruneInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't attempt to prune if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Don't attempt to prune if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Prune all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// Prune all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
}
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
}
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
if (pruneStartHeight >= upperPruneHeight) {
continue;
}
if (pruneStartHeight >= upperPruneHeight) {
continue;
}
LOGGER.debug(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
if (numBlocksPruned > 0) {
LOGGER.debug(String.format("Pruned %d block%s between %d and %d",
numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
pruneStartHeight, upperPruneHeight));
} else {
final int nextPruneHeight = upperPruneHeight + 1;
repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
LOGGER.debug(String.format("Bumping block base prune height to %d", pruneStartHeight));
// Can we move onto next batch?
if (upperPrunableHeight > nextPruneHeight) {
pruneStartHeight = nextPruneHeight;
if (numBlocksPruned > 0) {
LOGGER.info(String.format("Pruned %d block%s between %d and %d",
numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
pruneStartHeight, upperPruneHeight));
} else {
final int nextPruneHeight = upperPruneHeight + 1;
repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
repository.saveChanges();
LOGGER.info(String.format("Bumping block base prune height to %d", pruneStartHeight));
// Can we move onto next batch?
if (upperPrunableHeight > nextPruneHeight) {
pruneStartHeight = nextPruneHeight;
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(10*60*1000L);
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("Block Pruning Shutting Down");
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(10*60*1000L);
LOGGER.warn("Block Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("Block Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to prune blocks: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -12,6 +12,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
import static java.lang.Thread.NORM_PRIORITY;
public class OnlineAccountsSignaturesTrimmer implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(OnlineAccountsSignaturesTrimmer.class);
@@ -33,53 +35,62 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
int trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getOnlineSignaturesTrimInterval());
Thread.sleep(Settings.getInstance().getOnlineSignaturesTrimInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Trim blockchain by removing 'old' online accounts signatures
long upperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
// Trim blockchain by removing 'old' online accounts signatures
long upperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getOnlineSignaturesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getOnlineSignaturesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
if (trimStartHeight >= upperTrimHeight)
continue;
if (trimStartHeight >= upperTrimHeight)
continue;
int numSigsTrimmed = repository.getBlockRepository().trimOldOnlineAccountsSignatures(trimStartHeight, upperTrimHeight);
repository.saveChanges();
if (numSigsTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Trimmed %d online accounts signature%s between blocks %d and %d",
numSigsTrimmed, (numSigsTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(trimStartHeight);
repository.saveChanges();
int numSigsTrimmed = repository.getBlockRepository().trimOldOnlineAccountsSignatures(trimStartHeight, upperTrimHeight);
repository.saveChanges();
if (numSigsTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Bumping online accounts signatures base trim height to %d", finalTrimStartHeight));
LOGGER.info(() -> String.format("Trimmed %d online accounts signature%s between blocks %d and %d",
numSigsTrimmed, (numSigsTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(trimStartHeight);
repository.saveChanges();
final int finalTrimStartHeight = trimStartHeight;
LOGGER.info(() -> String.format("Bumping online accounts signatures base trim height to %d", finalTrimStartHeight));
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("Online Accounts Signatures Trimming Shutting Down");
}
else {
LOGGER.warn("Online Accounts Signatures Trimming interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("Online Accounts Signatures Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to trim online accounts signatures: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -40,7 +40,7 @@ public class PruneManager {
}
public void start() {
this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory());
this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory(Settings.getInstance().getPruningThreadPriority()));
if (Settings.getInstance().isTopOnly()) {
// Top-only-sync

View File

@@ -33,9 +33,10 @@ public class AddressLevelPairing {
public int getLevel() {
return level;
}
@Override
public String toString() {
return "SponsorshipReport{" +
return "AddressLevelPairing{" +
"address='" + address + '\'' +
", level=" + level +
'}';

View File

@@ -0,0 +1,26 @@
package org.qortal.data.arbitrary;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
public class ArbitraryResourceCache {
private ConcurrentHashMap<Integer, List<ArbitraryResourceData>> dataByService = new ConcurrentHashMap<>();
private ConcurrentHashMap<String, Integer> levelByName = new ConcurrentHashMap<>();
private ArbitraryResourceCache() {}
private static ArbitraryResourceCache SINGLETON = new ArbitraryResourceCache();
public static ArbitraryResourceCache getInstance(){
return SINGLETON;
}
public ConcurrentHashMap<String, Integer> getLevelByName() {
return levelByName;
}
public ConcurrentHashMap<Integer, List<ArbitraryResourceData>> getDataByService() {
return this.dataByService;
}
}
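A short usage sketch for this new singleton (service id and name are illustrative; ArbitraryResourceData is the existing Qortal DTO). Both maps are ConcurrentHashMaps, so individual gets and puts need no external locking, though compound read-modify-write sequences still would:

import java.util.ArrayList;
import java.util.List;

public class CacheUsageSketch {
    static void demo() {
        ArbitraryResourceCache cache = ArbitraryResourceCache.getInstance();

        // Writer side (the HSQLDB cache manager): publish entries per service id.
        int serviceId = 777; // illustrative; the real code keys on Service.value
        cache.getDataByService().put(serviceId, new ArrayList<>());
        cache.getLevelByName().put("ExampleName", 3);

        // Reader side (the search path): fall back to an empty list for uncached services.
        List<ArbitraryResourceData> candidates =
                cache.getDataByService().getOrDefault(serviceId, new ArrayList<>(0));
        int level = cache.getLevelByName().getOrDefault("ExampleName", 0);
    }
}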

View File

@@ -269,7 +269,7 @@ public enum Handshake {
private static final int POW_DIFFICULTY_POST_131 = 2; // leading zero bits
private static final ExecutorService responseExecutor = Executors.newFixedThreadPool(Settings.getInstance().getNetworkPoWComputePoolSize(), new DaemonThreadFactory("Network-PoW"));
private static final ExecutorService responseExecutor = Executors.newFixedThreadPool(Settings.getInstance().getNetworkPoWComputePoolSize(), new DaemonThreadFactory("Network-PoW", Settings.getInstance().getHandshakeThreadPriority()));
private static final byte[] ZERO_CHALLENGE = new byte[ChallengeMessage.CHALLENGE_LENGTH];

View File

@@ -53,7 +53,7 @@ public class Network {
/**
* How long between informational broadcasts to all connected peers, in milliseconds.
*/
private static final long BROADCAST_INTERVAL = 60 * 1000L; // ms
private static final long BROADCAST_INTERVAL = 30 * 1000L; // ms
/**
* Maximum time since last successful connection for peer info to be propagated, in milliseconds.
*/
@@ -83,12 +83,12 @@ public class Network {
"node6.qortalnodes.live", "node7.qortalnodes.live", "node8.qortalnodes.live"
};
private static final long NETWORK_EPC_KEEPALIVE = 10L; // seconds
private static final long NETWORK_EPC_KEEPALIVE = 5L; // seconds
public static final int MAX_SIGNATURES_PER_REPLY = 500;
public static final int MAX_BLOCK_SUMMARIES_PER_REPLY = 500;
private static final long DISCONNECTION_CHECK_INTERVAL = 10 * 1000L; // milliseconds
private static final long DISCONNECTION_CHECK_INTERVAL = 20 * 1000L; // milliseconds
private static final int BROADCAST_CHAIN_TIP_DEPTH = 7; // Just enough to fill a SINGLE TCP packet (~1440 bytes)
@@ -164,11 +164,11 @@ public class Network {
maxPeers = Settings.getInstance().getMaxPeers();
// We'll use a cached thread pool but with more aggressive timeout.
ExecutorService networkExecutor = new ThreadPoolExecutor(1,
ExecutorService networkExecutor = new ThreadPoolExecutor(2,
Settings.getInstance().getMaxNetworkThreadPoolSize(),
NETWORK_EPC_KEEPALIVE, TimeUnit.SECONDS,
new SynchronousQueue<Runnable>(),
new NamedThreadFactory("Network-EPC"));
new NamedThreadFactory("Network-EPC", Settings.getInstance().getNetworkThreadPriority()));
networkEPC = new NetworkProcessor(networkExecutor);
}
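For reference, this constructor builds what is effectively a bounded cached pool: the core threads (now 2 rather than 1) stay alive when idle, extra threads up to the configured maximum are created only when the SynchronousQueue finds no idle taker for a handed-off task, and those extras die after the keep-alive (now 5 s rather than 10 s). A generic sketch of the same construction with stand-in sizes:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedCachedPoolDemo {
    public static void main(String[] args) {
        int corePoolSize = 2;      // threads kept alive even when idle
        int maxPoolSize = 32;      // stands in for Settings.getInstance().getMaxNetworkThreadPoolSize()
        long keepAliveSeconds = 5; // idle timeout for threads above the core size

        ExecutorService pool = new ThreadPoolExecutor(
                corePoolSize, maxPoolSize,
                keepAliveSeconds, TimeUnit.SECONDS,
                new SynchronousQueue<>()); // pure hand-off: no buffering, so spawn a thread or reject

        pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}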

View File

@@ -153,13 +153,16 @@ public class BlockArchiveWriter {
int i = 0;
while (headerBytes.size() + bytes.size() < this.fileSizeTarget) {
// pause, since this can be a long process and other processes need to execute
Thread.sleep(Settings.getInstance().getArchivingPause());
if (Controller.isStopping()) {
return BlockArchiveWriteResult.STOPPING;
}
if (Synchronizer.getInstance().isSynchronizing()) {
Thread.sleep(1000L);
// wait until the Synchronizer stops
if( Synchronizer.getInstance().isSynchronizing() )
continue;
}
int currentHeight = startHeight + i;
if (currentHeight > endHeight) {

View File

@@ -1215,7 +1215,7 @@ public class HSQLDBAccountRepository implements AccountRepository {
sponseeSql.append(")");
// Create a new array to hold both
String[] combinedArray = new String[realRewardShareRecipients.length + 1];
Object[] combinedArray = new Object[realRewardShareRecipients.length + 1];
// Add the single string to the first position
combinedArray[0] = account;
@@ -1439,7 +1439,7 @@
sql.append(String.join(", ", Collections.nCopies(addressCount, "?")));
sql.append(") ");
sql.append("AND a.account = tx.recipient AND a.public_key != ats.creator AND asset_id = 0 ");
String[] sponsees = addresses.toArray(new String[addressCount]);
Object[] sponsees = addresses.toArray(new Object[addressCount]);
ResultSet buySellResultSet = this.repository.checkedExecute(sql.toString(), sponsees);
return buySellResultSet;
@@ -1456,7 +1456,7 @@
sql.append(String.join(", ", Collections.nCopies(addressCount, "?")));
sql.append(") ");
sql.append("AND a.account != tx.recipient AND asset_id = 0 ");
String[] sponsees = addresses.toArray(new String[addressCount]);
Object[] sponsees = addresses.toArray(new Object[addressCount]);
return this.repository.checkedExecute(sql.toString(), sponsees);
}
@@ -1490,7 +1490,7 @@
txTypeTotalsSql.append(") and type in (10, 12, 40) ");
txTypeTotalsSql.append("group by type order by type");
String[] sponsees = sponseeAddresses.toArray(new String[sponseeCount]);
Object[] sponsees = sponseeAddresses.toArray(new Object[sponseeCount]);
ResultSet txTypeResultSet = this.repository.checkedExecute(txTypeTotalsSql.toString(), sponsees);
return txTypeResultSet;
}
@@ -1502,7 +1502,7 @@
avgBalanceSql.append(String.join(", ", Collections.nCopies(sponseeCount, "?")));
avgBalanceSql.append(") and ASSET_ID = 0");
String[] sponsees = sponseeAddresses.toArray(new String[sponseeCount]);
Object[] sponsees = sponseeAddresses.toArray(new Object[sponseeCount]);
return this.repository.checkedExecute(avgBalanceSql.toString(), sponsees);
}
@@ -1538,7 +1538,7 @@
namesSql.append(String.join(", ", Collections.nCopies(sponseeCount, "?")));
namesSql.append(")");
String[] sponsees = sponseeAddresses.toArray(new String[sponseeCount]);
Object[] sponsees = sponseeAddresses.toArray(new Object[sponseeCount]);
ResultSet namesResultSet = this.repository.checkedExecute(namesSql.toString(), sponsees);
return namesResultSet;
}
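All of this file's String[] -> Object[] changes serve the same end: checkedExecute presumably takes an array of bind parameters, and typing it Object[] both allows mixed parameter types and sidesteps Java's array-covariance trap, where an array that is statically Object[] but dynamically String[] rejects non-String elements at runtime. A minimal illustration of that trap:

public class ArrayCovarianceDemo {
    public static void main(String[] args) {
        Object[] combined = new String[2]; // legal: arrays are covariant
        combined[0] = "address";

        try {
            combined[1] = 42; // compiles, but the runtime component type is String
        } catch (ArrayStoreException e) {
            System.out.println("ArrayStoreException: " + e.getMessage());
        }

        Object[] safe = new Object[2]; // what this PR switches to
        safe[0] = "address";
        safe[1] = 42; // fine: any Object fits
    }
}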

View File

@@ -7,6 +7,8 @@ import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
@@ -18,6 +20,7 @@ import org.qortal.data.transaction.BaseTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.ArbitraryRepository;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction.ApprovalStatus;
import org.qortal.utils.Base58;
@@ -28,6 +31,7 @@ import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
public class HSQLDBArbitraryRepository implements ArbitraryRepository {
@@ -723,6 +727,50 @@
public List<ArbitraryResourceData> searchArbitraryResources(Service service, String query, String identifier, List<String> names, String title, String description, boolean prefixOnly,
List<String> exactMatchNames, boolean defaultResource, SearchMode mode, Integer minLevel, Boolean followedOnly, Boolean excludeBlocked,
Boolean includeMetadata, Boolean includeStatus, Long before, Long after, Integer limit, Integer offset, Boolean reverse) throws DataException {
if(Settings.getInstance().isDbCacheEnabled()) {
List<ArbitraryResourceData> list
= HSQLDBCacheUtils.callCache(
ArbitraryResourceCache.getInstance(),
service, query, identifier, names, title, description, prefixOnly, exactMatchNames,
defaultResource, mode, minLevel, followedOnly, excludeBlocked, includeMetadata, includeStatus,
before, after, limit, offset, reverse);
if( !list.isEmpty() ) {
List<ArbitraryResourceData> results
= HSQLDBCacheUtils.filterList(
list,
ArbitraryResourceCache.getInstance().getLevelByName(),
Optional.ofNullable(mode),
Optional.ofNullable(service),
Optional.ofNullable(query),
Optional.ofNullable(identifier),
Optional.ofNullable(names),
Optional.ofNullable(title),
Optional.ofNullable(description),
prefixOnly,
Optional.ofNullable(exactMatchNames),
defaultResource,
Optional.ofNullable(minLevel),
Optional.ofNullable(() -> ListUtils.followedNames()),
Optional.ofNullable(ListUtils::blockedNames),
Optional.ofNullable(includeMetadata),
Optional.ofNullable(includeStatus),
Optional.ofNullable(before),
Optional.ofNullable(after),
Optional.ofNullable(limit),
Optional.ofNullable(offset),
Optional.ofNullable(reverse)
);
return results;
}
else {
LOGGER.info("Db Enabled Cache has zero candidates.");
}
}
StringBuilder sql = new StringBuilder(512);
List<Object> bindParams = new ArrayList<>();

View File

@@ -0,0 +1,544 @@
package org.qortal.repository.hsqldb;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.SearchMode;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.time.format.DateTimeFormatter;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.qortal.api.SearchMode.LATEST;
public class HSQLDBCacheUtils {
private static final Logger LOGGER = LogManager.getLogger(HSQLDBCacheUtils.class);
private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm");
private static final Comparator<? super ArbitraryResourceData> CREATED_WHEN_COMPARATOR = new Comparator<ArbitraryResourceData>() {
@Override
public int compare(ArbitraryResourceData data1, ArbitraryResourceData data2) {
Long a = data1.created;
Long b = data2.created;
return Long.compare(a != null ? a : Long.MIN_VALUE, b != null ? b : Long.MIN_VALUE);
}
};
private static final String DEFAULT_IDENTIFIER = "default";
/**
*
* @param cache the cache of arbitrary resources to pull candidates from
* @param service the service to filter
* @param query query for name, identifier, title or description match
* @param identifier the identifier to match
* @param names the names to match, ignored if there are exact names
* @param title the title to match for
* @param description the description to match for
* @param prefixOnly true to match on prefix only, false to match anywhere in the string
* @param exactMatchNames names to match exactly, overrides names
* @param defaultResource true to filter identifiers on the default identifier and match the query terms against candidate names only
* @param mode LATEST or ALL
* @param minLevel the minimum account level for resource creators
* @param followedOnly when true, retain only resources published by followed names
* @param excludeBlocked when true, exclude resources published by blocked names
* @param includeMetadata true to include resource metadata in the results, false to exclude metadata
* @param includeStatus true to include resource status in the results, false to exclude status
* @param before the latest creation timestamp for any candidate
* @param after the earliest creation timestamp for any candidate
* @param limit the maximum number of resource results to return
* @param offset the number of resource results to skip after the results have been retained, filtered and sorted
* @param reverse true to reverse the sort order, false to order in chronological order
*
* @return the resource results
*/
public static List<ArbitraryResourceData> callCache(
ArbitraryResourceCache cache,
Service service,
String query,
String identifier,
List<String> names,
String title,
String description,
boolean prefixOnly,
List<String> exactMatchNames,
boolean defaultResource,
SearchMode mode,
Integer minLevel,
Boolean followedOnly,
Boolean excludeBlocked,
Boolean includeMetadata,
Boolean includeStatus,
Long before,
Long after,
Integer limit,
Integer offset,
Boolean reverse) {
List<ArbitraryResourceData> candidates = new ArrayList<>();
// gather all cached results for the requested service
if( service != null ) {
candidates.addAll(cache.getDataByService().getOrDefault(service.value, new ArrayList<>(0)));
}
// if no service was requested, the candidate list stays empty
return candidates;
}
/**
* Filter candidates
*
* @param candidates the candidates, they may be preprocessed
* @param levelByName name -> level map
* @param mode LATEST or ALL
* @param service the service to filter
* @param query query for name, identifier, title or description match
* @param identifier the identifier to match
* @param names the names to match, ignored if there are exact names
* @param title the title to match for
* @param description the description to match for
* @param prefixOnly true to match on prefix only, false to match anywhere in the string
* @param exactMatchNames names to match exactly, overrides names
* @param defaultResource true to filter identifiers on the default identifier and match the query terms against candidate names only
* @param minLevel the minimum account level for resource creators
* @param includeOnly names to retain, exclude all others
* @param exclude names to exclude, retain all others
* @param includeMetadata true to include resource metadata in the results, false to exclude metadata
* @param includeStatus true to include resource status in the results, false to exclude status
* @param before the latest creation timestamp for any candidate
* @param after the earliest creation timestamp for any candidate
* @param limit the maximum number of resource results to return
* @param offset the number of resource results to skip after the results have been retained, filtered and sorted
* @param reverse true to reverse the sort order, false to order in chronological order
*
* @return the resource results
*/
public static List<ArbitraryResourceData> filterList(
List<ArbitraryResourceData> candidates,
Map<String, Integer> levelByName,
Optional<SearchMode> mode,
Optional<Service> service,
Optional<String> query,
Optional<String> identifier,
Optional<List<String>> names,
Optional<String> title,
Optional<String> description,
boolean prefixOnly,
Optional<List<String>> exactMatchNames,
boolean defaultResource,
Optional<Integer> minLevel,
Optional<Supplier<List<String>>> includeOnly,
Optional<Supplier<List<String>>> exclude,
Optional<Boolean> includeMetadata,
Optional<Boolean> includeStatus,
Optional<Long> before,
Optional<Long> after,
Optional<Integer> limit,
Optional<Integer> offset,
Optional<Boolean> reverse) {
// retain only candidates with names
Stream<ArbitraryResourceData> stream = candidates.stream().filter(candidate -> candidate.name != null);
// filter by service
if( service.isPresent() )
stream = stream.filter(candidate -> candidate.service.equals(service.get()));
// filter by query (either identifier, name, title or description)
if (query.isPresent()) {
Predicate<String> predicate
= prefixOnly ? getPrefixPredicate(query.get()) : getContainsPredicate(query.get());
if (defaultResource) {
stream = stream.filter( candidate -> DEFAULT_IDENTIFIER.equals( candidate.identifier ) && predicate.test(candidate.name));
} else {
stream = stream.filter( candidate -> passQuery(predicate, candidate));
}
}
// filter for identifier, title and description
stream = filterTerm(identifier, data -> data.identifier, prefixOnly, stream);
stream = filterTerm(title, data -> data.metadata != null ? data.metadata.getTitle() : null, prefixOnly, stream);
stream = filterTerm(description, data -> data.metadata != null ? data.metadata.getDescription() : null, prefixOnly, stream);
// if exact names is set, retain resources with exact names
if( exactMatchNames.isPresent() && !exactMatchNames.get().isEmpty()) {
// key the data by lower case name
Map<String, List<ArbitraryResourceData>> dataByName
= stream.collect(Collectors.groupingBy(data -> data.name.toLowerCase()));
// lower-case the exact names, then retain only those that actually occur in the data
List<String> exactNamesToSearch
= exactMatchNames.get().stream()
.map(String::toLowerCase)
.collect(Collectors.toList());
exactNamesToSearch.retainAll(dataByName.keySet());
// get the data for the names retained and
// set them to the stream
stream
= dataByName.entrySet().stream()
.filter(entry -> exactNamesToSearch.contains(entry.getKey())).flatMap(entry -> entry.getValue().stream());
}
// if exact names is not set, retain resources that match
else if( names.isPresent() && !names.get().isEmpty() ) {
stream = retainTerms(names.get(), data -> data.name, prefixOnly, stream);
}
// filter for minimum account level
if(minLevel.isPresent())
stream = stream.filter( candidate -> levelByName.getOrDefault(candidate.name, 0) >= minLevel.get() );
// apply LATEST mode (also the default when no mode is supplied)
if( LATEST.equals( mode.orElse( LATEST ) ) ) {
// Include latest item only for a name/service combination
stream
= stream.filter(candidate -> candidate.service != null && candidate.created != null ).collect(
Collectors.groupingBy(
data -> new AbstractMap.SimpleEntry<>(data.name, data.service), // name, service combination
Collectors.maxBy(Comparator.comparingLong(data -> data.created)) // latest data item
)).values().stream().filter(Optional::isPresent).map(Optional::get); // if there is a value for the group, then retain it
}
// sort
if( reverse.isPresent() && reverse.get())
stream = stream.sorted(CREATED_WHEN_COMPARATOR.reversed());
else
stream = stream.sorted(CREATED_WHEN_COMPARATOR);
// skip to offset
if( offset.isPresent() ) stream = stream.skip(offset.get());
// truncate to limit
if( limit.isPresent() && limit.get() > 0 ) stream = stream.limit(limit.get());
// strip metadata unless explicitly requested (note: this mutates the candidate objects in place)
if( includeMetadata.isEmpty() || !includeMetadata.get() )
stream = stream.peek( candidate -> candidate.metadata = null );
// strip status unless explicitly requested (same in-place mutation caveat)
if( includeStatus.isEmpty() || !includeStatus.get() )
stream = stream.peek( candidate -> candidate.status = null);
return stream.collect(Collectors.toList());
}
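For orientation, a minimal sketch of invoking filterList() directly; candidates and levelByName are assumed to come from the cache as above, and the concrete filter values (WEBSITE service, minimum level 1, limit 20) are purely illustrative:

    List<ArbitraryResourceData> results = HSQLDBCacheUtils.filterList(
            candidates,
            levelByName,
            Optional.of(SearchMode.LATEST),  // latest item per name/service pair
            Optional.of(Service.WEBSITE),    // illustrative service filter
            Optional.empty(),                // query
            Optional.empty(),                // identifier
            Optional.empty(),                // names
            Optional.empty(),                // title
            Optional.empty(),                // description
            false,                           // prefixOnly
            Optional.empty(),                // exactMatchNames
            false,                           // defaultResource
            Optional.of(1),                  // minLevel
            Optional.empty(),                // includeOnly
            Optional.empty(),                // exclude
            Optional.of(true),               // includeMetadata
            Optional.of(true),               // includeStatus
            Optional.empty(),                // before
            Optional.empty(),                // after
            Optional.of(20),                 // limit
            Optional.of(0),                  // offset
            Optional.of(false));             // reverse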
/**
* Filter Terms
*
* @param term the term to filter on
* @param stringSupplier extracts the string of interest from each resource candidate
* @param prefixOnly true for prefix-only matching, false to match anywhere in the string
* @param stream the stream of candidates
*
* @return the stream with the term filter applied
*/
private static Stream<ArbitraryResourceData> filterTerm(
Optional<String> term,
Function<ArbitraryResourceData,String> stringSupplier,
boolean prefixOnly,
Stream<ArbitraryResourceData> stream) {
if(term.isPresent()){
Predicate<String> predicate
= prefixOnly ? getPrefixPredicate(term.get()): getContainsPredicate(term.get());
stream = stream.filter(candidate -> predicate.test(stringSupplier.apply(candidate)));
}
return stream;
}
/**
* Retain Terms
*
* Retain resources that match any of the given terms.
*
* @param terms the terms to match
* @param stringSupplier extracts the string of interest from each resource candidate
* @param prefixOnly true for prefix-only matching, false to match anywhere in the string
* @param stream the stream of candidates
*
* @return a stream of the retained candidates
*/
private static Stream<ArbitraryResourceData> retainTerms(
List<String> terms,
Function<ArbitraryResourceData,String> stringSupplier,
boolean prefixOnly,
Stream<ArbitraryResourceData> stream) {
// collect the data to process, start the data to retain
List<ArbitraryResourceData> toProcess = stream.collect(Collectors.toList());
List<ArbitraryResourceData> toRetain = new ArrayList<>();
// for each term, build its predicate and collect every candidate whose
// string of interest satisfies it
for( String term : terms ) {
Predicate<String> predicate
= prefixOnly ? getPrefixPredicate(term) : getContainsPredicate(term);
toRetain.addAll(
toProcess.stream()
.filter(candidate -> predicate.test(stringSupplier.apply(candidate)))
.collect(Collectors.toList())
);
}
// drop duplicate entries for candidates that matched more than one term
return toRetain.stream().distinct();
}
private static Predicate<String> getContainsPredicate(String term) {
return value -> value != null && value.toLowerCase().contains(term.toLowerCase());
}
private static Predicate<String> getPrefixPredicate(String term) {
return value -> value != null && value.toLowerCase().startsWith(term.toLowerCase());
}
/**
* Pass Query
*
* Compare name, identifier, title and description
*
* @param predicate the string comparison predicate
* @param candidate the candidate to compare
*
* @return true if there is a match, otherwise false
*/
private static boolean passQuery(Predicate<String> predicate, ArbitraryResourceData candidate) {
if( predicate.test(candidate.name) ) return true;
if( predicate.test(candidate.identifier) ) return true;
if( candidate.metadata != null ) {
if( predicate.test(candidate.metadata.getTitle() )) return true;
if( predicate.test(candidate.metadata.getDescription())) return true;
}
return false;
}
/**
* Start Caching
*
* @param priorityRequested the thread priority for the cache-filling thread, clamped to 1-10
* @param frequency how often to refill the cache, in seconds
* @param repository the data source
*
* @return the data cache
*/
public static ArbitraryResourceCache startCaching(int priorityRequested, int frequency, HSQLDBRepository repository) {
final ArbitraryResourceCache cache = ArbitraryResourceCache.getInstance();
// clamp priority into the valid thread priority range of 1-10
final int priority = Math.max(1, Math.min(10, priorityRequested));
// Use a daemon Timer so the cache-filling thread cannot block JVM shutdown.
// Note: Timer.scheduleAtFixedRate() runs tasks on the Timer's own thread and never
// calls schedule(), so the priority must be applied inside the task itself.
Timer timer = new Timer(true); // 'true' to make the Timer daemon
TimerTask task = new TimerTask() {
@Override
public void run() {
Thread.currentThread().setPriority(priority);
fillCache(ArbitraryResourceCache.getInstance(), repository);
}
};
// start after 1 second, then refill every `frequency` seconds
timer.scheduleAtFixedRate(task, 1000L, frequency * 1000L);
return cache;
}
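A sketch of how a node might wire this up at startup, using the Settings getters added later in this commit (the open HSQLDBRepository is assumed to be in scope):

    Settings settings = Settings.getInstance();
    if (settings.isDbCacheEnabled()) {
        HSQLDBCacheUtils.startCaching(
                settings.getDbCacheThreadPriority(),
                settings.getDbCacheFrequency(),
                repository);
    }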
/**
* Fill Cache
*
* @param cache the cache to fill
* @param repository the data source to fill the cache with
*/
public static void fillCache(ArbitraryResourceCache cache, HSQLDBRepository repository) {
try {
// ensure all pending changes are committed before we query
repository.saveChanges();
List<ArbitraryResourceData> resources = getResources(repository);
Map<Integer, List<ArbitraryResourceData>> dataByService
= resources.stream()
.collect(Collectors.groupingBy(data -> data.service.value));
// lock, clear and refill
synchronized (cache.getDataByService()) {
cache.getDataByService().clear();
cache.getDataByService().putAll(dataByService);
}
fillNameMap(cache.getLevelByName(), repository);
}
catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
/**
* Fill Name Map
*
* Name -> Level
*
* @param levelByName the map to fill
* @param repository the data source
*
* @throws SQLException if the database query fails
*/
private static void fillNameMap(ConcurrentHashMap<String, Integer> levelByName, HSQLDBRepository repository) throws SQLException {
StringBuilder sql = new StringBuilder(512);
sql.append("SELECT name, level ");
sql.append("FROM NAMES ");
sql.append("INNER JOIN ACCOUNTS ON owner = account ");
// close the statement and result set when done, to avoid leaking cursors
try (Statement statement = repository.connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql.toString())) {
while (resultSet.next()) {
levelByName.put(resultSet.getString(1), resultSet.getInt(2));
}
}
}
/**
* Get Resources
*
* @param repository the data source
*
* @return the resources
* @throws SQLException if the database query fails
*/
private static List<ArbitraryResourceData> getResources( HSQLDBRepository repository) throws SQLException {
List<ArbitraryResourceData> resources = new ArrayList<>();
StringBuilder sql = new StringBuilder(512);
sql.append("SELECT name, service, identifier, size, status, created_when, updated_when, ");
sql.append("title, description, category, tag1, tag2, tag3, tag4, tag5 ");
sql.append("FROM ArbitraryResourcesCache ");
sql.append("LEFT JOIN ArbitraryMetadataCache USING (service, name, identifier) WHERE name IS NOT NULL");
// close the statement and result set when done, to avoid leaking cursors
try (Statement statement = repository.connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql.toString())) {
while (resultSet.next()) {
String nameResult = resultSet.getString(1);
int serviceResult = resultSet.getInt(2);
String identifierResult = resultSet.getString(3);
Integer sizeResult = resultSet.getInt(4);
Integer status = resultSet.getInt(5);
Long created = resultSet.getLong(6);
Long updated = resultSet.getLong(7);
String titleResult = resultSet.getString(8);
String descriptionResult = resultSet.getString(9);
String category = resultSet.getString(10);
String tag1 = resultSet.getString(11);
String tag2 = resultSet.getString(12);
String tag3 = resultSet.getString(13);
String tag4 = resultSet.getString(14);
String tag5 = resultSet.getString(15);
if (Objects.equals(identifierResult, "default")) {
// Map "default" back to null. This is optional but probably less confusing than returning "default".
identifierResult = null;
}
ArbitraryResourceData arbitraryResourceData = new ArbitraryResourceData();
arbitraryResourceData.name = nameResult;
arbitraryResourceData.service = Service.valueOf(serviceResult);
arbitraryResourceData.identifier = identifierResult;
arbitraryResourceData.size = sizeResult;
arbitraryResourceData.created = created;
arbitraryResourceData.updated = (updated == 0) ? null : updated;
arbitraryResourceData.setStatus(ArbitraryResourceStatus.Status.valueOf(status));
ArbitraryResourceMetadata metadata = new ArbitraryResourceMetadata();
metadata.setTitle(titleResult);
metadata.setDescription(descriptionResult);
metadata.setCategory(Category.uncategorizedValueOf(category));
List<String> tags = new ArrayList<>();
if (tag1 != null) tags.add(tag1);
if (tag2 != null) tags.add(tag2);
if (tag3 != null) tags.add(tag3);
if (tag4 != null) tags.add(tag4);
if (tag5 != null) tags.add(tag5);
metadata.setTags(!tags.isEmpty() ? tags : null);
if (metadata.hasMetadata()) {
arbitraryResourceData.metadata = metadata;
}
resources.add( arbitraryResourceData );
}
}
return resources;
}
}

View File

@ -197,32 +197,32 @@ public class Settings {
/** Target number of outbound connections to peers we should make. */
private int minOutboundPeers = 32;
/** Maximum number of peer connections we allow. */
private int maxPeers = 60;
private int maxPeers = 64;
/** Number of slots to reserve for short-lived QDN data transfers */
private int maxDataPeers = 5;
/** Maximum number of threads for network engine. */
private int maxNetworkThreadPoolSize = 620;
private int maxNetworkThreadPoolSize = 512;
/** Maximum number of threads for network proof-of-work compute, used during handshaking. */
private int networkPoWComputePoolSize = 2;
private int networkPoWComputePoolSize = 4;
/** Maximum number of retry attempts if a peer fails to respond with the requested data */
private int maxRetries = 2;
private int maxRetries = 3;
/** The number of seconds of no activity before recovery mode begins */
public long recoveryModeTimeout = 9999999999999L;
/** Minimum peer version number required in order to sync with them */
private String minPeerVersion = "4.5.2";
private String minPeerVersion = "4.6.0";
/** Whether to allow connections with peers below minPeerVersion
* If true, we won't sync with them but they can still sync with us, and will show in the peers list
* If false, sync will be blocked both ways, and they will not appear in the peers list */
private boolean allowConnectionsWithOlderPeerVersions = true;
/** Minimum time (in seconds) that we should attempt to remain connected to a peer for */
private int minPeerConnectionTime = 60 * 60; // seconds
private int minPeerConnectionTime = 2 * 60 * 60; // seconds
/** Maximum time (in seconds) that we should attempt to remain connected to a peer for */
private int maxPeerConnectionTime = 4 * 60 * 60; // seconds
/** Maximum time (in seconds) that a peer should remain connected when requesting QDN data */
private int maxDataPeerConnectionTime = 2 * 60; // seconds
private int maxDataPeerConnectionTime = 30 * 60; // seconds
/** Whether to sync multiple blocks at once in normal operation */
private boolean fastSyncEnabled = true;
@ -378,6 +378,66 @@ public class Settings {
* Exclude from settings.json to disable this warning. */
private Integer threadCountPerMessageTypeWarningThreshold = null;
/**
* DB Cache Enabled?
*/
private boolean dbCacheEnabled = false;
/**
* DB Cache Thread Priority
*
* If DB Cache is disabled, then this is ignored. If the value is lower than 1, then 1 is used. If the value
* is higher than 10, then 10 is used.
*/
private int dbCacheThreadPriority = 1;
/**
* DB Cache Frequency
*
* The number of seconds in between DB cache updates. If DB Cache is disabled, then this is ignored.
*/
private int dbCacheFrequency = 120;
/**
* Network Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for network peer connections. This is the
* main thread connecting to a peer in the network.
*/
private int networkThreadPriority = 5;
/**
* The Handshake Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for peer handshake messaging. This is a
* secondary thread to exchange status messaging to a peer in the network.
*/
private int handshakeThreadPriority = 5;
/**
* Pruning Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for database pruning and trimming.
*/
private int pruningThreadPriority = 1;
/**
* Synchronizer Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for synchronizing with other peers.
*/
private int synchronizerThreadPriority = 10;
/**
* Archiving Pause
*
* The pause in milliseconds between archiving blocks, to allow other processes to execute.
*/
private long archivingPause = 3000;
// Domain mapping
public static class ThreadLimit {
@ -1132,4 +1192,36 @@ public class Settings {
public Integer getThreadCountPerMessageTypeWarningThreshold() {
return this.threadCountPerMessageTypeWarningThreshold;
}
public boolean isDbCacheEnabled() {
return dbCacheEnabled;
}
public int getDbCacheThreadPriority() {
return dbCacheThreadPriority;
}
public int getDbCacheFrequency() {
return dbCacheFrequency;
}
public int getNetworkThreadPriority() {
return networkThreadPriority;
}
public int getHandshakeThreadPriority() {
return handshakeThreadPriority;
}
public int getPruningThreadPriority() {
return pruningThreadPriority;
}
public int getSynchronizerThreadPriority() {
return synchronizerThreadPriority;
}
public long getArchivingPause() {
return archivingPause;
}
}
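For reference, and assuming the usual field-name-to-key mapping this class uses, the new options would appear in settings.json under the field names shown above; an illustrative fragment using the in-code defaults:

    {
      "dbCacheEnabled": false,
      "dbCacheThreadPriority": 1,
      "dbCacheFrequency": 120,
      "networkThreadPriority": 5,
      "handshakeThreadPriority": 5,
      "pruningThreadPriority": 1,
      "synchronizerThreadPriority": 10,
      "archivingPause": 3000
    }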

View File

@ -8,19 +8,22 @@ public class DaemonThreadFactory implements ThreadFactory {
private final String name;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private int priority = Thread.NORM_PRIORITY;
public DaemonThreadFactory(String name) {
public DaemonThreadFactory(String name, int priority) {
this.name = name;
this.priority = priority;
}
public DaemonThreadFactory() {
this(null);
public DaemonThreadFactory(int priority) {
this(null, priority);
}
@Override
public Thread newThread(Runnable runnable) {
Thread thread = Executors.defaultThreadFactory().newThread(runnable);
thread.setDaemon(true);
thread.setPriority(this.priority);
if (this.name != null)
thread.setName(this.name + "-" + this.threadNumber.getAndIncrement());
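A brief usage sketch for the new constructor; the pool name and the priority source are illustrative only:

    // daemon pool whose worker threads run at the configured network priority
    ExecutorService executor = Executors.newCachedThreadPool(
            new DaemonThreadFactory("Network", Settings.getInstance().getNetworkThreadPriority()));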

View File

@ -9,7 +9,14 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Class ExecuteProduceConsume
*
* @ThreadSafe
*/
public abstract class ExecuteProduceConsume implements Runnable {
@XmlAccessorType(XmlAccessType.FIELD)
@ -30,25 +37,25 @@ public abstract class ExecuteProduceConsume implements Runnable {
protected ExecutorService executor;
// These are volatile to prevent thread-local caching of values
// but all are updated inside synchronized blocks
// so we don't need AtomicInteger/AtomicBoolean
// These are atomic to make this class thread-safe
private volatile int activeThreadCount = 0;
private volatile int greatestActiveThreadCount = 0;
private volatile int consumerCount = 0;
private volatile int tasksProduced = 0;
private volatile int tasksConsumed = 0;
private volatile int spawnFailures = 0;
private AtomicInteger activeThreadCount = new AtomicInteger(0);
private AtomicInteger greatestActiveThreadCount = new AtomicInteger(0);
private AtomicInteger consumerCount = new AtomicInteger(0);
private AtomicInteger tasksProduced = new AtomicInteger(0);
private AtomicInteger tasksConsumed = new AtomicInteger(0);
private AtomicInteger spawnFailures = new AtomicInteger(0);
/** Whether a new thread has already been spawned and is waiting to start. Used to prevent spawning multiple new threads. */
private volatile boolean hasThreadPending = false;
private AtomicBoolean hasThreadPending = new AtomicBoolean(false);
public ExecuteProduceConsume(ExecutorService executor) {
this.className = this.getClass().getSimpleName();
this.logger = LogManager.getLogger(this.getClass());
this.executor = executor;
this.logger.info("Created Thread-Safe ExecuteProduceConsume");
}
public ExecuteProduceConsume() {
@ -71,14 +78,12 @@ public abstract class ExecuteProduceConsume implements Runnable {
public StatsSnapshot getStatsSnapshot() {
StatsSnapshot snapshot = new StatsSnapshot();
synchronized (this) {
snapshot.activeThreadCount = this.activeThreadCount;
snapshot.greatestActiveThreadCount = this.greatestActiveThreadCount;
snapshot.consumerCount = this.consumerCount;
snapshot.tasksProduced = this.tasksProduced;
snapshot.tasksConsumed = this.tasksConsumed;
snapshot.spawnFailures = this.spawnFailures;
}
snapshot.activeThreadCount = this.activeThreadCount.get();
snapshot.greatestActiveThreadCount = this.greatestActiveThreadCount.get();
snapshot.consumerCount = this.consumerCount.get();
snapshot.tasksProduced = this.tasksProduced.get();
snapshot.tasksConsumed = this.tasksConsumed.get();
snapshot.spawnFailures = this.spawnFailures.get();
return snapshot;
}
@ -93,6 +98,8 @@ public abstract class ExecuteProduceConsume implements Runnable {
* @param canBlock
* @return task to be performed, or null if no task pending.
* @throws InterruptedException
*
* @ThreadSafe
*/
protected abstract Task produceTask(boolean canBlock) throws InterruptedException;
@ -105,117 +112,65 @@ public abstract class ExecuteProduceConsume implements Runnable {
public void run() {
Thread.currentThread().setName(this.className + "-" + Thread.currentThread().getId());
boolean wasThreadPending;
synchronized (this) {
++this.activeThreadCount;
if (this.activeThreadCount > this.greatestActiveThreadCount)
this.greatestActiveThreadCount = this.activeThreadCount;
this.logger.trace(() -> String.format("[%d] started, hasThreadPending was: %b, activeThreadCount now: %d",
Thread.currentThread().getId(), this.hasThreadPending, this.activeThreadCount));
this.activeThreadCount.incrementAndGet();
if (this.activeThreadCount.get() > this.greatestActiveThreadCount.get())
this.greatestActiveThreadCount.set( this.activeThreadCount.get() );
// Defer clearing hasThreadPending to prevent unnecessary threads waiting to produce...
wasThreadPending = this.hasThreadPending;
}
boolean wasThreadPending = this.hasThreadPending.get();
try {
while (!Thread.currentThread().isInterrupted()) {
Task task = null;
String taskType;
this.logger.trace(() -> String.format("[%d] waiting to produce...", Thread.currentThread().getId()));
synchronized (this) {
if (wasThreadPending) {
// Clear thread-pending flag now that we are about to produce.
this.hasThreadPending = false;
wasThreadPending = false;
}
// If we're the only non-consuming thread - producer can afford to block this round
boolean canBlock = this.activeThreadCount - this.consumerCount <= 1;
this.logger.trace(() -> String.format("[%d] producing... [activeThreadCount: %d, consumerCount: %d, canBlock: %b]",
Thread.currentThread().getId(), this.activeThreadCount, this.consumerCount, canBlock));
final long beforeProduce = this.logger.isDebugEnabled() ? System.currentTimeMillis() : 0;
try {
task = produceTask(canBlock);
} catch (InterruptedException e) {
// We're in shutdown situation so exit
Thread.currentThread().interrupt();
} catch (Exception e) {
this.logger.warn(() -> String.format("[%d] exception while trying to produce task", Thread.currentThread().getId()), e);
}
if (this.logger.isDebugEnabled()) {
final long productionPeriod = System.currentTimeMillis() - beforeProduce;
taskType = task == null ? "no task" : task.getName();
this.logger.debug(() -> String.format("[%d] produced [%s] in %dms [canBlock: %b]",
Thread.currentThread().getId(),
taskType,
productionPeriod,
canBlock
));
} else {
taskType = null;
}
if (wasThreadPending) {
// Clear thread-pending flag now that we are about to produce.
this.hasThreadPending.set( false );
wasThreadPending = false;
}
if (task == null)
synchronized (this) {
this.logger.trace(() -> String.format("[%d] no task, activeThreadCount: %d, consumerCount: %d",
Thread.currentThread().getId(), this.activeThreadCount, this.consumerCount));
// If we're the only non-consuming thread - producer can afford to block this round
boolean canBlock = this.activeThreadCount.get() - this.consumerCount.get() <= 1;
// If we have an excess of non-consuming threads then we can exit
if (this.activeThreadCount - this.consumerCount > 1) {
--this.activeThreadCount;
try {
task = produceTask(canBlock);
} catch (InterruptedException e) {
// We're in shutdown situation so exit
Thread.currentThread().interrupt();
} catch (Exception e) {
this.logger.warn(() -> String.format("[%d] exception while trying to produce task", Thread.currentThread().getId()), e);
}
this.logger.trace(() -> String.format("[%d] ending, activeThreadCount now: %d",
Thread.currentThread().getId(), this.activeThreadCount));
if (task == null) {
// If we have an excess of non-consuming threads then we can exit
if (this.activeThreadCount.get() - this.consumerCount.get() > 1) {
this.activeThreadCount.decrementAndGet();
return;
}
continue;
return;
}
continue;
}
// We have a task
synchronized (this) {
++this.tasksProduced;
++this.consumerCount;
this.tasksProduced.incrementAndGet();
this.consumerCount.incrementAndGet();
this.logger.trace(() -> String.format("[%d] hasThreadPending: %b, activeThreadCount: %d, consumerCount now: %d",
Thread.currentThread().getId(), this.hasThreadPending, this.activeThreadCount, this.consumerCount));
// If we have no thread pending and no excess of threads then we should spawn a fresh thread
if (!this.hasThreadPending.get() && this.activeThreadCount.get() == this.consumerCount.get()) {
// If we have no thread pending and no excess of threads then we should spawn a fresh thread
if (!this.hasThreadPending && this.activeThreadCount == this.consumerCount) {
this.logger.trace(() -> String.format("[%d] spawning another thread", Thread.currentThread().getId()));
this.hasThreadPending.set( true );
this.hasThreadPending = true;
try {
this.executor.execute(this); // Same object, different thread
} catch (RejectedExecutionException e) {
this.spawnFailures.incrementAndGet();
this.hasThreadPending.set( false );
try {
this.executor.execute(this); // Same object, different thread
} catch (RejectedExecutionException e) {
++this.spawnFailures;
this.hasThreadPending = false;
this.logger.trace(() -> String.format("[%d] failed to spawn another thread", Thread.currentThread().getId()));
this.onSpawnFailure();
}
} else {
this.logger.trace(() -> String.format("[%d] NOT spawning another thread", Thread.currentThread().getId()));
this.onSpawnFailure();
}
}
this.logger.trace(() -> String.format("[%d] consuming [%s] task...", Thread.currentThread().getId(), taskType));
final long beforePerform = this.logger.isDebugEnabled() ? System.currentTimeMillis() : 0;
try {
task.perform(); // This can block for a while
} catch (InterruptedException e) {
@ -225,23 +180,11 @@ public abstract class ExecuteProduceConsume implements Runnable {
this.logger.warn(() -> String.format("[%d] exception while consuming task", Thread.currentThread().getId()), e);
}
if (this.logger.isDebugEnabled()) {
final long productionPeriod = System.currentTimeMillis() - beforePerform;
this.logger.debug(() -> String.format("[%d] consumed [%s] task in %dms", Thread.currentThread().getId(), taskType, productionPeriod));
}
synchronized (this) {
++this.tasksConsumed;
--this.consumerCount;
this.logger.trace(() -> String.format("[%d] consumerCount now: %d",
Thread.currentThread().getId(), this.consumerCount));
}
this.tasksConsumed.incrementAndGet();
this.consumerCount.decrementAndGet();
}
} finally {
Thread.currentThread().setName(this.className);
}
}
}
}
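One caveat with the conversion above: the incrementAndGet()/get()/set() sequence used to track greatestActiveThreadCount is not atomic as a whole, so two threads can interleave between the read and the write. A race-free alternative under the same AtomicInteger fields, sketched here, is accumulateAndGet():

    // update the high-water mark atomically
    int nowActive = this.activeThreadCount.incrementAndGet();
    this.greatestActiveThreadCount.accumulateAndGet(nowActive, Math::max);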

View File

@ -8,15 +8,18 @@ public class NamedThreadFactory implements ThreadFactory {
private final String name;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private final int priority;
public NamedThreadFactory(String name) {
public NamedThreadFactory(String name, int priority) {
this.name = name;
this.priority = priority;
}
@Override
public Thread newThread(Runnable runnable) {
Thread thread = Executors.defaultThreadFactory().newThread(runnable);
thread.setName(this.name + "-" + this.threadNumber.getAndIncrement());
thread.setPriority(this.priority);
return thread;
}

View File

@ -1,5 +1,57 @@
console.log("Gateway mode");
function sendRequestToExtension(
requestType,
payload,
timeout = 750
) {
return new Promise((resolve, reject) => {
const requestId = Math.random().toString(36).substring(2, 15); // Generate a unique ID for the request
const detail = {
type: requestType,
payload,
requestId,
timeout: timeout / 1000,
};
// Store the timeout ID so it can be cleared later
const timeoutId = setTimeout(() => {
document.removeEventListener("qortalExtensionResponses", handleResponse);
reject(new Error("Request timed out"));
}, timeout); // Adjust timeout as necessary
function handleResponse(event) {
const { requestId: responseId, data } = event.detail;
if (requestId === responseId) {
// Match the response with the request
document.removeEventListener("qortalExtensionResponses", handleResponse);
clearTimeout(timeoutId); // Clear the timeout upon successful response
resolve(data);
}
}
document.addEventListener("qortalExtensionResponses", handleResponse);
document.dispatchEvent(
new CustomEvent("qortalExtensionRequests", { detail })
);
});
}
const isExtensionInstalledFunc = async () => {
try {
const response = await sendRequestToExtension(
"REQUEST_IS_INSTALLED",
{},
750
);
return response;
} catch (error) {
// not installed
}
};
function qdnGatewayShowModal(message) {
const modalElementId = "qdnGatewayModal";
@ -32,7 +84,7 @@ function qdnGatewayShowModal(message) {
document.body.appendChild(modalElement);
}
window.addEventListener("message", (event) => {
window.addEventListener("message", async (event) => {
if (event == null || event.data == null || event.data.length == 0) {
return;
}
@ -43,7 +95,7 @@ window.addEventListener("message", (event) => {
// Gateway mode only cares about requests that were intended for the UI
return;
}
let response;
let data = event.data;
@ -59,6 +111,8 @@ window.addEventListener("message", (event) => {
case "GET_LIST_ITEMS":
case "ADD_LIST_ITEMS":
case "DELETE_LIST_ITEM":
const isExtInstalledRes = await isExtensionInstalledFunc()
if(isExtInstalledRes?.version) return;
const errorString = "Interactive features were requested, but these are not yet supported when viewing via a gateway. To use interactive features, please access using the Qortal UI desktop app. More info at: https://qortal.org";
response = "{\"error\": \"" + errorString + "\"}"

View File

@ -1,3 +1,118 @@
let customQDNHistoryPaths = []; // Array to track visited paths
let currentIndex = -1; // Index to track the current position in the history
let isManualNavigation = true; // Flag to control when to add new paths. set to false when navigating through a back/forward call
function resetVariables(){
// reassign the module-level variables; re-declaring them with `let` would only create locals
customQDNHistoryPaths = [];
currentIndex = -1;
isManualNavigation = true;
}
function getNameAfterService(url) {
try {
const parsedUrl = new URL(url);
const pathParts = parsedUrl.pathname.split('/');
// Find the index of "WEBSITE" or "APP" and get the next part
const serviceIndex = pathParts.findIndex(part => part === 'WEBSITE' || part === 'APP');
if (serviceIndex !== -1 && pathParts[serviceIndex + 1]) {
return pathParts[serviceIndex + 1];
} else {
return null; // Return null if "WEBSITE" or "APP" is not found or has no following part
}
} catch (error) {
console.error("Invalid URL provided:", error);
return null;
}
}
function parseUrl(url) {
try {
const parsedUrl = new URL(url);
// Check if isManualNavigation query exists and is set to "false"
const isManual = parsedUrl.searchParams.get("isManualNavigation");
if (isManual !== null && isManual == "false") {
isManualNavigation = false
// Optional: handle this condition if needed (e.g., return or adjust the response)
}
// Remove theme, identifier, and time queries if they exist
parsedUrl.searchParams.delete("theme");
parsedUrl.searchParams.delete("identifier");
parsedUrl.searchParams.delete("time");
parsedUrl.searchParams.delete("isManualNavigation");
// Extract the pathname and remove the prefix if it matches "render/APP" or "render/WEBSITE"
const path = parsedUrl.pathname.replace(/^\/render\/(APP|WEBSITE)\/[^/]+/, "");
// Combine the path with remaining query params (if any)
return path + parsedUrl.search;
} catch (error) {
console.error("Invalid URL provided:", error);
return null;
}
}
// Tell the client to open a new tab. Done when an app is linking to another app
function openNewTab(data){
window.parent.postMessage({
action: 'SET_TAB',
requestedHandler:'UI',
payload: data
}, '*');
}
// sends navigation information to the client in order to manage back/forward navigation
function sendNavigationInfoToParent(isDOMContentLoaded){
window.parent.postMessage({
action: 'NAVIGATION_HISTORY',
requestedHandler:'UI',
payload: {
customQDNHistoryPaths,
currentIndex,
isDOMContentLoaded: isDOMContentLoaded ? true : false
}
}, '*');
}
function handleQDNResourceDisplayed(pathurl, isDOMContentLoaded) {
// make sure that an empty string maps to the root path
const path = pathurl || '/'
if (!isManualNavigation) {
isManualNavigation = true
// If the navigation is automatic (back/forward), do not add new entries
return;
}
// If it's a new path, add it to the history array and adjust the index
if (customQDNHistoryPaths[currentIndex] !== path) {
customQDNHistoryPaths = customQDNHistoryPaths.slice(0, currentIndex + 1);
// Add the new path and move the index to the new position
customQDNHistoryPaths.push(path);
currentIndex = customQDNHistoryPaths.length - 1;
sendNavigationInfoToParent(isDOMContentLoaded)
} else {
currentIndex = customQDNHistoryPaths.length - 1
sendNavigationInfoToParent(isDOMContentLoaded)
}
// Reset isManualNavigation after handling
isManualNavigation = true;
}
function httpGet(url) {
var request = new XMLHttpRequest();
request.open("GET", url, false);
@ -156,7 +271,7 @@ function convertToResourceUrl(url, isLink) {
return buildResourceUrl(c.service, c.name, c.identifier, c.path, isLink);
}
window.addEventListener("message", (event) => {
window.addEventListener("message", async (event) => {
if (event == null || event.data == null || event.data.length == 0) {
return;
}
@ -199,10 +314,51 @@ window.addEventListener("message", (event) => {
if (data.identifier != null) url = url.concat("/" + data.identifier);
return httpGetAsyncWithEvent(event, url);
case "LINK_TO_QDN_RESOURCE":
if (data.service == null) data.service = "WEBSITE"; // Default to WEBSITE
window.location = buildResourceUrl(data.service, data.name, data.identifier, data.path, true);
return;
case "LINK_TO_QDN_RESOURCE":
if (data.service == null) data.service = "WEBSITE"; // Default to WEBSITE
const nameOfCurrentApp = getNameAfterService(window.location.href);
// Check to see if the link is an external app. If it is, request that the client opens a new tab instead of manipulating the window's history stack.
if (nameOfCurrentApp !== data.name) {
// Attempt to open a new tab and wait for a response
const navigationPromise = new Promise((resolve, reject) => {
function handleMessage(event) {
if (event.data?.action === 'SET_TAB_SUCCESS' && event.data.payload?.name === data.name) {
window.removeEventListener('message', handleMessage);
resolve();
}
}
window.addEventListener('message', handleMessage);
// Send the message to the parent window
openNewTab({
name: data.name,
service: data.service,
identifier: data.identifier,
path: data.path
});
// Set a timeout to reject the promise if no response is received within 200ms
setTimeout(() => {
window.removeEventListener('message', handleMessage);
reject(new Error("No response within 200ms"));
}, 200);
});
// Handle the promise, and if it times out, fall back to the else block
navigationPromise
.then(() => {
console.log('Tab opened successfully');
})
.catch(() => {
console.warn('No response, proceeding with window.location');
window.location = buildResourceUrl(data.service, data.name, data.identifier, data.path, true);
});
} else {
window.location = buildResourceUrl(data.service, data.name, data.identifier, data.path, true);
}
return;
case "LIST_QDN_RESOURCES":
url = "/arbitrary/resources?";
@ -351,10 +507,18 @@ window.addEventListener("message", (event) => {
if (data.inverse != null) url = url.concat("&inverse=" + data.inverse);
return httpGetAsyncWithEvent(event, url);
case "PERFORMING_NON_MANUAL":
isManualNavigation = false
currentIndex = data.currentIndex
return;
default:
// Pass to parent (UI), in case they can fulfil this request
event.data.requestedHandler = "UI";
parent.postMessage(event.data, '*', [event.ports[0]]);
return;
}
@ -523,7 +687,8 @@ const qortalRequestWithTimeout = (request, timeout) =>
/**
* Send current page details to UI
*/
document.addEventListener('DOMContentLoaded', () => {
document.addEventListener('DOMContentLoaded', (event) => {
resetVariables()
qortalRequest({
action: "QDN_RESOURCE_DISPLAYED",
service: _qdnService,
@ -531,6 +696,10 @@ document.addEventListener('DOMContentLoaded', () => {
identifier: _qdnIdentifier,
path: _qdnPath
});
// send to the client the first path when the app loads.
const firstPath = parseUrl(window?.location?.href || "")
handleQDNResourceDisplayed(firstPath, true);
});
/**
@ -538,12 +707,20 @@ document.addEventListener('DOMContentLoaded', () => {
*/
navigation.addEventListener('navigate', (event) => {
const url = new URL(event.destination.url);
let fullpath = url.pathname + url.hash;
const processedPath = (fullpath.startsWith(_qdnBase)) ? fullpath.slice(_qdnBase.length) : fullpath;
qortalRequest({
action: "QDN_RESOURCE_DISPLAYED",
service: _qdnService,
name: _qdnName,
identifier: _qdnIdentifier,
path: (fullpath.startsWith(_qdnBase)) ? fullpath.slice(_qdnBase.length) : fullpath
path: processedPath
});
// Put a timeout so that the DOMContentLoaded listener's logic executes before the navigate listener
setTimeout(()=> {
handleQDNResourceDisplayed(processedPath);
}, 100)
});

View File

@ -0,0 +1,645 @@
package org.qortal.test.repository;
import org.junit.Assert;
import org.junit.Test;
import org.qortal.api.SearchMode;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
public class HSQLDBCacheUtilsTests {
private static final Map<String, Integer> NAME_LEVEL = Map.of("Joe", 4);
private static final String SERVICE = "service";
private static final String QUERY = "query";
private static final String IDENTIFIER = "identifier";
private static final String NAMES = "names";
private static final String TITLE = "title";
private static final String DESCRIPTION = "description";
private static final String PREFIX_ONLY = "prefixOnly";
private static final String EXACT_MATCH_NAMES = "exactMatchNames";
private static final String DEFAULT_RESOURCE = "defaultResource";
private static final String MODE = "mode";
private static final String MIN_LEVEL = "minLevel";
private static final String FOLLOWED_ONLY = "followedOnly";
private static final String EXCLUDE_BLOCKED = "excludeBlocked";
private static final String INCLUDE_METADATA = "includeMetadata";
private static final String INCLUDE_STATUS = "includeStatus";
private static final String BEFORE = "before";
private static final String AFTER = "after";
private static final String LIMIT = "limit";
private static final String OFFSET = "offset";
private static final String REVERSE = "reverse";
@Test
public void test000EmptyQuery() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "joe";
List<ArbitraryResourceData> candidates = List.of(data);
filterListByMap(candidates, NAME_LEVEL, new HashMap<>(Map.of()), 1);
}
@Test
public void testLatestModeNoService() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST )),
0);
}
@Test
public void testLatestModeNoCreated() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "joe";
data.service = Service.FILE;
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST )),
0);
}
@Test
public void testLatestModeReturnFirst() {
ArbitraryResourceData first = new ArbitraryResourceData();
first.name = "joe";
first.service = Service.FILE;
first.created = 1L;
ArbitraryResourceData last = new ArbitraryResourceData();
last.name = "joe";
last.service = Service.FILE;
last.created = 2L;
List<ArbitraryResourceData>
results = filterListByMap(
List.of(first, last),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST)),
1
);
ArbitraryResourceData singleResult = results.get(0);
Assert.assertTrue( singleResult.created == 2 );
}
@Test
public void testLatestModeReturn2From4() {
ArbitraryResourceData firstFile = new ArbitraryResourceData();
firstFile.name = "joe";
firstFile.service = Service.FILE;
firstFile.created = 1L;
ArbitraryResourceData lastFile = new ArbitraryResourceData();
lastFile.name = "joe";
lastFile.service = Service.FILE;
lastFile.created = 2L;
ArbitraryResourceData firstAudio = new ArbitraryResourceData();
firstAudio.name = "joe";
firstAudio.service = Service.AUDIO;
firstAudio.created = 3L;
ArbitraryResourceData lastAudio = new ArbitraryResourceData();
lastAudio.name = "joe";
lastAudio.service = Service.AUDIO;
lastAudio.created = 4L;
// LATEST mode keeps the newest item per name/service combination, so 2 of the 4 survive
List<ArbitraryResourceData> results = filterListByMap(
List.of(firstFile, lastFile, firstAudio, lastAudio),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST)),
2
);
Assert.assertTrue( results.stream().anyMatch(data -> data.created == 2L) );
Assert.assertTrue( results.stream().anyMatch(data -> data.created == 4L) );
}
@Test
public void testServicePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.service = Service.AUDIO;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(SERVICE, Service.AUDIO)),
1
);
}
@Test
public void testQueryPositiveDescription() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("has keyword");
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(QUERY, "keyword")),
1
);
}
@Test
public void testQueryNegativeDescriptionPrefix() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("has keyword");
data.name = "Joe";
filterListByMap(List.of(data),
NAME_LEVEL, new HashMap<>((Map.of(QUERY, "keyword", PREFIX_ONLY, true))),
0);
}
@Test
public void testQueryPositiveDescriptionPrefix() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("keyword starts sentence");
data.name = "Joe";
filterListByMap(List.of(data),
NAME_LEVEL, new HashMap<>((Map.of(QUERY, "keyword", PREFIX_ONLY, true))),
1);
}
@Test
public void testQueryNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "admin";
data.identifier = "id-0";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(QUERY, "keyword")),
0
);
}
@Test
public void testExactNamePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(EXACT_MATCH_NAMES,List.of("Joe"))),
1
);
}
@Test
public void testExactNameNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(EXACT_MATCH_NAMES,List.of("Joey"))),
0
);
}
@Test
public void testNamePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Mr Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joe"))),
1
);
}
@Test
public void testNameNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Jay";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joe"))),
0
);
}
@Test
public void testNamePrefixOnlyPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joey";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joe"), PREFIX_ONLY, true)),
1
);
}
@Test
public void testNamePrefixOnlyNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joey"), PREFIX_ONLY, true)),
0
);
}
@Test
public void testIdentifierPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.identifier = "007";
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(IDENTIFIER, "007")),
1
);
}
@Test
public void testAfterPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.created = 10L;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(AFTER, 9L)),
1
);
}
@Test
public void testBeforePositive(){
ArbitraryResourceData data = new ArbitraryResourceData();
data.created = 10L;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(BEFORE, 11L)),
1
);
}
@Test
public void testTitlePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setTitle("Sunday");
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(TITLE, "Sunday")),
1
);
}
@Test
public void testDescriptionPositive(){
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("Once upon a time.");
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(DESCRIPTION, "Once upon a time.")),
1
);
}
@Test
public void testMinLevelPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MIN_LEVEL, 4)),
1
);
}
@Test
public void testMinLevelNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MIN_LEVEL, 5)),
0
);
}
@Test
public void testDefaultResourcePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(DEFAULT_RESOURCE, true)),
1
);
}
@Test
public void testFollowedNamesPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
Supplier<List<String>> supplier = () -> List.of("admin");
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(FOLLOWED_ONLY, supplier)),
1
);
}
@Test
public void testExcludeBlockedPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
Supplier<List<String>> supplier = () -> List.of("admin");
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(EXCLUDE_BLOCKED, supplier)),
1
);
}
@Test
public void testIncludeMetadataPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_METADATA, true)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNotNull(result.metadata);
}
@Test
public void testIncludesMetadataNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_METADATA, false)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNull(result.metadata);
}
@Test
public void testIncludeStatusPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.status = new ArbitraryResourceStatus();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_STATUS, true)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNotNull(result.status);
}
@Test
public void testIncludeStatusNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.status = new ArbitraryResourceStatus();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_STATUS, false)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNull(result.status);
}
@Test
public void testLimit() {
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.name = "Joe";
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.name = "Joe";
ArbitraryResourceData data3 = new ArbitraryResourceData();
data3.name = "Joe";
filterListByMap(
List.of(data1, data2, data3),
NAME_LEVEL, new HashMap<>(Map.of(LIMIT, 2)),
2
);
}
@Test
public void testLimitZero() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(LIMIT, 0)),
1
);
}
@Test
public void testOffset() {
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.created = 1L;
data1.name = "Joe";
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.created = 2L;
data2.name = "Joe";
ArbitraryResourceData data3 = new ArbitraryResourceData();
data3.created = 3L;
data3.name = "Joe";
List<ArbitraryResourceData> result
= filterListByMap(
List.of(data1, data2, data3),
NAME_LEVEL, new HashMap<>(Map.of(OFFSET, 1)),
2
);
Assert.assertNotNull(result.get(0));
Assert.assertTrue(2L == result.get(0).created);
}
@Test
public void testOrder() {
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.created = 2L;
data2.name = "Joe";
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.created = 1L;
data1.name = "Joe";
List<ArbitraryResourceData> result
= filterListByMap(
List.of(data2, data1),
NAME_LEVEL, new HashMap<>(),
2
);
Assert.assertNotNull(result.get(0));
Assert.assertTrue( result.get(0).created == 1L );
}
@Test
public void testReverseOrder() {
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.created = 1L;
data1.name = "Joe";
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.created = 2L;
data2.name = "Joe";
List<ArbitraryResourceData> result
= filterListByMap(
List.of(data1, data2),
NAME_LEVEL, new HashMap<>(Map.of(REVERSE, true)),
2);
Assert.assertNotNull( result.get(0));
Assert.assertTrue( result.get(0).created == 2L);
}
public static List<ArbitraryResourceData> filterListByMap(
List<ArbitraryResourceData> candidates,
Map<String, Integer> levelByName,
HashMap<String, Object> valueByKey,
int sizeToAssert) {
Optional<Service> service = Optional.ofNullable((Service) valueByKey.get(SERVICE));
Optional<String> query = Optional.ofNullable( (String) valueByKey.get(QUERY));
Optional<String> identifier = Optional.ofNullable((String) valueByKey.get(IDENTIFIER));
Optional<List<String>> names = Optional.ofNullable((List<String>) valueByKey.get(NAMES));
Optional<String> title = Optional.ofNullable((String) valueByKey.get(TITLE));
Optional<String> description = Optional.ofNullable((String) valueByKey.get(DESCRIPTION));
boolean prefixOnly = valueByKey.containsKey(PREFIX_ONLY);
Optional<List<String>> exactMatchNames = Optional.ofNullable((List<String>) valueByKey.get(EXACT_MATCH_NAMES));
boolean defaultResource = valueByKey.containsKey(DEFAULT_RESOURCE);
Optional<SearchMode> mode = Optional.of((SearchMode) valueByKey.getOrDefault(MODE, SearchMode.ALL));
Optional<Integer> minLevel = Optional.ofNullable((Integer) valueByKey.get(MIN_LEVEL));
Optional<Supplier<List<String>>> followedOnly = Optional.ofNullable((Supplier<List<String>>) valueByKey.get(FOLLOWED_ONLY));
Optional<Supplier<List<String>>> excludeBlocked = Optional.ofNullable((Supplier<List<String>>) valueByKey.get(EXCLUDE_BLOCKED));
Optional<Boolean> includeMetadata = Optional.ofNullable((Boolean) valueByKey.get(INCLUDE_METADATA));
Optional<Boolean> includeStatus = Optional.ofNullable((Boolean) valueByKey.get(INCLUDE_STATUS));
Optional<Long> before = Optional.ofNullable((Long) valueByKey.get(BEFORE));
Optional<Long> after = Optional.ofNullable((Long) valueByKey.get(AFTER));
Optional<Integer> limit = Optional.ofNullable((Integer) valueByKey.get(LIMIT));
Optional<Integer> offset = Optional.ofNullable((Integer) valueByKey.get(OFFSET));
Optional<Boolean> reverse = Optional.ofNullable((Boolean) valueByKey.get(REVERSE));
List<ArbitraryResourceData> filteredList
= HSQLDBCacheUtils.filterList(
candidates,
levelByName,
mode,
service,
query,
identifier,
names,
title,
description,
prefixOnly,
exactMatchNames,
defaultResource,
minLevel,
followedOnly,
excludeBlocked,
includeMetadata,
includeStatus,
before,
after,
limit,
offset,
reverse);
Assert.assertEquals(sizeToAssert, filteredList.size());
return filteredList;
}
}
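Since these tests exercise filterList() purely in memory, they can be run in isolation with Surefire's standard test filter, e.g.:

    mvn test -Dtest=HSQLDBCacheUtilsTests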