forked from Qortal/qortal
Compare commits
230 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
6f628be053 | ||
|
eb07c45955 | ||
|
8bea11bc52 | ||
|
415f594b25 | ||
|
1e593cdf13 | ||
71d2fbe0b6 | |||
|
5a760db37d | ||
|
05d629e717 | ||
|
cea63e7ec7 | ||
|
5fabc7792c | ||
|
09d0af9b78 | ||
|
698e616bc9 | ||
6c0a9b3539 | |||
|
60811f9f65 | ||
|
d91a777ffd | ||
|
c19cad020e | ||
52519e3662 | |||
fd62e6156c | |||
e5890b3b6f | |||
256baeb1f4 | |||
05b83ade47 | |||
f7cb4ce264 | |||
086ed6574f | |||
|
4b56690118 | ||
|
44d26b513a | ||
|
dbd900f74a | ||
|
38463f6b1a | ||
|
16e48aba04 | ||
|
56d97457a1 | ||
|
2167d2f8fe | ||
|
8425d62673 | ||
4995bee3e3 | |||
|
87897d7db8 | ||
|
49e9a53c6a | ||
|
b5c4599005 | ||
|
3aabedda92 | ||
|
dd88decc40 | ||
|
4f3b4e4a58 | ||
|
b2c72c3927 | ||
|
65c014b215 | ||
|
b2579a457c | ||
|
170668ef78 | ||
|
b48b6b9d42 | ||
|
22dc3e55df | ||
|
66bfed93ee | ||
b8e1712881 | |||
6a5013d378 | |||
|
3687455c62 | ||
|
60b3bacd15 | ||
|
7a7f0e53ac | ||
|
940c641759 | ||
|
a3bb6638bf | ||
|
5b402e0bca | ||
|
89236d6504 | ||
|
47e313067f | ||
|
92077f2912 | ||
|
95e12395ae | ||
|
47e5c473b3 | ||
|
15f793ccb4 | ||
|
ccb59559d6 | ||
|
30c5136c44 | ||
|
91a58c50e1 | ||
f8daf50ccb | |||
|
8e0e455d41 | ||
6145db5357 | |||
|
7ccd06e5c3 | ||
517f7b92d5 | |||
|
fa8b9f2cee | ||
d66616f375 | |||
|
02e10e9de9 | ||
|
61c010754e | ||
|
5013c68b61 | ||
140d86e209 | |||
9e4925c8dd | |||
|
88fe3b0af6 | ||
|
e6f032a2a9 | ||
ca88cb1f88 | |||
58ab02c4f0 | |||
e1ea8d65f8 | |||
1c52c18d32 | |||
2cd5f9e4cd | |||
f2b5802d9c | |||
bc4e0716db | |||
994761a87e | |||
5780a6de7d | |||
8c811ef1ef | |||
|
f5a4a0a16c | ||
|
93dab1a3e3 | ||
|
7d14d381bc | ||
|
6511086d18 | ||
70ae122f5c | |||
|
33475ace00 | ||
|
88d009c979 | ||
|
26a345a909 | ||
|
618945620d | ||
|
b6d3e407c8 | ||
|
4b74bb37dc | ||
|
17b2bf3848 | ||
|
1f6ee72fc5 | ||
|
83bc84909a | ||
|
144d6cc5c7 | ||
|
eff2e6d150 | ||
|
c1041d2ad3 | ||
|
699d8815c4 | ||
|
2a97fba108 | ||
|
f1a0472c57 | ||
|
c4d8a17355 | ||
|
9c1cb9da77 | ||
|
7dae60d35f | ||
|
8421336016 | ||
|
2e7cd93716 | ||
|
2cf0aeac22 | ||
|
cc4056047e | ||
|
421e241729 | ||
|
c977660c47 | ||
|
867d0e29e0 | ||
|
57d12b4afe | ||
aeab1acbbc | |||
|
0b37666d2b | ||
|
bcf3538d18 | ||
|
b2d9d0539e | ||
|
1bd6076e33 | ||
|
a6309e925b | ||
|
23de8a98bc | ||
|
d0a85d4717 | ||
|
a893888a2e | ||
|
bd4472c2c0 | ||
|
10dda255e2 | ||
|
934c23402a | ||
|
4188f18a9a | ||
|
e48fd96c1e | ||
|
e76694e214 | ||
|
dbf49309ec | ||
|
ab4730cef0 | ||
|
7f3c1d553f | ||
ab0ef85458 | |||
b64674783a | |||
|
92fb52220a | ||
|
2d0bdca8dc | ||
2e9f358d0b | |||
6a6380e9e7 | |||
|
11c2d1bd14 | ||
1d79df078e | |||
|
4baafd1305 | ||
|
0fae20a3c3 | ||
f8cee2e0b7 | |||
|
676885ea2d | ||
|
a90f217212 | ||
|
e40a77542b | ||
|
80b24b185f | ||
|
1f4ca6263f | ||
|
df37372180 | ||
086b0809d6 | |||
33650cc432 | |||
c22abc440b | |||
258eb3a0f9 | |||
|
91ceafe0e3 | ||
|
9017db725e | ||
|
a42f214358 | ||
ecd4233dd0 | |||
e5b6e893cd | |||
9e45d640bc | |||
|
faee7c8f6a | ||
ca238c995e | |||
e434a28d00 | |||
996d5e0e86 | |||
8b797b5bd5 | |||
|
999cfafe00 | ||
4991618f19 | |||
|
4c35239bb1 | ||
d6cf45b311 | |||
|
ea9a24dca2 | ||
|
72f0194487 | ||
|
b2dbcbb603 | ||
|
69cba78d94 | ||
|
70f4ff4fb3 | ||
|
a8a8904ebf | ||
|
2805bb8364 | ||
|
d9a7648d36 | ||
|
2392b7b155 | ||
|
f5d338435a | ||
|
8f6b55a98b | ||
|
278243f01c | ||
|
756f3a243d | ||
|
950c4a5b35 | ||
|
ebc58c5c5c | ||
|
8bbb994876 | ||
|
c2ba9d142c | ||
|
a300ac2393 | ||
|
bdbbd0152f | ||
|
45d88c1bac | ||
|
3952705edd | ||
|
4f0aabfb36 | ||
5ac0027b5a | |||
e9b75b051b | |||
|
c71f5fa8bf | ||
|
5e145de52b | ||
|
543d0a7d22 | ||
|
5346c97922 | ||
|
c2bfa26376 | ||
|
386387fa16 | ||
071325cf6d | |||
a23eb02182 | |||
04203e7c31 | |||
|
749143e98e | ||
9b20192b30 | |||
8d6830135c | |||
448b536238 | |||
2e989aaa57 | |||
|
8bd293ccd5 | ||
|
a8d73926b3 | ||
|
bd214a0fd3 | ||
|
2347118e59 | ||
|
7fb093e98a | ||
|
e3a85786e7 | ||
|
adbba0f947 | ||
61dec0e4b7 | |||
|
15105306d1 | ||
|
3ddef1e13f | ||
|
08a2284ce4 | ||
|
2e3f97b51f | ||
|
84b973773a | ||
|
8ffb0625a1 | ||
7803d6c8f5 | |||
|
652c902607 | ||
|
915bb1ded3 | ||
|
3d83a79014 | ||
|
82d5d25c59 | ||
|
1676098abe | ||
|
991636ccad |
6
.github/workflows/pr-testing.yml
vendored
6
.github/workflows/pr-testing.yml
vendored
@@ -1,7 +1,7 @@
|
||||
name: PR testing
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
@@ -22,6 +22,10 @@ jobs:
|
||||
java-version: '11'
|
||||
distribution: 'adopt'
|
||||
|
||||
- name: Load custom deps
|
||||
run: |
|
||||
mvn install -DskipTests=true --file pom.xml
|
||||
|
||||
- name: Run all tests
|
||||
run: |
|
||||
mvn -B clean test -DskipTests=false --file pom.xml
|
||||
|
15
README.md
15
README.md
@@ -15,20 +15,31 @@ Building the future one block at a time. Welcome to Qortal.
|
||||
|
||||
# Building the Qortal Core from Source
|
||||
|
||||
## Build / run
|
||||
## Build / Run
|
||||
|
||||
- Requires Java 11. OpenJDK 11 recommended over Java SE.
|
||||
- Install Maven
|
||||
- Use Maven to fetch dependencies and build: `mvn clean package`
|
||||
- Update Maven dependencies: `mvn install`
|
||||
- Built JAR should be something like `target/qortal-1.0.jar`
|
||||
- Create basic *settings.json* file: `echo '{}' > settings.json`
|
||||
- Run JAR in same working directory as *settings.json*: `java -jar target/qortal-1.0.jar`
|
||||
- Wrap in shell script, add JVM flags, redirection, backgrounding, etc. as necessary.
|
||||
- Or use supplied example shell script: *start.sh*
|
||||
|
||||
## IntelliJ IDEA Configuration
|
||||
|
||||
- Run -> Edit Configurations
|
||||
- Add New Application
|
||||
- Name: qortal
|
||||
- SDK: java 11
|
||||
- Main Class: org.qortal.controller.Controller
|
||||
- Program arguments: settings.json -Dlog4j.configurationFile=log4j2.properties -ea
|
||||
- Environment variables: Djava.net.preferIPv4Stack=false
|
||||
|
||||
# Using a pre-built Qortal 'jar' binary
|
||||
|
||||
If you would prefer to utilize a released version of Qortal, you may do so by downloading one of the available releases from the releases page, that are also linked on https://qortal.org and https://qortal.dev.
|
||||
If you prefer to utilize a released version of Qortal, you may do so by downloading one of the available releases from the releases page, that are also linked on https://qortal.org and https://qortal.dev.
|
||||
|
||||
# Learning Q-App Development
|
||||
|
||||
|
@@ -1,3 +1,4 @@
|
||||
{
|
||||
"apiDocumentationEnabled": true
|
||||
"apiDocumentationEnabled": true,
|
||||
"apiWhitelistEnabled": false
|
||||
}
|
||||
|
BIN
WindowsInstaller/Nice-Qortal-Logo-crop.bmp
Normal file
BIN
WindowsInstaller/Nice-Qortal-Logo-crop.bmp
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.7 MiB |
BIN
WindowsInstaller/Nice-Qortal-Logo-crop.png
Normal file
BIN
WindowsInstaller/Nice-Qortal-Logo-crop.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 160 KiB |
File diff suppressed because it is too large
Load Diff
@@ -2,7 +2,9 @@
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* AdvancedInstaller v19.4 or better, and enterprise licence if translations are required
|
||||
* AdvancedInstaller v19.4 or better, and enterprise licence.
|
||||
* Qortal has an open source license, however it currently (as of December 2024) only supports up to version 19. (We may need to reach out to Advanced Installer again to obtain a new license at some point, if needed.
|
||||
* Reach out to @crowetic for links to the installer install files, and license.
|
||||
* Installed AdoptOpenJDK v17 64bit, full JDK *not* JRE
|
||||
|
||||
## General build instructions
|
||||
@@ -10,6 +12,12 @@
|
||||
If this is your first time opening the `qortal.aip` file then you might need to adjust
|
||||
configured paths, or create a dummy `D:` drive with the expected layout.
|
||||
|
||||
Opening the aip file from within a clone of the qortal repo also works, if you have a separate windows machine setup to do the build.
|
||||
|
||||
You May need to change the location of the 'jre64' files inside Advanced Installer, if it is set to a path that your build machine doesn't have.
|
||||
|
||||
The Java Memory Arguments can be set manually, but as of December 2024 they have been reset back to system defaults. This should include G1GC Garbage Collector.
|
||||
|
||||
Typical build procedure:
|
||||
|
||||
* Place the `qortal.jar` file in `Install-Files\`
|
||||
|
Binary file not shown.
@@ -1,9 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<version>1.3.7</version>
|
||||
<description>POM was created from install:install-file</description>
|
||||
</project>
|
Binary file not shown.
@@ -1,9 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<version>1.3.8</version>
|
||||
<description>POM was created from install:install-file</description>
|
||||
</project>
|
Binary file not shown.
@@ -1,9 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<version>1.4.0</version>
|
||||
<description>POM was created from install:install-file</description>
|
||||
</project>
|
Binary file not shown.
@@ -1,123 +0,0 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<version>1.4.1</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<skipTests>false</skipTests>
|
||||
<bouncycastle.version>1.69</bouncycastle.version>
|
||||
<junit.version>4.13.2</junit.version>
|
||||
<maven-compiler-plugin.version>3.11.0</maven-compiler-plugin.version>
|
||||
<maven-jar-plugin.version>3.3.0</maven-jar-plugin.version>
|
||||
<maven-javadoc-plugin.version>3.6.3</maven-javadoc-plugin.version>
|
||||
<maven-source-plugin.version>3.3.0</maven-source-plugin.version>
|
||||
<maven-surefire-plugin.version>3.2.2</maven-surefire-plugin.version>
|
||||
</properties>
|
||||
|
||||
<build>
|
||||
<sourceDirectory>src/main/java</sourceDirectory>
|
||||
<testSourceDirectory>src/test/java</testSourceDirectory>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>${maven-compiler-plugin.version}</version>
|
||||
<configuration>
|
||||
<source>11</source>
|
||||
<target>11</target>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>${maven-surefire-plugin.version}</version>
|
||||
<configuration>
|
||||
<skipTests>${skipTests}</skipTests>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<version>${maven-source-plugin.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-sources</id>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>${maven-javadoc-plugin.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-javadoc</id>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>${maven-jar-plugin.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>test-jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>${maven-compiler-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>${maven-surefire-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<version>${maven-source-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>${maven-javadoc-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>${maven-jar-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcprov-jdk15on</artifactId>
|
||||
<version>${bouncycastle.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>${junit.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
Binary file not shown.
@@ -1,123 +0,0 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<version>1.4.2</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<skipTests>false</skipTests>
|
||||
<bouncycastle.version>1.70</bouncycastle.version>
|
||||
<junit.version>4.13.2</junit.version>
|
||||
<maven-compiler-plugin.version>3.13.0</maven-compiler-plugin.version>
|
||||
<maven-source-plugin.version>3.3.0</maven-source-plugin.version>
|
||||
<maven-javadoc-plugin.version>3.6.3</maven-javadoc-plugin.version>
|
||||
<maven-surefire-plugin.version>3.2.5</maven-surefire-plugin.version>
|
||||
<maven-jar-plugin.version>3.4.1</maven-jar-plugin.version>
|
||||
</properties>
|
||||
|
||||
<build>
|
||||
<sourceDirectory>src/main/java</sourceDirectory>
|
||||
<testSourceDirectory>src/test/java</testSourceDirectory>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>${maven-compiler-plugin.version}</version>
|
||||
<configuration>
|
||||
<source>11</source>
|
||||
<target>11</target>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>${maven-surefire-plugin.version}</version>
|
||||
<configuration>
|
||||
<skipTests>${skipTests}</skipTests>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<version>${maven-source-plugin.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-sources</id>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>${maven-javadoc-plugin.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-javadoc</id>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>${maven-jar-plugin.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>test-jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>${maven-compiler-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>${maven-surefire-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<version>${maven-source-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>${maven-javadoc-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>${maven-jar-plugin.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcprov-jdk15on</artifactId>
|
||||
<version>${bouncycastle.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>${junit.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
@@ -1,16 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<metadata>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<versioning>
|
||||
<release>1.4.2</release>
|
||||
<versions>
|
||||
<version>1.3.7</version>
|
||||
<version>1.3.8</version>
|
||||
<version>1.4.0</version>
|
||||
<version>1.4.1</version>
|
||||
<version>1.4.2</version>
|
||||
</versions>
|
||||
<lastUpdated>20240426084210</lastUpdated>
|
||||
</versioning>
|
||||
</metadata>
|
5
lib/org/hsqldb/hsqldb/2.7.4/Notes.txt
Normal file
5
lib/org/hsqldb/hsqldb/2.7.4/Notes.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
This is the production hsqldb-2.7.4 with the manifest file updated
|
||||
|
||||
Sealed: false
|
||||
|
||||
Allows the addition of the custom Qortal HSQLDBPool and Monitoring Classes
|
BIN
lib/org/hsqldb/hsqldb/2.7.4/hsqldb.jar
Normal file
BIN
lib/org/hsqldb/hsqldb/2.7.4/hsqldb.jar
Normal file
Binary file not shown.
94
pom.xml
94
pom.xml
@@ -3,18 +3,19 @@
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.qortal</groupId>
|
||||
<artifactId>qortal</artifactId>
|
||||
<version>4.6.4</version>
|
||||
<version>5.1.0</version> <!-- Version must be <X.Y.Z> -->
|
||||
<packaging>jar</packaging>
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<skipTests>true</skipTests>
|
||||
|
||||
<altcoinj.version>7dc8c6f</altcoinj.version>
|
||||
<bitcoinj.version>0.15.10</bitcoinj.version>
|
||||
<bouncycastle.version>1.70</bouncycastle.version>
|
||||
<skipJUnitTests>true</skipJUnitTests>
|
||||
|
||||
<altcoinj.version>d7cf6ac</altcoinj.version> <!-- BC v16 / Updated Abstract Classes / alertSigningKey -->
|
||||
<bitcoinj.version>0.16.3</bitcoinj.version>
|
||||
<bouncycastle.version>1.73</bouncycastle.version>
|
||||
<build.timestamp>${maven.build.timestamp}</build.timestamp>
|
||||
<ciyam-at.version>1.4.2</ciyam-at.version>
|
||||
<ciyam-at.version>1b731d1</ciyam-at.version> <!-- This is the hash for v1.4.3 -->
|
||||
<commons-net.version>3.8.0</commons-net.version>
|
||||
<!-- <commons-net.version>3.9.0</commons-net.version> v5.2.0 coming soon -->
|
||||
<commons-text.version>1.12.0</commons-text.version>
|
||||
<commons-io.version>2.18.0</commons-io.version>
|
||||
<commons-compress.version>1.27.1</commons-compress.version>
|
||||
@@ -23,6 +24,7 @@
|
||||
<extendedset.version>0.12.3</extendedset.version>
|
||||
<git-commit-id-plugin.version>4.9.10</git-commit-id-plugin.version>
|
||||
<grpc.version>1.68.1</grpc.version>
|
||||
<!-- <grpc.version>1.68.3</grpc.version> v5.2.0 coming soon -->
|
||||
<guava.version>33.3.1-jre</guava.version>
|
||||
<hamcrest-library.version>2.2</hamcrest-library.version>
|
||||
<homoglyph.version>1.2.1</homoglyph.version>
|
||||
@@ -33,6 +35,7 @@
|
||||
<jaxb-runtime.version>2.3.9</jaxb-runtime.version>
|
||||
<jersey.version>2.42</jersey.version>
|
||||
<jetty.version>9.4.56.v20240826</jetty.version>
|
||||
<!-- <jetty.version>9.4.57.v20241219</jetty.version> v5.2.0 Coming Soon -->
|
||||
<json-simple.version>1.1.1</json-simple.version>
|
||||
<json.version>20240303</json.version>
|
||||
<jsoup.version>1.18.1</jsoup.version>
|
||||
@@ -49,11 +52,16 @@
|
||||
<maven-reproducible-build-plugin.version>0.17</maven-reproducible-build-plugin.version>
|
||||
<maven-resources-plugin.version>3.3.1</maven-resources-plugin.version>
|
||||
<maven-shade-plugin.version>3.6.0</maven-shade-plugin.version>
|
||||
<maven-install-plugin.version>3.1.3</maven-install-plugin.version>
|
||||
<maven-surefire-plugin.version>3.5.2</maven-surefire-plugin.version>
|
||||
<!-- <maven-surefire-plugin.version>3.5.3</maven-surefire-plugin.version> v5.2.0 Coming Soon -->
|
||||
<protobuf.version>3.25.3</protobuf.version>
|
||||
<!-- <protobuf.version>3.25.7</protobuf.version> v 5.1 -->
|
||||
<replacer.version>1.5.3</replacer.version>
|
||||
<simplemagic.version>1.17</simplemagic.version>
|
||||
<slf4j.version>1.7.36</slf4j.version>
|
||||
<!-- <swagger-api.version>2.2.30</swagger-api.version> need code upgrade Future Release -->
|
||||
<!-- <swagger-api.version>2.1.13</swagger-api.version> need code upgrade Future Release -->
|
||||
<swagger-api.version>2.0.10</swagger-api.version>
|
||||
<swagger-ui.version>5.18.2</swagger-ui.version>
|
||||
<upnp.version>1.2</upnp.version>
|
||||
@@ -289,20 +297,48 @@
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>${maven-jar-plugin.version}</version>
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifest>
|
||||
<addDefaultEntries>false</addDefaultEntries>
|
||||
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
|
||||
</manifest>
|
||||
<manifestEntries>
|
||||
<Last-Commit-Id>${git.commit.id.full}</Last-Commit-Id>
|
||||
<Last-Commit-Time>${git.commit.time}</Last-Commit-Time>
|
||||
<Reproducible-Build>true</Reproducible-Build>
|
||||
</manifestEntries>
|
||||
</archive>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifest>
|
||||
<addDefaultEntries>false</addDefaultEntries>
|
||||
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
|
||||
</manifest>
|
||||
<manifestEntries>
|
||||
<Last-Commit-Id>${git.commit.id.full}</Last-Commit-Id>
|
||||
<Last-Commit-Time>${git.commit.time}</Last-Commit-Time>
|
||||
<Reproducible-Build>true</Reproducible-Build>
|
||||
</manifestEntries>
|
||||
</archive>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<!-- Copy modified hsqldb.jar to install / modified MANIFEST.MF-->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-install-plugin</artifactId>
|
||||
<version>${maven-install-plugin.version}</version>
|
||||
<configuration>
|
||||
<groupId>org.hsqldb</groupId>
|
||||
<artifactId>hsqldb</artifactId>
|
||||
<version>${hsqldb.version}</version>
|
||||
<packaging>jar</packaging>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>install</phase>
|
||||
<goals>
|
||||
<goal>install-file</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<file>${project.basedir}/lib/org/hsqldb/hsqldb/${hsqldb.version}/hsqldb.jar</file>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-shade-plugin</artifactId>
|
||||
@@ -352,6 +388,7 @@
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<!-- Removed, now use Maven reproducible by default v4.0, IntelliJ v2025.1 and later -->
|
||||
<plugin>
|
||||
<groupId>io.github.zlika</groupId>
|
||||
<artifactId>reproducible-build-maven-plugin</artifactId>
|
||||
@@ -374,7 +411,7 @@
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>${maven-surefire-plugin.version}</version>
|
||||
<configuration>
|
||||
<skipTests>${skipTests}</skipTests>
|
||||
<skipTests>${skipJUnitTests}</skipTests>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
@@ -450,7 +487,7 @@
|
||||
<scope>provided</scope>
|
||||
<!-- needed for build, not for runtime -->
|
||||
</dependency>
|
||||
<!-- HSQLDB for repository -->
|
||||
<!-- HSQLDB for repository should use local version with Sealed: false -->
|
||||
<dependency>
|
||||
<groupId>org.hsqldb</groupId>
|
||||
<artifactId>hsqldb</artifactId>
|
||||
@@ -458,7 +495,7 @@
|
||||
</dependency>
|
||||
<!-- CIYAM AT (automated transactions) -->
|
||||
<dependency>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<groupId>com.github.iceburst</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<version>${ciyam-at.version}</version>
|
||||
</dependency>
|
||||
@@ -476,7 +513,7 @@
|
||||
</dependency>
|
||||
<!-- For Litecoin, etc. support, requires bitcoinj -->
|
||||
<dependency>
|
||||
<groupId>com.github.qortal</groupId>
|
||||
<groupId>com.github.iceburst</groupId>
|
||||
<artifactId>altcoinj</artifactId>
|
||||
<version>${altcoinj.version}</version>
|
||||
</dependency>
|
||||
@@ -721,12 +758,12 @@
|
||||
<!-- BouncyCastle for crypto, including TLS secure networking -->
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcprov-jdk15on</artifactId>
|
||||
<artifactId>bcprov-jdk15to18</artifactId>
|
||||
<version>${bouncycastle.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bctls-jdk15on</artifactId>
|
||||
<artifactId>bctls-jdk15to18</artifactId>
|
||||
<version>${bouncycastle.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
@@ -770,5 +807,10 @@
|
||||
<artifactId>jaxb-runtime</artifactId>
|
||||
<version>${jaxb-runtime.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.tika</groupId>
|
||||
<artifactId>tika-core</artifactId>
|
||||
<version>3.1.0</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
|
173
src/main/java/org/hsqldb/jdbc/HSQLDBPoolMonitored.java
Normal file
173
src/main/java/org/hsqldb/jdbc/HSQLDBPoolMonitored.java
Normal file
@@ -0,0 +1,173 @@
|
||||
package org.hsqldb.jdbc;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.hsqldb.jdbc.pool.JDBCPooledConnection;
|
||||
import org.qortal.data.system.DbConnectionInfo;
|
||||
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
|
||||
|
||||
import javax.sql.ConnectionEvent;
|
||||
import javax.sql.PooledConnection;
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Class HSQLDBPoolMonitored
|
||||
*
|
||||
* This class uses the same logic as HSQLDBPool. The only difference is it monitors the state of every connection
|
||||
* to the database. This is used for debugging purposes only.
|
||||
*/
|
||||
public class HSQLDBPoolMonitored extends HSQLDBPool {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(HSQLDBRepositoryFactory.class);
|
||||
|
||||
private static final String EMPTY = "Empty";
|
||||
private static final String AVAILABLE = "Available";
|
||||
private static final String ALLOCATED = "Allocated";
|
||||
|
||||
private ConcurrentHashMap<Integer, DbConnectionInfo> infoByIndex;
|
||||
|
||||
public HSQLDBPoolMonitored(int poolSize) {
|
||||
super(poolSize);
|
||||
|
||||
this.infoByIndex = new ConcurrentHashMap<>(poolSize);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries to retrieve a new connection using the properties that have already been
|
||||
* set.
|
||||
*
|
||||
* @return a connection to the data source, or null if no spare connections in pool
|
||||
* @exception SQLException if a database access error occurs
|
||||
*/
|
||||
public Connection tryConnection() throws SQLException {
|
||||
for (int i = 0; i < states.length(); i++) {
|
||||
if (states.compareAndSet(i, RefState.available, RefState.allocated)) {
|
||||
JDBCPooledConnection pooledConnection = connections[i];
|
||||
|
||||
if (pooledConnection == null)
|
||||
// Probably shutdown situation
|
||||
return null;
|
||||
|
||||
infoByIndex.put(i, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), ALLOCATED));
|
||||
|
||||
return pooledConnection.getConnection();
|
||||
}
|
||||
|
||||
if (states.compareAndSet(i, RefState.empty, RefState.allocated)) {
|
||||
try {
|
||||
JDBCPooledConnection pooledConnection = (JDBCPooledConnection) source.getPooledConnection();
|
||||
|
||||
if (pooledConnection == null)
|
||||
// Probably shutdown situation
|
||||
return null;
|
||||
|
||||
pooledConnection.addConnectionEventListener(this);
|
||||
pooledConnection.addStatementEventListener(this);
|
||||
connections[i] = pooledConnection;
|
||||
|
||||
infoByIndex.put(i, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), ALLOCATED));
|
||||
|
||||
return pooledConnection.getConnection();
|
||||
} catch (SQLException e) {
|
||||
states.set(i, RefState.empty);
|
||||
infoByIndex.put(i, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), EMPTY));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
public Connection getConnection() throws SQLException {
|
||||
int var1 = 300;
|
||||
if (this.source.loginTimeout != 0) {
|
||||
var1 = this.source.loginTimeout * 10;
|
||||
}
|
||||
|
||||
if (this.closed) {
|
||||
throw new SQLException("connection pool is closed");
|
||||
} else {
|
||||
for(int var2 = 0; var2 < var1; ++var2) {
|
||||
for(int var3 = 0; var3 < this.states.length(); ++var3) {
|
||||
if (this.states.compareAndSet(var3, 1, 2)) {
|
||||
infoByIndex.put(var3, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), ALLOCATED));
|
||||
return this.connections[var3].getConnection();
|
||||
}
|
||||
|
||||
if (this.states.compareAndSet(var3, 0, 2)) {
|
||||
try {
|
||||
JDBCPooledConnection var4 = (JDBCPooledConnection)this.source.getPooledConnection();
|
||||
var4.addConnectionEventListener(this);
|
||||
var4.addStatementEventListener(this);
|
||||
this.connections[var3] = var4;
|
||||
|
||||
infoByIndex.put(var3, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), ALLOCATED));
|
||||
|
||||
return this.connections[var3].getConnection();
|
||||
} catch (SQLException var6) {
|
||||
this.states.set(var3, 0);
|
||||
infoByIndex.put(var3, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), EMPTY));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
Thread.sleep(100L);
|
||||
} catch (InterruptedException var5) {
|
||||
}
|
||||
}
|
||||
|
||||
throw JDBCUtil.invalidArgument();
|
||||
}
|
||||
}
|
||||
|
||||
public void connectionClosed(ConnectionEvent event) {
|
||||
PooledConnection connection = (PooledConnection) event.getSource();
|
||||
|
||||
for (int i = 0; i < connections.length; i++) {
|
||||
if (connections[i] == connection) {
|
||||
states.set(i, RefState.available);
|
||||
infoByIndex.put(i, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), AVAILABLE));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void connectionErrorOccurred(ConnectionEvent event) {
|
||||
PooledConnection connection = (PooledConnection) event.getSource();
|
||||
|
||||
for (int i = 0; i < connections.length; i++) {
|
||||
if (connections[i] == connection) {
|
||||
states.set(i, RefState.allocated);
|
||||
connections[i] = null;
|
||||
states.set(i, RefState.empty);
|
||||
infoByIndex.put(i, new DbConnectionInfo(System.currentTimeMillis(), Thread.currentThread().getName(), EMPTY));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public List<DbConnectionInfo> getDbConnectionsStates() {
|
||||
|
||||
return infoByIndex.values().stream()
|
||||
.sorted(Comparator.comparingLong(DbConnectionInfo::getUpdated))
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private int findConnectionIndex(ConnectionEvent connectionEvent) {
|
||||
PooledConnection pooledConnection = (PooledConnection) connectionEvent.getSource();
|
||||
|
||||
for(int i = 0; i < this.connections.length; ++i) {
|
||||
if (this.connections[i] == pooledConnection) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
}
|
@@ -2,23 +2,30 @@ package org.qortal.account;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.api.resource.TransactionsResource;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.controller.LiteNode;
|
||||
import org.qortal.data.account.AccountBalanceData;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.account.RewardShareData;
|
||||
import org.qortal.data.naming.NameData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.GroupRepository;
|
||||
import org.qortal.repository.NameRepository;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.Groups;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
||||
import static org.qortal.utils.Amounts.prettyAmount;
|
||||
|
||||
@@ -216,9 +223,18 @@ public class Account {
|
||||
String myAddress = accountData.getAddress();
|
||||
|
||||
int blockchainHeight = this.repository.getBlockRepository().getBlockchainHeight();
|
||||
int levelToMint = BlockChain.getInstance().getMinAccountLevelToMint();
|
||||
|
||||
int levelToMint;
|
||||
|
||||
if( blockchainHeight >= BlockChain.getInstance().getIgnoreLevelForRewardShareHeight() ) {
|
||||
levelToMint = 0;
|
||||
}
|
||||
else {
|
||||
levelToMint = BlockChain.getInstance().getMinAccountLevelToMint();
|
||||
}
|
||||
|
||||
int level = accountData.getLevel();
|
||||
int groupIdToMint = BlockChain.getInstance().getMintingGroupId();
|
||||
List<Integer> groupIdsToMint = Groups.getGroupIdsToMint( BlockChain.getInstance(), blockchainHeight );
|
||||
int nameCheckHeight = BlockChain.getInstance().getOnlyMintWithNameHeight();
|
||||
int groupCheckHeight = BlockChain.getInstance().getGroupMemberCheckHeight();
|
||||
int removeNameCheckHeight = BlockChain.getInstance().getRemoveOnlyMintWithNameHeight();
|
||||
@@ -252,9 +268,9 @@ public class Account {
|
||||
if (blockchainHeight >= groupCheckHeight && blockchainHeight < removeNameCheckHeight) {
|
||||
List<NameData> myName = nameRepository.getNamesByOwner(myAddress);
|
||||
if (Account.isFounder(accountData.getFlags())) {
|
||||
return accountData.getBlocksMintedPenalty() == 0 && !myName.isEmpty() && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
|
||||
return accountData.getBlocksMintedPenalty() == 0 && !myName.isEmpty() && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
|
||||
} else {
|
||||
return level >= levelToMint && !myName.isEmpty() && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
|
||||
return level >= levelToMint && !myName.isEmpty() && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -263,9 +279,9 @@ public class Account {
|
||||
// Account's address is a member of the minter group
|
||||
if (blockchainHeight >= removeNameCheckHeight) {
|
||||
if (Account.isFounder(accountData.getFlags())) {
|
||||
return accountData.getBlocksMintedPenalty() == 0 && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
|
||||
return accountData.getBlocksMintedPenalty() == 0 && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
|
||||
} else {
|
||||
return level >= levelToMint && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
|
||||
return level >= levelToMint && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -306,6 +322,9 @@ public class Account {
|
||||
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
|
||||
return true;
|
||||
|
||||
if( this.repository.getBlockRepository().getBlockchainHeight() >= BlockChain.getInstance().getIgnoreLevelForRewardShareHeight() )
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -349,10 +368,164 @@ public class Account {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns 'effective' minting level, or zero if reward-share does not exist.
|
||||
* Get Primary Name
|
||||
*
|
||||
* @return the primary name for this address if present, otherwise empty
|
||||
*
|
||||
* @throws DataException
|
||||
*/
|
||||
public Optional<String> getPrimaryName() throws DataException {
|
||||
|
||||
return this.repository.getNameRepository().getPrimaryName(this.address);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove Primary Name
|
||||
*
|
||||
* @throws DataException
|
||||
*/
|
||||
public void removePrimaryName() throws DataException {
|
||||
this.repository.getNameRepository().removePrimaryName(this.address);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset Primary Name
|
||||
*
|
||||
* Set primary name based on the names (and their history) this account owns.
|
||||
*
|
||||
* @param confirmationStatus the status of the transactions for the determining the primary name
|
||||
*
|
||||
* @return the primary name, empty if there isn't one
|
||||
*
|
||||
* @throws DataException
|
||||
*/
|
||||
public Optional<String> resetPrimaryName(TransactionsResource.ConfirmationStatus confirmationStatus) throws DataException {
|
||||
Optional<String> primaryName = determinePrimaryName(confirmationStatus);
|
||||
|
||||
if(primaryName.isPresent()) {
|
||||
return setPrimaryName(primaryName.get());
|
||||
}
|
||||
else {
|
||||
return primaryName;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine Primary Name
|
||||
*
|
||||
* Determine primary name based on a list of registered names.
|
||||
*
|
||||
* @param confirmationStatus the status of the transactions for this determination
|
||||
*
|
||||
* @return the primary name, empty if there is no primary name
|
||||
*
|
||||
* @throws DataException
|
||||
*/
|
||||
public Optional<String> determinePrimaryName(TransactionsResource.ConfirmationStatus confirmationStatus) throws DataException {
|
||||
|
||||
// all registered names for the owner
|
||||
List<NameData> names = this.repository.getNameRepository().getNamesByOwner(this.address);
|
||||
|
||||
Optional<String> primaryName;
|
||||
|
||||
// if there are no registered names, then no primary name is possible
|
||||
if (names.isEmpty()) {
|
||||
primaryName = Optional.empty();
|
||||
}
|
||||
// if names
|
||||
else {
|
||||
// if one name, then that is the primary name
|
||||
if (names.size() == 1) {
|
||||
primaryName = Optional.of( names.get(0).getName() );
|
||||
}
|
||||
// if more than one name, then seek the earliest name acquisition that was never released
|
||||
else {
|
||||
Map<String, TransactionData> txByName = new HashMap<>(names.size());
|
||||
|
||||
// for each name, get the latest transaction
|
||||
for (NameData nameData : names) {
|
||||
|
||||
// since the name is currently registered to the owner,
|
||||
// we assume the latest transaction involving this name was the transaction that acquired the
|
||||
// name through registration, purchase or update
|
||||
Optional<TransactionData> latestTransaction
|
||||
= this.repository
|
||||
.getTransactionRepository()
|
||||
.getTransactionsInvolvingName(
|
||||
nameData.getName(),
|
||||
confirmationStatus
|
||||
)
|
||||
.stream()
|
||||
.sorted(Comparator.comparing(
|
||||
TransactionData::getTimestamp).reversed()
|
||||
)
|
||||
.findFirst(); // first is the last, since it was reversed
|
||||
|
||||
// if there is a latest transaction, expected for all registered names
|
||||
if (latestTransaction.isPresent()) {
|
||||
txByName.put(nameData.getName(), latestTransaction.get());
|
||||
}
|
||||
// if there is no latest transaction, then
|
||||
else {
|
||||
LOGGER.warn("No matching transaction for name: " + nameData.getName());
|
||||
}
|
||||
}
|
||||
|
||||
// get the first name acquisition for this address
|
||||
Optional<Map.Entry<String, TransactionData>> firstNameEntry
|
||||
= txByName.entrySet().stream().sorted(Comparator.comparing(entry -> entry.getValue().getTimestamp())).findFirst();
|
||||
|
||||
// if there is a name acquisition, then the first one is the primary name
|
||||
if (firstNameEntry.isPresent()) {
|
||||
primaryName = Optional.of( firstNameEntry.get().getKey() );
|
||||
}
|
||||
// if there is no name acquisition, then there is no primary name
|
||||
else {
|
||||
primaryName = Optional.empty();
|
||||
}
|
||||
}
|
||||
}
|
||||
return primaryName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set Primary Name
|
||||
*
|
||||
* @param primaryName the primary to set to this address
|
||||
*
|
||||
* @return the primary name if successful, empty if unsuccessful
|
||||
*
|
||||
* @throws DataException
|
||||
*/
|
||||
public Optional<String> setPrimaryName( String primaryName ) throws DataException {
|
||||
int changed = this.repository.getNameRepository().setPrimaryName(this.address, primaryName);
|
||||
|
||||
return changed > 0 ? Optional.of(primaryName) : Optional.empty();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns reward-share minting address, or unknown if reward-share does not exist.
|
||||
*
|
||||
* @param repository
|
||||
* @param rewardSharePublicKey
|
||||
* @return address or unknown
|
||||
* @throws DataException
|
||||
*/
|
||||
public static String getRewardShareMintingAddress(Repository repository, byte[] rewardSharePublicKey) throws DataException {
|
||||
// Find actual minter address
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey);
|
||||
|
||||
if (rewardShareData == null)
|
||||
return "Unknown";
|
||||
|
||||
return rewardShareData.getMinter();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns 'effective' minting level, or zero if reward-share does not exist.
|
||||
*
|
||||
* @param repository
|
||||
* @param rewardSharePublicKey
|
||||
* @return 0+
|
||||
* @throws DataException
|
||||
*/
|
||||
|
@@ -1,17 +1,41 @@
|
||||
package org.qortal.account;
|
||||
|
||||
import org.bouncycastle.crypto.generators.Ed25519KeyPairGenerator;
|
||||
import org.bouncycastle.crypto.params.Ed25519KeyGenerationParameters;
|
||||
import org.bouncycastle.crypto.params.Ed25519PublicKeyParameters;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.repository.Repository;
|
||||
|
||||
import java.security.SecureRandom;
|
||||
|
||||
public class PublicKeyAccount extends Account {
|
||||
|
||||
protected final byte[] publicKey;
|
||||
protected final Ed25519PublicKeyParameters edPublicKeyParams;
|
||||
|
||||
/** <p>Constructor for generating a PublicKeyAccount</p>
|
||||
*
|
||||
* @param repository Block Chain
|
||||
* @param publicKey 32 byte Public Key
|
||||
* @since v4.7.3
|
||||
*/
|
||||
public PublicKeyAccount(Repository repository, byte[] publicKey) {
|
||||
this(repository, new Ed25519PublicKeyParameters(publicKey, 0));
|
||||
super(repository, Crypto.toAddress(publicKey));
|
||||
|
||||
Ed25519PublicKeyParameters t = null;
|
||||
try {
|
||||
t = new Ed25519PublicKeyParameters(publicKey, 0);
|
||||
} catch (Exception e) {
|
||||
var gen = new Ed25519KeyPairGenerator();
|
||||
gen.init(new Ed25519KeyGenerationParameters(new SecureRandom()));
|
||||
var keyPair = gen.generateKeyPair();
|
||||
t = (Ed25519PublicKeyParameters) keyPair.getPublic();
|
||||
} finally {
|
||||
this.edPublicKeyParams = t;
|
||||
}
|
||||
|
||||
this.publicKey = publicKey;
|
||||
}
|
||||
|
||||
protected PublicKeyAccount(Repository repository, Ed25519PublicKeyParameters edPublicKeyParams) {
|
||||
|
@@ -46,6 +46,7 @@ public class ApiService {
|
||||
private ApiService() {
|
||||
this.config = new ResourceConfig();
|
||||
this.config.packages("org.qortal.api.resource", "org.qortal.api.restricted.resource");
|
||||
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
|
||||
this.config.register(OpenApiResource.class);
|
||||
this.config.register(ApiDefinition.class);
|
||||
this.config.register(AnnotationPostProcessor.class);
|
||||
@@ -194,8 +195,10 @@ public class ApiService {
|
||||
|
||||
context.addServlet(AdminStatusWebSocket.class, "/websockets/admin/status");
|
||||
context.addServlet(BlocksWebSocket.class, "/websockets/blocks");
|
||||
context.addServlet(DataMonitorSocket.class, "/websockets/datamonitor");
|
||||
context.addServlet(ActiveChatsWebSocket.class, "/websockets/chat/active/*");
|
||||
context.addServlet(ChatMessagesWebSocket.class, "/websockets/chat/messages");
|
||||
context.addServlet(UnsignedFeesSocket.class, "/websockets/crosschain/unsignedfees");
|
||||
context.addServlet(TradeOffersWebSocket.class, "/websockets/crosschain/tradeoffers");
|
||||
context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot");
|
||||
context.addServlet(TradePresenceWebSocket.class, "/websockets/crosschain/tradepresence");
|
||||
|
@@ -40,6 +40,7 @@ public class DevProxyService {
|
||||
private DevProxyService() {
|
||||
this.config = new ResourceConfig();
|
||||
this.config.packages("org.qortal.api.proxy.resource", "org.qortal.api.resource");
|
||||
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
|
||||
this.config.register(OpenApiResource.class);
|
||||
this.config.register(ApiDefinition.class);
|
||||
this.config.register(AnnotationPostProcessor.class);
|
||||
|
@@ -39,6 +39,7 @@ public class DomainMapService {
|
||||
private DomainMapService() {
|
||||
this.config = new ResourceConfig();
|
||||
this.config.packages("org.qortal.api.resource", "org.qortal.api.domainmap.resource");
|
||||
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
|
||||
this.config.register(OpenApiResource.class);
|
||||
this.config.register(ApiDefinition.class);
|
||||
this.config.register(AnnotationPostProcessor.class);
|
||||
|
@@ -39,6 +39,7 @@ public class GatewayService {
|
||||
private GatewayService() {
|
||||
this.config = new ResourceConfig();
|
||||
this.config.packages("org.qortal.api.resource", "org.qortal.api.gateway.resource");
|
||||
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
|
||||
this.config.register(OpenApiResource.class);
|
||||
this.config.register(ApiDefinition.class);
|
||||
this.config.register(AnnotationPostProcessor.class);
|
||||
|
@@ -1,14 +1,13 @@
|
||||
package org.qortal.api;
|
||||
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.jsoup.Jsoup;
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.select.Elements;
|
||||
import org.qortal.arbitrary.misc.Service;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
public class HTMLParser {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(HTMLParser.class);
|
||||
@@ -22,10 +21,11 @@ public class HTMLParser {
|
||||
private String identifier;
|
||||
private String path;
|
||||
private String theme;
|
||||
private String lang;
|
||||
private boolean usingCustomRouting;
|
||||
|
||||
public HTMLParser(String resourceId, String inPath, String prefix, boolean includeResourceIdInPrefix, byte[] data,
|
||||
String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting) {
|
||||
String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting, String lang) {
|
||||
String inPathWithoutFilename = inPath.contains("/") ? inPath.substring(0, inPath.lastIndexOf('/')) : String.format("/%s",inPath);
|
||||
this.qdnBase = includeResourceIdInPrefix ? String.format("%s/%s", prefix, resourceId) : prefix;
|
||||
this.qdnBaseWithPath = includeResourceIdInPrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : String.format("%s%s", prefix, inPathWithoutFilename);
|
||||
@@ -36,6 +36,7 @@ public class HTMLParser {
|
||||
this.identifier = identifier;
|
||||
this.path = inPath;
|
||||
this.theme = theme;
|
||||
this.lang = lang;
|
||||
this.usingCustomRouting = usingCustomRouting;
|
||||
}
|
||||
|
||||
@@ -61,9 +62,13 @@ public class HTMLParser {
|
||||
String identifier = this.identifier != null ? this.identifier.replace("\\", "").replace("\"","\\\"") : "";
|
||||
String path = this.path != null ? this.path.replace("\\", "").replace("\"","\\\"") : "";
|
||||
String theme = this.theme != null ? this.theme.replace("\\", "").replace("\"","\\\"") : "";
|
||||
String lang = this.lang != null ? this.lang.replace("\\", "").replace("\"", "\\\"") : "";
|
||||
String qdnBase = this.qdnBase != null ? this.qdnBase.replace("\\", "").replace("\"","\\\"") : "";
|
||||
String qdnBaseWithPath = this.qdnBaseWithPath != null ? this.qdnBaseWithPath.replace("\\", "").replace("\"","\\\"") : "";
|
||||
String qdnContextVar = String.format("<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>", qdnContext, theme, service, name, identifier, path, qdnBase, qdnBaseWithPath);
|
||||
String qdnContextVar = String.format(
|
||||
"<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnLang=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>",
|
||||
qdnContext, theme, lang, service, name, identifier, path, qdnBase, qdnBaseWithPath
|
||||
);
|
||||
head.get(0).prepend(qdnContextVar);
|
||||
|
||||
// Add base href tag
|
||||
|
@@ -1,7 +1,13 @@
|
||||
package org.qortal.api.model;
|
||||
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.repository.Repository;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
|
||||
// All properties to be converted to JSON via JAXB
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
@@ -47,4 +53,31 @@ public class ApiOnlineAccount {
|
||||
return this.recipientAddress;
|
||||
}
|
||||
|
||||
public int getMinterLevelFromPublicKey() {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return Account.getRewardShareEffectiveMintingLevel(repository, this.rewardSharePublicKey);
|
||||
} catch (DataException e) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean getIsMember() {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getGroupRepository().memberExists(694, getMinterAddress());
|
||||
} catch (DataException e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// JAXB special
|
||||
|
||||
@XmlElement(name = "minterLevel")
|
||||
protected int getMinterLevel() {
|
||||
return getMinterLevelFromPublicKey();
|
||||
}
|
||||
|
||||
@XmlElement(name = "isMinterMember")
|
||||
protected boolean getMinterMember() {
|
||||
return getIsMember();
|
||||
}
|
||||
}
|
||||
|
@@ -9,6 +9,7 @@ import java.math.BigInteger;
|
||||
public class BlockMintingInfo {
|
||||
|
||||
public byte[] minterPublicKey;
|
||||
public String minterAddress;
|
||||
public int minterLevel;
|
||||
public int onlineAccountsCount;
|
||||
public BigDecimal maxDistance;
|
||||
@@ -19,5 +20,4 @@ public class BlockMintingInfo {
|
||||
|
||||
public BlockMintingInfo() {
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -0,0 +1,72 @@
|
||||
package org.qortal.api.model;
|
||||
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import org.qortal.data.crosschain.CrossChainTradeData;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
|
||||
|
||||
// All properties to be converted to JSON via JAXB
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
public class CrossChainTradeLedgerEntry {
|
||||
|
||||
private String market;
|
||||
|
||||
private String currency;
|
||||
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
private long quantity;
|
||||
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
private long feeAmount;
|
||||
|
||||
private String feeCurrency;
|
||||
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
private long totalPrice;
|
||||
|
||||
private long tradeTimestamp;
|
||||
|
||||
protected CrossChainTradeLedgerEntry() {
|
||||
/* For JAXB */
|
||||
}
|
||||
|
||||
public CrossChainTradeLedgerEntry(String market, String currency, long quantity, long feeAmount, String feeCurrency, long totalPrice, long tradeTimestamp) {
|
||||
this.market = market;
|
||||
this.currency = currency;
|
||||
this.quantity = quantity;
|
||||
this.feeAmount = feeAmount;
|
||||
this.feeCurrency = feeCurrency;
|
||||
this.totalPrice = totalPrice;
|
||||
this.tradeTimestamp = tradeTimestamp;
|
||||
}
|
||||
|
||||
public String getMarket() {
|
||||
return market;
|
||||
}
|
||||
|
||||
public String getCurrency() {
|
||||
return currency;
|
||||
}
|
||||
|
||||
public long getQuantity() {
|
||||
return quantity;
|
||||
}
|
||||
|
||||
public long getFeeAmount() {
|
||||
return feeAmount;
|
||||
}
|
||||
|
||||
public String getFeeCurrency() {
|
||||
return feeCurrency;
|
||||
}
|
||||
|
||||
public long getTotalPrice() {
|
||||
return totalPrice;
|
||||
}
|
||||
|
||||
public long getTradeTimestamp() {
|
||||
return tradeTimestamp;
|
||||
}
|
||||
}
|
50
src/main/java/org/qortal/api/model/DatasetStatus.java
Normal file
50
src/main/java/org/qortal/api/model/DatasetStatus.java
Normal file
@@ -0,0 +1,50 @@
|
||||
package org.qortal.api.model;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import java.util.Objects;
|
||||
|
||||
// All properties to be converted to JSON via JAXB
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
public class DatasetStatus {
|
||||
|
||||
private String name;
|
||||
|
||||
private long count;
|
||||
|
||||
public DatasetStatus() {}
|
||||
|
||||
public DatasetStatus(String name, long count) {
|
||||
this.name = name;
|
||||
this.count = count;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public long getCount() {
|
||||
return count;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
DatasetStatus that = (DatasetStatus) o;
|
||||
return count == that.count && Objects.equals(name, that.name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(name, count);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "DatasetStatus{" +
|
||||
"name='" + name + '\'' +
|
||||
", count=" + count +
|
||||
'}';
|
||||
}
|
||||
}
|
@@ -304,11 +304,11 @@ public class BitcoinyTBDRequest {
|
||||
private String networkName;
|
||||
|
||||
/**
|
||||
* Fee Ceiling
|
||||
* Fee Required
|
||||
*
|
||||
* web search, LTC fee ceiling = 1000L
|
||||
* web search, LTC fee required = 1000L
|
||||
*/
|
||||
private long feeCeiling;
|
||||
private long feeRequired;
|
||||
|
||||
/**
|
||||
* Extended Public Key
|
||||
@@ -570,8 +570,8 @@ public class BitcoinyTBDRequest {
|
||||
return this.networkName;
|
||||
}
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return this.feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return this.feeRequired;
|
||||
}
|
||||
|
||||
public String getExtendedPublicKey() {
|
||||
@@ -671,7 +671,7 @@ public class BitcoinyTBDRequest {
|
||||
", minimumOrderAmount=" + minimumOrderAmount +
|
||||
", feePerKb=" + feePerKb +
|
||||
", networkName='" + networkName + '\'' +
|
||||
", feeCeiling=" + feeCeiling +
|
||||
", feeRequired=" + feeRequired +
|
||||
", extendedPublicKey='" + extendedPublicKey + '\'' +
|
||||
", sendAmount=" + sendAmount +
|
||||
", sendingFeePerByte=" + sendingFeePerByte +
|
||||
|
@@ -142,10 +142,20 @@ public class DevProxyServerResource {
|
||||
}
|
||||
}
|
||||
|
||||
String lang = request.getParameter("lang");
|
||||
if (lang == null || lang.isBlank()) {
|
||||
lang = "en"; // fallback
|
||||
}
|
||||
|
||||
String theme = request.getParameter("theme");
|
||||
if (theme == null || theme.isBlank()) {
|
||||
theme = "light";
|
||||
}
|
||||
|
||||
// Parse and modify output if needed
|
||||
if (HTMLParser.isHtmlFile(filename)) {
|
||||
// HTML file - needs to be parsed
|
||||
HTMLParser htmlParser = new HTMLParser("", inPath, "", false, data, "proxy", Service.APP, null, "light", true);
|
||||
HTMLParser htmlParser = new HTMLParser("", inPath, "", false, data, "proxy", Service.APP, null, theme , true, lang);
|
||||
htmlParser.addAdditionalHeaderTags();
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' ws:; font-src 'self' data:;");
|
||||
response.setContentType(con.getContentType());
|
||||
|
@@ -3,6 +3,7 @@ package org.qortal.api.resource;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import com.j256.simplemagic.ContentInfo;
|
||||
import com.j256.simplemagic.ContentInfoUtil;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.Parameter;
|
||||
import io.swagger.v3.oas.annotations.media.ArraySchema;
|
||||
@@ -12,6 +13,7 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang3.ArrayUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
@@ -33,9 +35,13 @@ import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
|
||||
import org.qortal.controller.arbitrary.ArbitraryMetadataManager;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.arbitrary.ArbitraryCategoryInfo;
|
||||
import org.qortal.data.arbitrary.ArbitraryDataIndexDetail;
|
||||
import org.qortal.data.arbitrary.ArbitraryDataIndexScoreKey;
|
||||
import org.qortal.data.arbitrary.ArbitraryDataIndexScorecard;
|
||||
import org.qortal.data.arbitrary.ArbitraryResourceData;
|
||||
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
|
||||
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
|
||||
import org.qortal.data.arbitrary.IndexCache;
|
||||
import org.qortal.data.naming.NameData;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
@@ -59,18 +65,36 @@ import javax.servlet.http.HttpServletResponse;
|
||||
import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.net.FileNameMap;
|
||||
import java.net.URLConnection;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Paths;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.nio.file.StandardOpenOption;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
import org.apache.tika.Tika;
|
||||
import org.apache.tika.mime.MimeTypeException;
|
||||
import org.apache.tika.mime.MimeTypes;
|
||||
|
||||
import javax.ws.rs.core.Response;
|
||||
|
||||
import org.glassfish.jersey.media.multipart.FormDataParam;
|
||||
import static org.qortal.api.ApiError.REPOSITORY_ISSUE;
|
||||
|
||||
@Path("/arbitrary")
|
||||
@Tag(name = "Arbitrary")
|
||||
@@ -172,6 +196,7 @@ public class ArbitraryResource {
|
||||
@Parameter(description = "Name (searches name field only)") @QueryParam("name") List<String> names,
|
||||
@Parameter(description = "Title (searches title metadata field only)") @QueryParam("title") String title,
|
||||
@Parameter(description = "Description (searches description metadata field only)") @QueryParam("description") String description,
|
||||
@Parameter(description = "Keyword (searches description metadata field by keywords)") @QueryParam("keywords") List<String> keywords,
|
||||
@Parameter(description = "Prefix only (if true, only the beginning of fields are matched)") @QueryParam("prefix") Boolean prefixOnly,
|
||||
@Parameter(description = "Exact match names only (if true, partial name matches are excluded)") @QueryParam("exactmatchnames") Boolean exactMatchNamesOnly,
|
||||
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
|
||||
@@ -212,7 +237,7 @@ public class ArbitraryResource {
|
||||
}
|
||||
|
||||
List<ArbitraryResourceData> resources = repository.getArbitraryRepository()
|
||||
.searchArbitraryResources(service, query, identifier, names, title, description, usePrefixOnly,
|
||||
.searchArbitraryResources(service, query, identifier, names, title, description, keywords, usePrefixOnly,
|
||||
exactMatchNames, defaultRes, mode, minLevel, followedOnly, excludeBlocked, includeMetadata, includeStatus,
|
||||
before, after, limit, offset, reverse);
|
||||
|
||||
@@ -678,20 +703,20 @@ public class ArbitraryResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public HttpServletResponse get(@PathParam("service") Service service,
|
||||
public void get(@PathParam("service") Service service,
|
||||
@PathParam("name") String name,
|
||||
@QueryParam("filepath") String filepath,
|
||||
@QueryParam("encoding") String encoding,
|
||||
@QueryParam("rebuild") boolean rebuild,
|
||||
@QueryParam("async") boolean async,
|
||||
@QueryParam("attempts") Integer attempts) {
|
||||
@QueryParam("attempts") Integer attempts, @QueryParam("attachment") boolean attachment, @QueryParam("attachmentFilename") String attachmentFilename) {
|
||||
|
||||
// Authentication can be bypassed in the settings, for those running public QDN nodes
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
}
|
||||
|
||||
return this.download(service, name, null, filepath, encoding, rebuild, async, attempts);
|
||||
this.download(service, name, null, filepath, encoding, rebuild, async, attempts, attachment, attachmentFilename);
|
||||
}
|
||||
|
||||
@GET
|
||||
@@ -711,21 +736,21 @@ public class ArbitraryResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public HttpServletResponse get(@PathParam("service") Service service,
|
||||
public void get(@PathParam("service") Service service,
|
||||
@PathParam("name") String name,
|
||||
@PathParam("identifier") String identifier,
|
||||
@QueryParam("filepath") String filepath,
|
||||
@QueryParam("encoding") String encoding,
|
||||
@QueryParam("rebuild") boolean rebuild,
|
||||
@QueryParam("async") boolean async,
|
||||
@QueryParam("attempts") Integer attempts) {
|
||||
@QueryParam("attempts") Integer attempts, @QueryParam("attachment") boolean attachment, @QueryParam("attachmentFilename") String attachmentFilename) {
|
||||
|
||||
// Authentication can be bypassed in the settings, for those running public QDN nodes
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
|
||||
Security.checkApiCallAllowed(request, null);
|
||||
}
|
||||
|
||||
return this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts);
|
||||
this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts, attachment, attachmentFilename);
|
||||
}
|
||||
|
||||
|
||||
@@ -870,6 +895,464 @@ public class ArbitraryResource {
|
||||
}
|
||||
|
||||
|
||||
@GET
|
||||
@Path("/check/tmp")
|
||||
@Produces(MediaType.TEXT_PLAIN)
|
||||
@Operation(
|
||||
summary = "Check if the disk has enough disk space for an upcoming upload",
|
||||
responses = {
|
||||
@ApiResponse(description = "OK if sufficient space", responseCode = "200"),
|
||||
@ApiResponse(description = "Insufficient space", responseCode = "507") // 507 = Insufficient Storage
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public Response checkUploadSpace(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
|
||||
@QueryParam("totalSize") Long totalSize) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (totalSize == null || totalSize <= 0) {
|
||||
return Response.status(Response.Status.BAD_REQUEST)
|
||||
.entity("Missing or invalid totalSize parameter").build();
|
||||
}
|
||||
|
||||
File uploadDir = new File("uploads-temp");
|
||||
if (!uploadDir.exists()) {
|
||||
uploadDir.mkdirs(); // ensure the folder exists
|
||||
}
|
||||
|
||||
long usableSpace = uploadDir.getUsableSpace();
|
||||
long requiredSpace = (long)(((double)totalSize) * 2.2); // estimate for chunks + merge
|
||||
|
||||
if (usableSpace < requiredSpace) {
|
||||
return Response.status(507).entity("Insufficient disk space").build();
|
||||
}
|
||||
|
||||
return Response.ok("Sufficient disk space").build();
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/{service}/{name}/chunk")
|
||||
@Consumes(MediaType.MULTIPART_FORM_DATA)
|
||||
@Operation(
|
||||
summary = "Upload a single file chunk to be later assembled into a complete arbitrary resource (no identifier)",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.MULTIPART_FORM_DATA,
|
||||
schema = @Schema(
|
||||
implementation = Object.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "Chunk uploaded successfully",
|
||||
responseCode = "200"
|
||||
),
|
||||
@ApiResponse(
|
||||
description = "Error writing chunk",
|
||||
responseCode = "500"
|
||||
)
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public Response uploadChunkNoIdentifier(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@FormDataParam("chunk") InputStream chunkStream,
|
||||
@FormDataParam("index") int index) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
try {
|
||||
String safeService = Paths.get(serviceString).getFileName().toString();
|
||||
String safeName = Paths.get(name).getFileName().toString();
|
||||
|
||||
java.nio.file.Path tempDir = Paths.get("uploads-temp", safeService, safeName);
|
||||
Files.createDirectories(tempDir);
|
||||
|
||||
|
||||
|
||||
java.nio.file.Path chunkFile = tempDir.resolve("chunk_" + index);
|
||||
Files.copy(chunkStream, chunkFile, StandardCopyOption.REPLACE_EXISTING);
|
||||
|
||||
return Response.ok("Chunk " + index + " received").build();
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Failed to write chunk {} for service '{}' and name '{}'", index, serviceString, name, e);
|
||||
return Response.serverError().entity("Failed to write chunk: " + e.getMessage()).build();
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/{service}/{name}/finalize")
|
||||
@Produces(MediaType.TEXT_PLAIN)
|
||||
@Operation(
|
||||
summary = "Finalize a chunked upload (no identifier) and build a raw, unsigned, ARBITRARY transaction",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "raw, unsigned, ARBITRARY transaction encoded in Base58",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN)
|
||||
)
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String finalizeUploadNoIdentifier(
|
||||
@HeaderParam(Security.API_KEY_HEADER) String apiKey,
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@QueryParam("title") String title,
|
||||
@QueryParam("description") String description,
|
||||
@QueryParam("tags") List<String> tags,
|
||||
@QueryParam("category") Category category,
|
||||
@QueryParam("filename") String filename,
|
||||
@QueryParam("fee") Long fee,
|
||||
@QueryParam("preview") Boolean preview,
|
||||
@QueryParam("isZip") Boolean isZip
|
||||
) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
java.nio.file.Path tempFile = null;
|
||||
java.nio.file.Path tempDir = null;
|
||||
java.nio.file.Path chunkDir = null;
|
||||
String safeService = Paths.get(serviceString).getFileName().toString();
|
||||
String safeName = Paths.get(name).getFileName().toString();
|
||||
|
||||
|
||||
|
||||
try {
|
||||
chunkDir = Paths.get("uploads-temp", safeService, safeName);
|
||||
|
||||
if (!Files.exists(chunkDir) || !Files.isDirectory(chunkDir)) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No chunks found for upload");
|
||||
}
|
||||
|
||||
String safeFilename = (filename == null || filename.isBlank()) ? "qortal-" + NTP.getTime() : filename;
|
||||
tempDir = Files.createTempDirectory("qortal-");
|
||||
String sanitizedFilename = Paths.get(safeFilename).getFileName().toString();
|
||||
tempFile = tempDir.resolve(sanitizedFilename);
|
||||
|
||||
try (OutputStream out = Files.newOutputStream(tempFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
|
||||
byte[] buffer = new byte[65536];
|
||||
for (java.nio.file.Path chunk : Files.list(chunkDir)
|
||||
.filter(path -> path.getFileName().toString().startsWith("chunk_"))
|
||||
.sorted(Comparator.comparingInt(path -> {
|
||||
String name2 = path.getFileName().toString();
|
||||
String numberPart = name2.substring("chunk_".length());
|
||||
return Integer.parseInt(numberPart);
|
||||
})).collect(Collectors.toList())) {
|
||||
try (InputStream in = Files.newInputStream(chunk)) {
|
||||
int bytesRead;
|
||||
while ((bytesRead = in.read(buffer)) != -1) {
|
||||
out.write(buffer, 0, bytesRead);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
String detectedExtension = "";
|
||||
String uploadFilename = null;
|
||||
boolean extensionIsValid = false;
|
||||
|
||||
if (filename != null && !filename.isBlank()) {
|
||||
int lastDot = filename.lastIndexOf('.');
|
||||
if (lastDot > 0 && lastDot < filename.length() - 1) {
|
||||
extensionIsValid = true;
|
||||
uploadFilename = filename;
|
||||
}
|
||||
}
|
||||
|
||||
if (!extensionIsValid) {
|
||||
Tika tika = new Tika();
|
||||
String mimeType = tika.detect(tempFile.toFile());
|
||||
try {
|
||||
MimeTypes allTypes = MimeTypes.getDefaultMimeTypes();
|
||||
org.apache.tika.mime.MimeType mime = allTypes.forName(mimeType);
|
||||
detectedExtension = mime.getExtension();
|
||||
} catch (MimeTypeException e) {
|
||||
LOGGER.warn("Could not determine file extension for MIME type: {}", mimeType, e);
|
||||
}
|
||||
|
||||
if (filename != null && !filename.isBlank()) {
|
||||
int lastDot = filename.lastIndexOf('.');
|
||||
String baseName = (lastDot > 0) ? filename.substring(0, lastDot) : filename;
|
||||
uploadFilename = baseName + (detectedExtension != null ? detectedExtension : "");
|
||||
} else {
|
||||
uploadFilename = "qortal-" + NTP.getTime() + (detectedExtension != null ? detectedExtension : "");
|
||||
}
|
||||
}
|
||||
|
||||
Boolean isZipBoolean = false;
|
||||
|
||||
if (isZip != null && isZip) {
|
||||
isZipBoolean = true;
|
||||
}
|
||||
|
||||
|
||||
// ✅ Call upload with `null` as identifier
|
||||
return this.upload(
|
||||
Service.valueOf(serviceString),
|
||||
name,
|
||||
null, // no identifier
|
||||
tempFile.toString(),
|
||||
null,
|
||||
null,
|
||||
isZipBoolean,
|
||||
fee,
|
||||
uploadFilename,
|
||||
title,
|
||||
description,
|
||||
tags,
|
||||
category,
|
||||
preview
|
||||
);
|
||||
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Failed to merge chunks for service='{}', name='{}'", serviceString, name, e);
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, "Failed to merge chunks: " + e.getMessage());
|
||||
} finally {
|
||||
if (tempDir != null) {
|
||||
try {
|
||||
Files.walk(tempDir)
|
||||
.sorted(Comparator.reverseOrder())
|
||||
.map(java.nio.file.Path::toFile)
|
||||
.forEach(File::delete);
|
||||
} catch (IOException e) {
|
||||
LOGGER.warn("Failed to delete temp directory: {}", tempDir, e);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
Files.walk(chunkDir)
|
||||
.sorted(Comparator.reverseOrder())
|
||||
.map(java.nio.file.Path::toFile)
|
||||
.forEach(File::delete);
|
||||
} catch (IOException e) {
|
||||
LOGGER.warn("Failed to delete chunk directory: {}", chunkDir, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@POST
|
||||
@Path("/{service}/{name}/{identifier}/chunk")
|
||||
@Consumes(MediaType.MULTIPART_FORM_DATA)
|
||||
@Operation(
|
||||
summary = "Upload a single file chunk to be later assembled into a complete arbitrary resource",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.MULTIPART_FORM_DATA,
|
||||
schema = @Schema(
|
||||
implementation = Object.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "Chunk uploaded successfully",
|
||||
responseCode = "200"
|
||||
),
|
||||
@ApiResponse(
|
||||
description = "Error writing chunk",
|
||||
responseCode = "500"
|
||||
)
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public Response uploadChunk(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@PathParam("identifier") String identifier,
|
||||
@FormDataParam("chunk") InputStream chunkStream,
|
||||
@FormDataParam("index") int index) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
try {
|
||||
String safeService = Paths.get(serviceString).getFileName().toString();
|
||||
String safeName = Paths.get(name).getFileName().toString();
|
||||
String safeIdentifier = Paths.get(identifier).getFileName().toString();
|
||||
|
||||
java.nio.file.Path tempDir = Paths.get("uploads-temp", safeService, safeName, safeIdentifier);
|
||||
|
||||
Files.createDirectories(tempDir);
|
||||
|
||||
java.nio.file.Path chunkFile = tempDir.resolve("chunk_" + index);
|
||||
Files.copy(chunkStream, chunkFile, StandardCopyOption.REPLACE_EXISTING);
|
||||
|
||||
return Response.ok("Chunk " + index + " received").build();
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Failed to write chunk {} for service='{}', name='{}', identifier='{}'", index, serviceString, name, identifier, e);
|
||||
return Response.serverError().entity("Failed to write chunk: " + e.getMessage()).build();
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/{service}/{name}/{identifier}/finalize")
|
||||
@Produces(MediaType.TEXT_PLAIN)
|
||||
@Operation(
|
||||
summary = "Finalize a chunked upload and build a raw, unsigned, ARBITRARY transaction",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "raw, unsigned, ARBITRARY transaction encoded in Base58",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN)
|
||||
)
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String finalizeUpload(
|
||||
@HeaderParam(Security.API_KEY_HEADER) String apiKey,
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@PathParam("identifier") String identifier,
|
||||
@QueryParam("title") String title,
|
||||
@QueryParam("description") String description,
|
||||
@QueryParam("tags") List<String> tags,
|
||||
@QueryParam("category") Category category,
|
||||
@QueryParam("filename") String filename,
|
||||
@QueryParam("fee") Long fee,
|
||||
@QueryParam("preview") Boolean preview,
|
||||
@QueryParam("isZip") Boolean isZip
|
||||
) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
java.nio.file.Path tempFile = null;
|
||||
java.nio.file.Path tempDir = null;
|
||||
java.nio.file.Path chunkDir = null;
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
try {
|
||||
String safeService = Paths.get(serviceString).getFileName().toString();
|
||||
String safeName = Paths.get(name).getFileName().toString();
|
||||
String safeIdentifier = Paths.get(identifier).getFileName().toString();
|
||||
java.nio.file.Path baseUploadsDir = Paths.get("uploads-temp"); // relative to Qortal working dir
|
||||
chunkDir = baseUploadsDir.resolve(safeService).resolve(safeName).resolve(safeIdentifier);
|
||||
|
||||
if (!Files.exists(chunkDir) || !Files.isDirectory(chunkDir)) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No chunks found for upload");
|
||||
}
|
||||
|
||||
// Step 1: Determine a safe filename for disk temp file (regardless of extension correctness)
|
||||
String safeFilename = filename;
|
||||
if (filename == null || filename.isBlank()) {
|
||||
safeFilename = "qortal-" + NTP.getTime();
|
||||
}
|
||||
|
||||
tempDir = Files.createTempDirectory("qortal-");
|
||||
String sanitizedFilename = Paths.get(safeFilename).getFileName().toString();
|
||||
tempFile = tempDir.resolve(sanitizedFilename);
|
||||
|
||||
|
||||
// Step 2: Merge chunks
|
||||
|
||||
try (OutputStream out = Files.newOutputStream(tempFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
|
||||
byte[] buffer = new byte[65536];
|
||||
for (java.nio.file.Path chunk : Files.list(chunkDir)
|
||||
.filter(path -> path.getFileName().toString().startsWith("chunk_"))
|
||||
.sorted(Comparator.comparingInt(path -> {
|
||||
String name2 = path.getFileName().toString();
|
||||
String numberPart = name2.substring("chunk_".length());
|
||||
return Integer.parseInt(numberPart);
|
||||
})).collect(Collectors.toList())) {
|
||||
try (InputStream in = Files.newInputStream(chunk)) {
|
||||
int bytesRead;
|
||||
while ((bytesRead = in.read(buffer)) != -1) {
|
||||
out.write(buffer, 0, bytesRead);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Step 3: Determine correct extension
|
||||
String detectedExtension = "";
|
||||
String uploadFilename = null;
|
||||
boolean extensionIsValid = false;
|
||||
|
||||
if (filename != null && !filename.isBlank()) {
|
||||
int lastDot = filename.lastIndexOf('.');
|
||||
if (lastDot > 0 && lastDot < filename.length() - 1) {
|
||||
extensionIsValid = true;
|
||||
uploadFilename = filename;
|
||||
}
|
||||
}
|
||||
|
||||
if (!extensionIsValid) {
|
||||
Tika tika = new Tika();
|
||||
String mimeType = tika.detect(tempFile.toFile());
|
||||
try {
|
||||
MimeTypes allTypes = MimeTypes.getDefaultMimeTypes();
|
||||
org.apache.tika.mime.MimeType mime = allTypes.forName(mimeType);
|
||||
detectedExtension = mime.getExtension();
|
||||
} catch (MimeTypeException e) {
|
||||
LOGGER.warn("Could not determine file extension for MIME type: {}", mimeType, e);
|
||||
}
|
||||
|
||||
if (filename != null && !filename.isBlank()) {
|
||||
int lastDot = filename.lastIndexOf('.');
|
||||
String baseName = (lastDot > 0) ? filename.substring(0, lastDot) : filename;
|
||||
uploadFilename = baseName + (detectedExtension != null ? detectedExtension : "");
|
||||
} else {
|
||||
uploadFilename = "qortal-" + NTP.getTime() + (detectedExtension != null ? detectedExtension : "");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Boolean isZipBoolean = false;
|
||||
|
||||
if (isZip != null && isZip) {
|
||||
isZipBoolean = true;
|
||||
}
|
||||
|
||||
|
||||
return this.upload(
|
||||
Service.valueOf(serviceString),
|
||||
name,
|
||||
identifier,
|
||||
tempFile.toString(),
|
||||
null,
|
||||
null,
|
||||
isZipBoolean,
|
||||
fee,
|
||||
uploadFilename,
|
||||
title,
|
||||
description,
|
||||
tags,
|
||||
category,
|
||||
preview
|
||||
);
|
||||
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Unexpected error in finalizeUpload for service='{}', name='{}', name='{}'", serviceString, name, identifier, e);
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, "Failed to merge chunks: " + e.getMessage());
|
||||
} finally {
|
||||
if (tempDir != null) {
|
||||
try {
|
||||
Files.walk(tempDir)
|
||||
.sorted(Comparator.reverseOrder())
|
||||
.map(java.nio.file.Path::toFile)
|
||||
.forEach(File::delete);
|
||||
} catch (IOException e) {
|
||||
LOGGER.warn("Failed to delete temp directory: {}", tempDir, e);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
Files.walk(chunkDir)
|
||||
.sorted(Comparator.reverseOrder())
|
||||
.map(java.nio.file.Path::toFile)
|
||||
.forEach(File::delete);
|
||||
} catch (IOException e) {
|
||||
LOGGER.warn("Failed to delete chunk directory: {}", chunkDir, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// Upload base64-encoded data
|
||||
|
||||
@@ -1185,6 +1668,90 @@ public class ArbitraryResource {
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/indices")
|
||||
@Operation(
|
||||
summary = "Find matching arbitrary resource indices",
|
||||
description = "",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "indices",
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = ArbitraryDataIndexScorecard.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
public List<ArbitraryDataIndexScorecard> searchIndices(@QueryParam("terms") String[] terms) {
|
||||
|
||||
List<ArbitraryDataIndexDetail> indices = new ArrayList<>();
|
||||
|
||||
// get index details for each term
|
||||
for( String term : terms ) {
|
||||
List<ArbitraryDataIndexDetail> details = IndexCache.getInstance().getIndicesByTerm().get(term);
|
||||
|
||||
if( details != null ) {
|
||||
indices.addAll(details);
|
||||
}
|
||||
}
|
||||
|
||||
// sum up the scores for each index with identical attributes
|
||||
Map<ArbitraryDataIndexScoreKey, Double> scoreForKey
|
||||
= indices.stream()
|
||||
.collect(
|
||||
Collectors.groupingBy(
|
||||
index -> new ArbitraryDataIndexScoreKey(index.name, index.category, index.link),
|
||||
Collectors.summingDouble(detail -> 1.0 / detail.rank)
|
||||
)
|
||||
);
|
||||
|
||||
// create scorecards for each index group and put them in descending order by score
|
||||
List<ArbitraryDataIndexScorecard> scorecards
|
||||
= scoreForKey.entrySet().stream().map(
|
||||
entry
|
||||
->
|
||||
new ArbitraryDataIndexScorecard(
|
||||
entry.getValue(),
|
||||
entry.getKey().name,
|
||||
entry.getKey().category,
|
||||
entry.getKey().link)
|
||||
)
|
||||
.sorted(Comparator.comparingDouble(ArbitraryDataIndexScorecard::getScore).reversed())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
return scorecards;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/indices/{name}/{idPrefix}")
|
||||
@Operation(
|
||||
summary = "Find matching arbitrary resource indices for a registered name and identifier prefix",
|
||||
description = "",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "indices",
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = ArbitraryDataIndexDetail.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
public List<ArbitraryDataIndexDetail> searchIndicesByName(@PathParam("name") String name, @PathParam("idPrefix") String idPrefix) {
|
||||
|
||||
return
|
||||
IndexCache.getInstance().getIndicesByIssuer()
|
||||
.getOrDefault(name, new ArrayList<>(0)).stream()
|
||||
.filter( indexDetail -> indexDetail.indexIdentifer.startsWith(idPrefix))
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
// Shared methods
|
||||
|
||||
@@ -1251,7 +1818,7 @@ public class ArbitraryResource {
|
||||
if (path == null) {
|
||||
// See if we have a string instead
|
||||
if (string != null) {
|
||||
if (filename == null) {
|
||||
if (filename == null || filename.isBlank()) {
|
||||
// Use current time as filename
|
||||
filename = String.format("qortal-%d", NTP.getTime());
|
||||
}
|
||||
@@ -1266,7 +1833,7 @@ public class ArbitraryResource {
|
||||
}
|
||||
// ... or base64 encoded raw data
|
||||
else if (base64 != null) {
|
||||
if (filename == null) {
|
||||
if (filename == null || filename.isBlank()) {
|
||||
// Use current time as filename
|
||||
filename = String.format("qortal-%d", NTP.getTime());
|
||||
}
|
||||
@@ -1317,6 +1884,7 @@ public class ArbitraryResource {
|
||||
);
|
||||
|
||||
transactionBuilder.build();
|
||||
|
||||
// Don't compute nonce - this is done by the client (or via POST /arbitrary/compute)
|
||||
ArbitraryTransactionData transactionData = transactionBuilder.getArbitraryTransactionData();
|
||||
return Base58.encode(ArbitraryTransactionTransformer.toBytes(transactionData));
|
||||
@@ -1332,22 +1900,20 @@ public class ArbitraryResource {
|
||||
}
|
||||
}
|
||||
|
||||
private HttpServletResponse download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts) {
|
||||
|
||||
private void download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts, boolean attachment, String attachmentFilename) {
|
||||
try {
|
||||
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
|
||||
|
||||
|
||||
int attempts = 0;
|
||||
if (maxAttempts == null) {
|
||||
maxAttempts = 5;
|
||||
}
|
||||
|
||||
|
||||
// Loop until we have data
|
||||
if (async) {
|
||||
// Asynchronous
|
||||
arbitraryDataReader.loadAsynchronously(false, 1);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// Synchronous
|
||||
while (!Controller.isStopping()) {
|
||||
attempts++;
|
||||
@@ -1357,88 +1923,189 @@ public class ArbitraryResource {
|
||||
break;
|
||||
} catch (MissingDataException e) {
|
||||
if (attempts > maxAttempts) {
|
||||
// Give up after 5 attempts
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data unavailable. Please try again later.");
|
||||
}
|
||||
}
|
||||
}
|
||||
Thread.sleep(3000L);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
|
||||
if (outputPath == null) {
|
||||
// Assume the resource doesn't exist
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, "File not found");
|
||||
}
|
||||
|
||||
|
||||
if (filepath == null || filepath.isEmpty()) {
|
||||
// No file path supplied - so check if this is a single file resource
|
||||
String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal");
|
||||
if (files != null && files.length == 1) {
|
||||
// This is a single file resource
|
||||
filepath = files[0];
|
||||
}
|
||||
else {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA,
|
||||
"filepath is required for resources containing more than one file");
|
||||
} else {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "filepath is required for resources containing more than one file");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
java.nio.file.Path path = Paths.get(outputPath.toString(), filepath);
|
||||
if (!Files.exists(path)) {
|
||||
String message = String.format("No file exists at filepath: %s", filepath);
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, message);
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No file exists at filepath: " + filepath);
|
||||
}
|
||||
|
||||
byte[] data;
|
||||
int fileSize = (int)path.toFile().length();
|
||||
int length = fileSize;
|
||||
|
||||
// Parse "Range" header
|
||||
Integer rangeStart = null;
|
||||
Integer rangeEnd = null;
|
||||
|
||||
if (attachment) {
|
||||
String rawFilename;
|
||||
|
||||
if (attachmentFilename != null && !attachmentFilename.isEmpty()) {
|
||||
// 1. Sanitize first
|
||||
String safeAttachmentFilename = attachmentFilename.replaceAll("[\\\\/:*?\"<>|]", "_");
|
||||
|
||||
// 2. Check for a valid extension (3–5 alphanumeric chars)
|
||||
if (!safeAttachmentFilename.matches(".*\\.[a-zA-Z0-9]{2,5}$")) {
|
||||
safeAttachmentFilename += ".bin";
|
||||
}
|
||||
|
||||
rawFilename = safeAttachmentFilename;
|
||||
} else {
|
||||
// Fallback if no filename is provided
|
||||
String baseFilename = (identifier != null && !identifier.isEmpty())
|
||||
? name + "-" + identifier
|
||||
: name;
|
||||
rawFilename = baseFilename.replaceAll("[\\\\/:*?\"<>|]", "_") + ".bin";
|
||||
}
|
||||
|
||||
// Optional: trim length
|
||||
rawFilename = rawFilename.length() > 100 ? rawFilename.substring(0, 100) : rawFilename;
|
||||
|
||||
// 3. Set Content-Disposition header
|
||||
response.setHeader("Content-Disposition", "attachment; filename=\"" + rawFilename + "\"");
|
||||
}
|
||||
|
||||
// Determine the total size of the requested file
|
||||
long fileSize = Files.size(path);
|
||||
String mimeType = context.getMimeType(path.toString());
|
||||
|
||||
// Attempt to read the "Range" header from the request to support partial content delivery (e.g., for video streaming or resumable downloads)
|
||||
String range = request.getHeader("Range");
|
||||
if (range != null) {
|
||||
range = range.replace("bytes=", "");
|
||||
String[] parts = range.split("-");
|
||||
rangeStart = (parts != null && parts.length > 0) ? Integer.parseInt(parts[0]) : null;
|
||||
rangeEnd = (parts != null && parts.length > 1) ? Integer.parseInt(parts[1]) : fileSize;
|
||||
|
||||
long rangeStart = 0;
|
||||
long rangeEnd = fileSize - 1;
|
||||
boolean isPartial = false;
|
||||
|
||||
// If a Range header is present and no base64 encoding is requested, parse the range values
|
||||
if (range != null && encoding == null) {
|
||||
range = range.replace("bytes=", ""); // Remove the "bytes=" prefix
|
||||
String[] parts = range.split("-"); // Split the range into start and end
|
||||
|
||||
// Parse range start
|
||||
if (parts.length > 0 && !parts[0].isEmpty()) {
|
||||
rangeStart = Long.parseLong(parts[0]);
|
||||
}
|
||||
|
||||
// Parse range end, if present
|
||||
if (parts.length > 1 && !parts[1].isEmpty()) {
|
||||
rangeEnd = Long.parseLong(parts[1]);
|
||||
}
|
||||
|
||||
isPartial = true; // Indicate that this is a partial content request
|
||||
}
|
||||
|
||||
if (rangeStart != null && rangeEnd != null) {
|
||||
// We have a range, so update the requested length
|
||||
length = rangeEnd - rangeStart;
|
||||
|
||||
// Calculate how many bytes should be sent in the response
|
||||
long contentLength = rangeEnd - rangeStart + 1;
|
||||
|
||||
// Inform the client that byte ranges are supported
|
||||
response.setHeader("Accept-Ranges", "bytes");
|
||||
|
||||
if (isPartial) {
|
||||
// If partial content was requested, return 206 Partial Content with appropriate headers
|
||||
response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
|
||||
response.setHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd, fileSize));
|
||||
} else {
|
||||
// Otherwise, return the entire file with status 200 OK
|
||||
response.setStatus(HttpServletResponse.SC_OK);
|
||||
}
|
||||
|
||||
if (length < fileSize && encoding == null) {
|
||||
// Partial content requested, and not encoding the data
|
||||
response.setStatus(206);
|
||||
response.addHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd-1, fileSize));
|
||||
data = FilesystemUtils.readFromFile(path.toString(), rangeStart, length);
|
||||
|
||||
// Initialize output streams for writing the file to the response
|
||||
OutputStream rawOut = null;
|
||||
OutputStream base64Out = null;
|
||||
OutputStream gzipOut = null;
|
||||
|
||||
try {
|
||||
rawOut = response.getOutputStream();
|
||||
|
||||
if (encoding != null && "base64".equalsIgnoreCase(encoding)) {
|
||||
// If base64 encoding is requested, override content type
|
||||
response.setContentType("text/plain");
|
||||
|
||||
// Check if the client accepts gzip encoding
|
||||
String acceptEncoding = request.getHeader("Accept-Encoding");
|
||||
boolean wantsGzip = acceptEncoding != null && acceptEncoding.contains("gzip");
|
||||
|
||||
if (wantsGzip) {
|
||||
// Wrap output in GZIP and Base64 streams if gzip is accepted
|
||||
response.setHeader("Content-Encoding", "gzip");
|
||||
gzipOut = new GZIPOutputStream(rawOut);
|
||||
base64Out = java.util.Base64.getEncoder().wrap(gzipOut);
|
||||
} else {
|
||||
// Wrap output in Base64 only
|
||||
base64Out = java.util.Base64.getEncoder().wrap(rawOut);
|
||||
}
|
||||
|
||||
rawOut = base64Out; // Use the wrapped stream for writing
|
||||
} else {
|
||||
// For raw binary output, set the content type and length
|
||||
response.setContentType(mimeType != null ? mimeType : "application/octet-stream");
|
||||
response.setContentLength((int) contentLength);
|
||||
}
|
||||
|
||||
// Stream file content
|
||||
try (InputStream inputStream = Files.newInputStream(path)) {
|
||||
if (rangeStart > 0) {
|
||||
inputStream.skip(rangeStart);
|
||||
}
|
||||
|
||||
byte[] buffer = new byte[65536];
|
||||
long bytesRemaining = contentLength;
|
||||
int bytesRead;
|
||||
|
||||
while (bytesRemaining > 0 && (bytesRead = inputStream.read(buffer, 0, (int) Math.min(buffer.length, bytesRemaining))) != -1) {
|
||||
rawOut.write(buffer, 0, bytesRead);
|
||||
bytesRemaining -= bytesRead;
|
||||
}
|
||||
}
|
||||
|
||||
// Stream finished
|
||||
if (base64Out != null) {
|
||||
base64Out.close(); // Also flushes and closes the wrapped gzipOut
|
||||
} else if (gzipOut != null) {
|
||||
gzipOut.close(); // Only close gzipOut if it wasn't wrapped by base64Out
|
||||
} else {
|
||||
rawOut.flush(); // Flush only the base output stream if nothing was wrapped
|
||||
}
|
||||
|
||||
if (!response.isCommitted()) {
|
||||
response.setStatus(HttpServletResponse.SC_OK);
|
||||
response.getWriter().write(" ");
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
// Streaming errors should not rethrow — just log
|
||||
LOGGER.warn(String.format("Streaming error for %s %s: %s", service, name, e.getMessage()));
|
||||
}
|
||||
else {
|
||||
// Full content requested (or encoded data)
|
||||
response.setStatus(200);
|
||||
data = Files.readAllBytes(path); // TODO: limit file size that can be read into memory
|
||||
|
||||
} catch (IOException | ApiException | DataException e) {
|
||||
LOGGER.warn(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
|
||||
if (!response.isCommitted()) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
|
||||
}
|
||||
|
||||
// Encode the data if requested
|
||||
if (encoding != null && Objects.equals(encoding.toLowerCase(), "base64")) {
|
||||
data = Base64.encode(data);
|
||||
} catch (NumberFormatException e) {
|
||||
LOGGER.warn(String.format("Invalid range for %s %s: %s", service, name, e.getMessage()));
|
||||
if (!response.isCommitted()) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_DATA, e.getMessage());
|
||||
}
|
||||
|
||||
response.addHeader("Accept-Ranges", "bytes");
|
||||
response.setContentType(context.getMimeType(path.toString()));
|
||||
response.setContentLength(data.length);
|
||||
response.getOutputStream().write(data);
|
||||
|
||||
return response;
|
||||
} catch (Exception e) {
|
||||
LOGGER.debug(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
private FileProperties getFileProperties(Service service, String name, String identifier) {
|
||||
try {
|
||||
|
@@ -16,9 +16,13 @@ import org.qortal.api.model.AggregatedOrder;
|
||||
import org.qortal.api.model.TradeWithOrderInfo;
|
||||
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.controller.hsqldb.HSQLDBBalanceRecorder;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.account.AccountBalanceData;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.account.AddressAmountData;
|
||||
import org.qortal.data.account.BlockHeightRange;
|
||||
import org.qortal.data.account.BlockHeightRangeAddressAmounts;
|
||||
import org.qortal.data.asset.AssetData;
|
||||
import org.qortal.data.asset.OrderData;
|
||||
import org.qortal.data.asset.RecentTradeData;
|
||||
@@ -33,6 +37,7 @@ import org.qortal.transaction.Transaction;
|
||||
import org.qortal.transaction.Transaction.ValidationResult;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.transaction.*;
|
||||
import org.qortal.utils.BalanceRecorderUtils;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
@@ -42,6 +47,7 @@ import javax.ws.rs.core.MediaType;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@Path("/assets")
|
||||
@@ -179,6 +185,122 @@ public class AssetsResource {
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/balancedynamicranges")
|
||||
@Operation(
|
||||
summary = "Get balance dynamic ranges listed.",
|
||||
description = ".",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = BlockHeightRange.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
public List<BlockHeightRange> getBalanceDynamicRanges(
|
||||
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
|
||||
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
|
||||
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
|
||||
|
||||
Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();
|
||||
|
||||
if( recorder.isPresent()) {
|
||||
return recorder.get().getRanges(offset, limit, reverse);
|
||||
}
|
||||
else {
|
||||
return new ArrayList<>(0);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/balancedynamicrange/{height}")
|
||||
@Operation(
|
||||
summary = "Get balance dynamic range for a given height.",
|
||||
description = ".",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
implementation = BlockHeightRange.class
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({
|
||||
ApiError.INVALID_CRITERIA, ApiError.INVALID_DATA
|
||||
})
|
||||
public BlockHeightRange getBalanceDynamicRange(@PathParam("height") int height) {
|
||||
|
||||
Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();
|
||||
|
||||
if( recorder.isPresent()) {
|
||||
Optional<BlockHeightRange> range = recorder.get().getRange(height);
|
||||
|
||||
if( range.isPresent() ) {
|
||||
return range.get();
|
||||
}
|
||||
else {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/balancedynamicamounts/{begin}/{end}")
|
||||
@Operation(
|
||||
summary = "Get balance dynamic ranges address amounts listed.",
|
||||
description = ".",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = AddressAmountData.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({
|
||||
ApiError.INVALID_CRITERIA, ApiError.INVALID_DATA
|
||||
})
|
||||
public List<AddressAmountData> getBalanceDynamicAddressAmounts(
|
||||
@PathParam("begin") int begin,
|
||||
@PathParam("end") int end,
|
||||
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
|
||||
@Parameter(ref = "limit") @QueryParam("limit") Integer limit) {
|
||||
|
||||
Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();
|
||||
|
||||
if( recorder.isPresent()) {
|
||||
Optional<BlockHeightRangeAddressAmounts> addressAmounts = recorder.get().getAddressAmounts(new BlockHeightRange(begin, end, false));
|
||||
|
||||
if( addressAmounts.isPresent() ) {
|
||||
return addressAmounts.get().getAmounts().stream()
|
||||
.sorted(BalanceRecorderUtils.ADDRESS_AMOUNT_DATA_COMPARATOR.reversed())
|
||||
.skip(offset)
|
||||
.limit(limit)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
else {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/openorders/{assetid}/{otherassetid}")
|
||||
@Operation(
|
||||
|
@@ -19,6 +19,8 @@ import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
import org.qortal.data.block.DecodedOnlineAccountData;
|
||||
import org.qortal.data.network.OnlineAccountData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.BlockArchiveReader;
|
||||
import org.qortal.repository.DataException;
|
||||
@@ -27,6 +29,7 @@ import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.Blocks;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
@@ -45,6 +48,7 @@ import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
@Path("/blocks")
|
||||
@Tag(name = "Blocks")
|
||||
@@ -542,6 +546,7 @@ public class BlocksResource {
|
||||
}
|
||||
}
|
||||
|
||||
String minterAddress = Account.getRewardShareMintingAddress(repository, blockData.getMinterPublicKey());
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
|
||||
if (minterLevel == 0)
|
||||
// This may be unavailable when requesting a trimmed block
|
||||
@@ -554,6 +559,7 @@ public class BlocksResource {
|
||||
|
||||
BlockMintingInfo blockMintingInfo = new BlockMintingInfo();
|
||||
blockMintingInfo.minterPublicKey = blockData.getMinterPublicKey();
|
||||
blockMintingInfo.minterAddress = minterAddress;
|
||||
blockMintingInfo.minterLevel = minterLevel;
|
||||
blockMintingInfo.onlineAccountsCount = blockData.getOnlineAccountsCount();
|
||||
blockMintingInfo.maxDistance = new BigDecimal(block.MAX_DISTANCE);
|
||||
@@ -888,4 +894,49 @@ public class BlocksResource {
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@GET
|
||||
@Path("/onlineaccounts/{height}")
|
||||
@Operation(
|
||||
summary = "Get online accounts for block",
|
||||
description = "Returns the online accounts who submitted signatures for this block",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "online accounts",
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = DecodedOnlineAccountData.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({
|
||||
ApiError.BLOCK_UNKNOWN, ApiError.REPOSITORY_ISSUE
|
||||
})
|
||||
public Set<DecodedOnlineAccountData> getOnlineAccounts(@PathParam("height") int height) {
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// get block from database
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(height);
|
||||
|
||||
// if block data is not in the database, then try the archive
|
||||
if (blockData == null) {
|
||||
blockData = repository.getBlockArchiveRepository().fromHeight(height);
|
||||
|
||||
// if the block is not in the database or the archive, then the block is unknown
|
||||
if( blockData == null ) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
}
|
||||
}
|
||||
|
||||
Set<DecodedOnlineAccountData> onlineAccounts = Blocks.getDecodedOnlineAccountsForBlock(repository, blockData);
|
||||
|
||||
return onlineAccounts;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE);
|
||||
}
|
||||
}
|
||||
}
|
@@ -234,17 +234,21 @@ public class ChatResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
|
||||
public ActiveChats getActiveChats(@PathParam("address") String address, @QueryParam("encoding") Encoding encoding) {
|
||||
public ActiveChats getActiveChats(
|
||||
@PathParam("address") String address,
|
||||
@QueryParam("encoding") Encoding encoding,
|
||||
@QueryParam("haschatreference") Boolean hasChatReference
|
||||
) {
|
||||
if (address == null || !Crypto.isValidAddress(address))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getChatRepository().getActiveChats(address, encoding);
|
||||
return repository.getChatRepository().getActiveChats(address, encoding, hasChatReference);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@POST
|
||||
@Operation(
|
||||
summary = "Build raw, unsigned, CHAT transaction",
|
||||
|
@@ -502,10 +502,10 @@ public class CrossChainBitcoinResource {
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/feeceiling")
|
||||
@Path("/feerequired")
|
||||
@Operation(
|
||||
summary = "Returns Bitcoin fee per Kb.",
|
||||
description = "Returns Bitcoin fee per Kb.",
|
||||
summary = "The total fee required for unlocking BTC to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
@@ -516,17 +516,17 @@ public class CrossChainBitcoinResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public String getBitcoinFeeCeiling() {
|
||||
public String getBitcoinFeeRequired() {
|
||||
Bitcoin bitcoin = Bitcoin.getInstance();
|
||||
|
||||
return String.valueOf(bitcoin.getFeeCeiling());
|
||||
return String.valueOf(bitcoin.getFeeRequired());
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/updatefeeceiling")
|
||||
@Path("/updatefeerequired")
|
||||
@Operation(
|
||||
summary = "Sets Bitcoin fee ceiling.",
|
||||
description = "Sets Bitcoin fee ceiling.",
|
||||
summary = "The total fee required for unlocking BTC to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
@@ -545,13 +545,13 @@ public class CrossChainBitcoinResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
|
||||
public String setBitcoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
public String setBitcoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Bitcoin bitcoin = Bitcoin.getInstance();
|
||||
|
||||
try {
|
||||
return CrossChainUtils.setFeeCeiling(bitcoin, fee);
|
||||
return CrossChainUtils.setFeeRequired(bitcoin, fee);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
@@ -502,10 +502,10 @@ public class CrossChainDigibyteResource {
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/feeceiling")
|
||||
@Path("/feerequired")
|
||||
@Operation(
|
||||
summary = "Returns Digibyte fee per Kb.",
|
||||
description = "Returns Digibyte fee per Kb.",
|
||||
summary = "The total fee required for unlocking DGB to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
@@ -516,17 +516,17 @@ public class CrossChainDigibyteResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public String getDigibyteFeeCeiling() {
|
||||
public String getDigibyteFeeRequired() {
|
||||
Digibyte digibyte = Digibyte.getInstance();
|
||||
|
||||
return String.valueOf(digibyte.getFeeCeiling());
|
||||
return String.valueOf(digibyte.getFeeRequired());
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/updatefeeceiling")
|
||||
@Path("/updatefeerequired")
|
||||
@Operation(
|
||||
summary = "Sets Digibyte fee ceiling.",
|
||||
description = "Sets Digibyte fee ceiling.",
|
||||
summary = "The total fee required for unlocking DGB to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
@@ -545,13 +545,13 @@ public class CrossChainDigibyteResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
|
||||
public String setDigibyteFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
public String setDigibyteFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Digibyte digibyte = Digibyte.getInstance();
|
||||
|
||||
try {
|
||||
return CrossChainUtils.setFeeCeiling(digibyte, fee);
|
||||
return CrossChainUtils.setFeeRequired(digibyte, fee);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
@@ -502,10 +502,10 @@ public class CrossChainDogecoinResource {
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/feeceiling")
|
||||
@Path("/feerequired")
|
||||
@Operation(
|
||||
summary = "Returns Dogecoin fee per Kb.",
|
||||
description = "Returns Dogecoin fee per Kb.",
|
||||
summary = "The total fee required for unlocking DOGE to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
@@ -516,17 +516,17 @@ public class CrossChainDogecoinResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public String getDogecoinFeeCeiling() {
|
||||
public String getDogecoinFeeRequired() {
|
||||
Dogecoin dogecoin = Dogecoin.getInstance();
|
||||
|
||||
return String.valueOf(dogecoin.getFeeCeiling());
|
||||
return String.valueOf(dogecoin.getFeeRequired());
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/updatefeeceiling")
|
||||
@Path("/updatefeerequired")
|
||||
@Operation(
|
||||
summary = "Sets Dogecoin fee ceiling.",
|
||||
description = "Sets Dogecoin fee ceiling.",
|
||||
summary = "The total fee required for unlocking DOGE to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
@@ -545,13 +545,13 @@ public class CrossChainDogecoinResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
|
||||
public String setDogecoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
public String setDogecoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Dogecoin dogecoin = Dogecoin.getInstance();
|
||||
|
||||
try {
|
||||
return CrossChainUtils.setFeeCeiling(dogecoin, fee);
|
||||
return CrossChainUtils.setFeeRequired(dogecoin, fee);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
@@ -540,10 +540,10 @@ public class CrossChainLitecoinResource {
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/feeceiling")
|
||||
@Path("/feerequired")
|
||||
@Operation(
|
||||
summary = "Returns Litecoin fee per Kb.",
|
||||
description = "Returns Litecoin fee per Kb.",
|
||||
summary = "The total fee required for unlocking LTC to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
@@ -554,17 +554,17 @@ public class CrossChainLitecoinResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public String getLitecoinFeeCeiling() {
|
||||
public String getLitecoinFeeRequired() {
|
||||
Litecoin litecoin = Litecoin.getInstance();
|
||||
|
||||
return String.valueOf(litecoin.getFeeCeiling());
|
||||
return String.valueOf(litecoin.getFeeRequired());
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/updatefeeceiling")
|
||||
@Path("/updatefeerequired")
|
||||
@Operation(
|
||||
summary = "Sets Litecoin fee ceiling.",
|
||||
description = "Sets Litecoin fee ceiling.",
|
||||
summary = "The total fee required for unlocking LTC to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
@@ -583,13 +583,13 @@ public class CrossChainLitecoinResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
|
||||
public String setLitecoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
public String setLitecoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Litecoin litecoin = Litecoin.getInstance();
|
||||
|
||||
try {
|
||||
return CrossChainUtils.setFeeCeiling(litecoin, fee);
|
||||
return CrossChainUtils.setFeeRequired(litecoin, fee);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
@@ -587,10 +587,10 @@ public class CrossChainPirateChainResource {
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/feeceiling")
|
||||
@Path("/feerequired")
|
||||
@Operation(
|
||||
summary = "Returns PirateChain fee per Kb.",
|
||||
description = "Returns PirateChain fee per Kb.",
|
||||
summary = "The total fee required for unlocking ARRR to the trade offer creator.",
|
||||
description = "The total fee required for unlocking ARRR to the trade offer creator.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
@@ -601,17 +601,17 @@ public class CrossChainPirateChainResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public String getPirateChainFeeCeiling() {
|
||||
public String getPirateChainFeeRequired() {
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
return String.valueOf(pirateChain.getFeeCeiling());
|
||||
return String.valueOf(pirateChain.getFeeRequired());
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/updatefeeceiling")
|
||||
@Path("/updatefeerequired")
|
||||
@Operation(
|
||||
summary = "Sets PirateChain fee ceiling.",
|
||||
description = "Sets PirateChain fee ceiling.",
|
||||
summary = "The total fee required for unlocking ARRR to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
@@ -630,13 +630,13 @@ public class CrossChainPirateChainResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
|
||||
public String setPirateChainFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
public String setPirateChainFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
try {
|
||||
return CrossChainUtils.setFeeCeiling(pirateChain, fee);
|
||||
return CrossChainUtils.setFeeRequired(pirateChain, fee);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
@@ -502,10 +502,10 @@ public class CrossChainRavencoinResource {
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/feeceiling")
|
||||
@Path("/feerequired")
|
||||
@Operation(
|
||||
summary = "Returns Ravencoin fee per Kb.",
|
||||
description = "Returns Ravencoin fee per Kb.",
|
||||
summary = "The total fee required for unlocking RVN to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
@@ -516,17 +516,17 @@ public class CrossChainRavencoinResource {
|
||||
)
|
||||
}
|
||||
)
|
||||
public String getRavencoinFeeCeiling() {
|
||||
public String getRavencoinFeeRequired() {
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
return String.valueOf(ravencoin.getFeeCeiling());
|
||||
return String.valueOf(ravencoin.getFeeRequired());
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/updatefeeceiling")
|
||||
@Path("/updatefeerequired")
|
||||
@Operation(
|
||||
summary = "Sets Ravencoin fee ceiling.",
|
||||
description = "Sets Ravencoin fee ceiling.",
|
||||
summary = "The total fee required for unlocking RVN to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
@@ -545,13 +545,13 @@ public class CrossChainRavencoinResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
|
||||
public String setRavencoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
public String setRavencoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
try {
|
||||
return CrossChainUtils.setFeeCeiling(ravencoin, fee);
|
||||
return CrossChainUtils.setFeeRequired(ravencoin, fee);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
@@ -10,12 +10,17 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.glassfish.jersey.media.multipart.ContentDisposition;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiErrors;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.Security;
|
||||
import org.qortal.api.model.CrossChainCancelRequest;
|
||||
import org.qortal.api.model.CrossChainTradeLedgerEntry;
|
||||
import org.qortal.api.model.CrossChainTradeSummary;
|
||||
import org.qortal.controller.ForeignFeesManager;
|
||||
import org.qortal.controller.tradebot.TradeBot;
|
||||
import org.qortal.crosschain.ACCT;
|
||||
import org.qortal.crosschain.AcctMode;
|
||||
@@ -27,6 +32,8 @@ import org.qortal.data.at.ATData;
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.crosschain.CrossChainTradeData;
|
||||
import org.qortal.data.crosschain.TransactionSummary;
|
||||
import org.qortal.data.crosschain.ForeignFeeDecodedData;
|
||||
import org.qortal.data.crosschain.ForeignFeeEncodedData;
|
||||
import org.qortal.data.transaction.BaseTransactionData;
|
||||
import org.qortal.data.transaction.MessageTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
@@ -44,21 +51,36 @@ import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.ByteArray;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.HttpHeaders;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
||||
|
||||
@Path("/crosschain")
|
||||
@Tag(name = "Cross-Chain")
|
||||
public class CrossChainResource {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(CrossChainResource.class);
|
||||
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@Context
|
||||
HttpServletResponse response;
|
||||
|
||||
@Context
|
||||
ServletContext context;
|
||||
|
||||
|
||||
@GET
|
||||
@Path("/tradeoffers")
|
||||
@Operation(
|
||||
@@ -255,6 +277,12 @@ public class CrossChainResource {
|
||||
description = "Only return trades that completed on/after this timestamp (milliseconds since epoch)",
|
||||
example = "1597310000000"
|
||||
) @QueryParam("minimumTimestamp") Long minimumTimestamp,
|
||||
@Parameter(
|
||||
description = "Optionally filter by buyer Qortal public key"
|
||||
) @QueryParam("buyerPublicKey") String buyerPublicKey58,
|
||||
@Parameter(
|
||||
description = "Optionally filter by seller Qortal public key"
|
||||
) @QueryParam("sellerPublicKey") String sellerPublicKey58,
|
||||
@Parameter( ref = "limit") @QueryParam("limit") Integer limit,
|
||||
@Parameter( ref = "offset" ) @QueryParam("offset") Integer offset,
|
||||
@Parameter( ref = "reverse" ) @QueryParam("reverse") Boolean reverse) {
|
||||
@@ -266,6 +294,10 @@ public class CrossChainResource {
|
||||
if (minimumTimestamp != null && minimumTimestamp <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
// Decode public keys
|
||||
byte[] buyerPublicKey = decodePublicKey(buyerPublicKey58);
|
||||
byte[] sellerPublicKey = decodePublicKey(sellerPublicKey58);
|
||||
|
||||
final Boolean isFinished = Boolean.TRUE;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
@@ -296,7 +328,7 @@ public class CrossChainResource {
|
||||
byte[] codeHash = acctInfo.getKey().value;
|
||||
ACCT acct = acctInfo.getValue().get();
|
||||
|
||||
List<ATStateData> atStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
|
||||
List<ATStateData> atStates = repository.getATRepository().getMatchingFinalATStates(codeHash, buyerPublicKey, sellerPublicKey,
|
||||
isFinished, acct.getModeByteOffset(), (long) AcctMode.REDEEMED.value, minimumFinalHeight,
|
||||
limit, offset, reverse);
|
||||
|
||||
@@ -335,6 +367,215 @@ public class CrossChainResource {
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/signedfees")
|
||||
@Operation(
|
||||
summary = "",
|
||||
description = "",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = ForeignFeeEncodedData.class
|
||||
)
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "true on success",
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "boolean"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
public String postSignedForeignFees(List<ForeignFeeEncodedData> signedFees) {
|
||||
|
||||
LOGGER.info("signedFees = " + signedFees);
|
||||
|
||||
try {
|
||||
ForeignFeesManager.getInstance().addSignedFees(signedFees);
|
||||
|
||||
return "true";
|
||||
}
|
||||
catch( Exception e ) {
|
||||
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
|
||||
return "false";
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/unsignedfees/{address}")
|
||||
@Operation(
|
||||
summary = "",
|
||||
description = "",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = ForeignFeeEncodedData.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
|
||||
public List<ForeignFeeEncodedData> getUnsignedFees(@PathParam("address") String address) {
|
||||
|
||||
List<ForeignFeeEncodedData> unsignedFeesForAddress = ForeignFeesManager.getInstance().getUnsignedFeesForAddress(address);
|
||||
|
||||
LOGGER.info("address = " + address);
|
||||
LOGGER.info("returning unsigned = " + unsignedFeesForAddress);
|
||||
return unsignedFeesForAddress;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/signedfees")
|
||||
@Operation(
|
||||
summary = "",
|
||||
description = "",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = ForeignFeeDecodedData.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
|
||||
public List<ForeignFeeDecodedData> getSignedFees() {
|
||||
|
||||
return ForeignFeesManager.getInstance().getSignedFees();
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode Public Key
|
||||
*
|
||||
* @param publicKey58 the public key in a string
|
||||
*
|
||||
* @return the public key in bytes
|
||||
*/
|
||||
private byte[] decodePublicKey(String publicKey58) {
|
||||
|
||||
if( publicKey58 == null ) return null;
|
||||
if( publicKey58.isEmpty() ) return new byte[0];
|
||||
|
||||
byte[] publicKey;
|
||||
try {
|
||||
publicKey = Base58.decode(publicKey58);
|
||||
} catch (NumberFormatException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY, e);
|
||||
}
|
||||
|
||||
// Correct size for public key?
|
||||
if (publicKey.length != Transformer.PUBLIC_KEY_LENGTH)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY);
|
||||
|
||||
return publicKey;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/ledger/{publicKey}")
|
||||
@Operation(
|
||||
summary = "Accounting entries for all trades.",
|
||||
description = "Returns accounting entries for all completed cross-chain trades",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
format = "byte"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
|
||||
public HttpServletResponse getLedgerEntries(
|
||||
@PathParam("publicKey") String publicKey58,
|
||||
@Parameter(
|
||||
description = "Only return trades that completed on/after this timestamp (milliseconds since epoch)",
|
||||
example = "1597310000000"
|
||||
) @QueryParam("minimumTimestamp") Long minimumTimestamp) {
|
||||
|
||||
byte[] publicKey = decodePublicKey(publicKey58);
|
||||
|
||||
// minimumTimestamp (if given) needs to be positive
|
||||
if (minimumTimestamp != null && minimumTimestamp <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
Integer minimumFinalHeight = null;
|
||||
|
||||
if (minimumTimestamp != null) {
|
||||
minimumFinalHeight = repository.getBlockRepository().getHeightFromTimestamp(minimumTimestamp);
|
||||
// If not found in the block repository it will return either 0 or 1
|
||||
if (minimumFinalHeight == 0 || minimumFinalHeight == 1) {
|
||||
// Try the archive
|
||||
minimumFinalHeight = repository.getBlockArchiveRepository().getHeightFromTimestamp(minimumTimestamp);
|
||||
}
|
||||
|
||||
if (minimumFinalHeight == 0)
|
||||
// We don't have any blocks since minimumTimestamp, let alone trades, so nothing to return
|
||||
return response;
|
||||
|
||||
// height returned from repository is for block BEFORE timestamp
|
||||
// but we want trades AFTER timestamp so bump height accordingly
|
||||
minimumFinalHeight++;
|
||||
}
|
||||
|
||||
List<CrossChainTradeLedgerEntry> crossChainTradeLedgerEntries = new ArrayList<>();
|
||||
|
||||
Map<ByteArray, Supplier<ACCT>> acctsByCodeHash = SupportedBlockchain.getAcctMap();
|
||||
|
||||
// collect ledger entries for each ACCT
|
||||
for (Map.Entry<ByteArray, Supplier<ACCT>> acctInfo : acctsByCodeHash.entrySet()) {
|
||||
byte[] codeHash = acctInfo.getKey().value;
|
||||
ACCT acct = acctInfo.getValue().get();
|
||||
|
||||
// collect buys and sells
|
||||
CrossChainUtils.collectLedgerEntries(publicKey, repository, minimumFinalHeight, crossChainTradeLedgerEntries, codeHash, acct, true);
|
||||
CrossChainUtils.collectLedgerEntries(publicKey, repository, minimumFinalHeight, crossChainTradeLedgerEntries, codeHash, acct, false);
|
||||
}
|
||||
|
||||
crossChainTradeLedgerEntries.sort((a, b) -> Longs.compare(a.getTradeTimestamp(), b.getTradeTimestamp()));
|
||||
|
||||
response.setStatus(HttpServletResponse.SC_OK);
|
||||
response.setContentType("text/csv");
|
||||
response.setHeader(
|
||||
HttpHeaders.CONTENT_DISPOSITION,
|
||||
ContentDisposition
|
||||
.type("attachment")
|
||||
.fileName(CrossChainUtils.createLedgerFileName(Crypto.toAddress(publicKey)))
|
||||
.build()
|
||||
.toString()
|
||||
);
|
||||
|
||||
CrossChainUtils.writeToLedger( response.getWriter(), crossChainTradeLedgerEntries);
|
||||
|
||||
return response;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
} catch (IOException e) {
|
||||
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/price/{blockchain}")
|
||||
@Operation(
|
||||
|
@@ -10,21 +10,37 @@ import org.bitcoinj.script.ScriptBuilder;
|
||||
|
||||
import org.bouncycastle.util.Strings;
|
||||
import org.json.simple.JSONObject;
|
||||
import org.qortal.api.model.CrossChainTradeLedgerEntry;
|
||||
import org.qortal.api.model.crosschain.BitcoinyTBDRequest;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.crosschain.*;
|
||||
import org.qortal.data.account.AccountBalanceData;
|
||||
import org.qortal.data.at.ATData;
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.crosschain.*;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.event.LockingFeeUpdateEvent;
|
||||
import org.qortal.event.RequiredFeeUpdateEvent;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.utils.Amounts;
|
||||
import org.qortal.utils.BitTwiddling;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.text.DateFormat;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.*;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
||||
public class CrossChainUtils {
|
||||
public static final String QORT_CURRENCY_CODE = "QORT";
|
||||
private static final Logger LOGGER = LogManager.getLogger(CrossChainUtils.class);
|
||||
public static final String CORE_API_CALL = "Core API Call";
|
||||
public static final String QORTAL_EXCHANGE_LABEL = "Qortal";
|
||||
|
||||
public static ServerConfigurationInfo buildServerConfigurationInfo(Bitcoiny blockchain) {
|
||||
|
||||
@@ -88,11 +104,13 @@ public class CrossChainUtils {
|
||||
|
||||
bitcoiny.setFeePerKb(Coin.valueOf(satoshis) );
|
||||
|
||||
EventBus.INSTANCE.notify(new LockingFeeUpdateEvent());
|
||||
|
||||
return String.valueOf(bitcoiny.getFeePerKb().value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set Fee Ceiling
|
||||
* Set Fee Required
|
||||
*
|
||||
* @param bitcoiny the blockchain support
|
||||
* @param fee the fee in satoshis
|
||||
@@ -101,14 +119,16 @@ public class CrossChainUtils {
|
||||
*
|
||||
* @throws IllegalArgumentException if invalid
|
||||
*/
|
||||
public static String setFeeCeiling(Bitcoiny bitcoiny, String fee) throws IllegalArgumentException{
|
||||
public static String setFeeRequired(Bitcoiny bitcoiny, String fee) throws IllegalArgumentException{
|
||||
|
||||
long satoshis = Long.parseLong(fee);
|
||||
if( satoshis < 0 ) throw new IllegalArgumentException("can't set fee to negative number");
|
||||
|
||||
bitcoiny.setFeeCeiling( Long.parseLong(fee));
|
||||
bitcoiny.setFeeRequired( Long.parseLong(fee));
|
||||
|
||||
return String.valueOf(bitcoiny.getFeeCeiling());
|
||||
EventBus.INSTANCE.notify(new RequiredFeeUpdateEvent(bitcoiny));
|
||||
|
||||
return String.valueOf(bitcoiny.getFeeRequired());
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -217,6 +237,9 @@ public class CrossChainUtils {
|
||||
return bitcoiny.getBlockchainProvider().removeServer(server);
|
||||
}
|
||||
|
||||
public static ChainableServer getCurrentServer( Bitcoiny bitcoiny ) {
|
||||
return bitcoiny.getBlockchainProvider().getCurrentServer();
|
||||
}
|
||||
/**
|
||||
* Set Current Server
|
||||
*
|
||||
@@ -632,4 +655,170 @@ public class CrossChainUtils {
|
||||
byte[] lockTimeABytes = BitTwiddling.toBEByteArray((long) lockTimeA);
|
||||
return Bytes.concat(partnerBitcoinPKH, hashOfSecretA, lockTimeABytes);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write To Ledger
|
||||
*
|
||||
* @param writer the writer to the ledger
|
||||
* @param entries the entries to write to the ledger
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void writeToLedger(Writer writer, List<CrossChainTradeLedgerEntry> entries) throws IOException {
|
||||
|
||||
BufferedWriter bufferedWriter = new BufferedWriter(writer);
|
||||
|
||||
StringJoiner header = new StringJoiner(",");
|
||||
header.add("Market");
|
||||
header.add("Currency");
|
||||
header.add("Quantity");
|
||||
header.add("Commission Paid");
|
||||
header.add("Commission Currency");
|
||||
header.add("Total Price");
|
||||
header.add("Date Time");
|
||||
header.add("Exchange");
|
||||
|
||||
bufferedWriter.append(header.toString());
|
||||
|
||||
DateFormat dateFormatter = new SimpleDateFormat("yyyyMMdd HH:mm");
|
||||
dateFormatter.setTimeZone(TimeZone.getTimeZone("UTC"));
|
||||
|
||||
for( CrossChainTradeLedgerEntry entry : entries ) {
|
||||
StringJoiner joiner = new StringJoiner(",");
|
||||
|
||||
joiner.add(entry.getMarket());
|
||||
joiner.add(entry.getCurrency());
|
||||
joiner.add(String.valueOf(Amounts.prettyAmount(entry.getQuantity())));
|
||||
joiner.add(String.valueOf(Amounts.prettyAmount(entry.getFeeAmount())));
|
||||
joiner.add(entry.getFeeCurrency());
|
||||
joiner.add(String.valueOf(Amounts.prettyAmount(entry.getTotalPrice())));
|
||||
joiner.add(dateFormatter.format(new Date(entry.getTradeTimestamp())));
|
||||
joiner.add(QORTAL_EXCHANGE_LABEL);
|
||||
|
||||
bufferedWriter.newLine();
|
||||
bufferedWriter.append(joiner.toString());
|
||||
}
|
||||
|
||||
bufferedWriter.newLine();
|
||||
bufferedWriter.flush();
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Ledger File Name
|
||||
*
|
||||
* Create a file name the includes timestamp and address.
|
||||
*
|
||||
* @param address the address
|
||||
*
|
||||
* @return the file name created
|
||||
*/
|
||||
public static String createLedgerFileName(String address) {
|
||||
DateFormat dateFormatter = new SimpleDateFormat("yyyyMMddHHmmss");
|
||||
String fileName = "ledger-" + address + "-" + dateFormatter.format(new Date());
|
||||
return fileName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect Ledger Entries
|
||||
*
|
||||
* @param publicKey the public key for the ledger entries, buy and sell
|
||||
* @param repository the data repository
|
||||
* @param minimumFinalHeight the minimum block height for entries to be collected
|
||||
* @param entries the ledger entries to add to
|
||||
* @param codeHash code hash for the entry blockchain
|
||||
* @param acct the ACCT for the entry blockchain
|
||||
* @param isBuy true collecting entries for a buy, otherwise false
|
||||
*
|
||||
* @throws DataException
|
||||
*/
|
||||
public static void collectLedgerEntries(
|
||||
byte[] publicKey,
|
||||
Repository repository,
|
||||
Integer minimumFinalHeight,
|
||||
List<CrossChainTradeLedgerEntry> entries,
|
||||
byte[] codeHash,
|
||||
ACCT acct,
|
||||
boolean isBuy) throws DataException {
|
||||
|
||||
// get all the final AT states for the code hash (foreign coin)
|
||||
List<ATStateData> atStates
|
||||
= repository.getATRepository().getMatchingFinalATStates(
|
||||
codeHash,
|
||||
isBuy ? publicKey : null,
|
||||
!isBuy ? publicKey : null,
|
||||
Boolean.TRUE, acct.getModeByteOffset(),
|
||||
(long) AcctMode.REDEEMED.value,
|
||||
minimumFinalHeight,
|
||||
null, null, false
|
||||
);
|
||||
|
||||
String foreignBlockchainCurrencyCode = acct.getBlockchain().getCurrencyCode();
|
||||
|
||||
// for each trade, build ledger entry, collect ledger entry
|
||||
for (ATStateData atState : atStates) {
|
||||
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atState);
|
||||
|
||||
// We also need block timestamp for use as trade timestamp
|
||||
long localTimestamp = repository.getBlockRepository().getTimestampFromHeight(atState.getHeight());
|
||||
|
||||
if (localTimestamp == 0) {
|
||||
// Try the archive
|
||||
localTimestamp = repository.getBlockArchiveRepository().getTimestampFromHeight(atState.getHeight());
|
||||
}
|
||||
|
||||
CrossChainTradeLedgerEntry ledgerEntry
|
||||
= new CrossChainTradeLedgerEntry(
|
||||
isBuy ? QORT_CURRENCY_CODE : foreignBlockchainCurrencyCode,
|
||||
isBuy ? foreignBlockchainCurrencyCode : QORT_CURRENCY_CODE,
|
||||
isBuy ? crossChainTradeData.qortAmount : crossChainTradeData.expectedForeignAmount,
|
||||
0,
|
||||
foreignBlockchainCurrencyCode,
|
||||
isBuy ? crossChainTradeData.expectedForeignAmount : crossChainTradeData.qortAmount,
|
||||
localTimestamp);
|
||||
|
||||
entries.add(ledgerEntry);
|
||||
}
|
||||
}
|
||||
|
||||
public static List<CrossChainTradeData> populateTradeDataList(Repository repository, ACCT acct, List<ATData> atDataList) throws DataException {
|
||||
|
||||
if(atDataList.isEmpty()) return new ArrayList<>(0);
|
||||
|
||||
List<ATStateData> latestATStates
|
||||
= repository.getATRepository()
|
||||
.getLatestATStates(
|
||||
atDataList.stream()
|
||||
.map(ATData::getATAddress)
|
||||
.collect(Collectors.toList())
|
||||
);
|
||||
|
||||
Map<String, ATStateData> atStateDataByAtAddress
|
||||
= latestATStates.stream().collect(Collectors.toMap(ATStateData::getATAddress, Function.identity()));
|
||||
|
||||
Map<String, ATData> atDataByAtAddress
|
||||
= atDataList.stream().collect(Collectors.toMap(ATData::getATAddress, Function.identity()));
|
||||
|
||||
Map<String, Long> balanceByAtAddress
|
||||
= repository
|
||||
.getAccountRepository()
|
||||
.getBalances(new ArrayList<>(atDataByAtAddress.keySet()), Asset.QORT)
|
||||
.stream().collect(Collectors.toMap(AccountBalanceData::getAddress, AccountBalanceData::getBalance));
|
||||
|
||||
List<CrossChainTradeData> crossChainTradeDataList = new ArrayList<>(latestATStates.size());
|
||||
|
||||
for( ATStateData atStateData : latestATStates ) {
|
||||
ATData atData = atDataByAtAddress.get(atStateData.getATAddress());
|
||||
crossChainTradeDataList.add(
|
||||
acct.populateTradeData(
|
||||
repository,
|
||||
atData.getCreatorPublicKey(),
|
||||
atData.getCreation(),
|
||||
atStateData,
|
||||
OptionalLong.of(balanceByAtAddress.get(atStateData.getATAddress()))
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
}
|
@@ -33,6 +33,7 @@ import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@Path("/names")
|
||||
@@ -104,6 +105,45 @@ public class NamesResource {
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/primary/{address}")
|
||||
@Operation(
|
||||
summary = "primary name owned by address",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "registered primary name info",
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(implementation = NameSummary.class)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE, ApiError.UNAUTHORIZED})
|
||||
public NameSummary getPrimaryNameByAddress(@PathParam("address") String address) {
|
||||
if (!Crypto.isValidAddress(address))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
if (Settings.getInstance().isLite()) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED);
|
||||
}
|
||||
else {
|
||||
Optional<String> primaryName = repository.getNameRepository().getPrimaryName(address);
|
||||
|
||||
if(primaryName.isPresent()) {
|
||||
return new NameSummary(new NameData(primaryName.get(), address));
|
||||
}
|
||||
else {
|
||||
return new NameSummary((new NameData(null, address)));
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/{name}")
|
||||
@Operation(
|
||||
|
@@ -32,6 +32,7 @@ import org.qortal.controller.Synchronizer.SynchronizationResult;
|
||||
import org.qortal.controller.repository.BlockArchiveRebuilder;
|
||||
import org.qortal.data.account.MintingAccountData;
|
||||
import org.qortal.data.account.RewardShareData;
|
||||
import org.qortal.data.system.DbConnectionInfo;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.PeerAddress;
|
||||
@@ -40,6 +41,7 @@ import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.data.system.SystemInfo;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
@@ -52,6 +54,7 @@ import java.net.InetSocketAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
@@ -1064,4 +1067,29 @@ public class AdminResource {
|
||||
return "true";
|
||||
}
|
||||
|
||||
}
|
||||
@GET
|
||||
@Path("/systeminfo")
|
||||
@Operation(
|
||||
summary = "System Information",
|
||||
description = "System memory usage and available processors.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "memory usage and available processors",
|
||||
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = SystemInfo.class))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.REPOSITORY_ISSUE})
|
||||
public SystemInfo getSystemInformation() {
|
||||
|
||||
SystemInfo info
|
||||
= new SystemInfo(
|
||||
Runtime.getRuntime().freeMemory(),
|
||||
Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(),
|
||||
Runtime.getRuntime().totalMemory(),
|
||||
Runtime.getRuntime().maxMemory(),
|
||||
Runtime.getRuntime().availableProcessors());
|
||||
|
||||
return info;
|
||||
}
|
||||
}
|
@@ -71,33 +71,33 @@ public class RenderResource {
|
||||
@Path("/signature/{signature}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme);
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/signature/{signature}/{path:.*}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme);
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/hash/{hash}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme);
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@@ -105,11 +105,11 @@ public class RenderResource {
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getPathByHash(@PathParam("hash") String hash58, @PathParam("path") String inPath,
|
||||
@QueryParam("secret") String secret58,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme);
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@@ -119,12 +119,12 @@ public class RenderResource {
|
||||
@PathParam("name") String name,
|
||||
@PathParam("path") String inPath,
|
||||
@QueryParam("identifier") String identifier,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
|
||||
String prefix = String.format("/render/%s", service);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@@ -133,18 +133,18 @@ public class RenderResource {
|
||||
public HttpServletResponse getIndexByName(@PathParam("service") Service service,
|
||||
@PathParam("name") String name,
|
||||
@QueryParam("identifier") String identifier,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
|
||||
String prefix = String.format("/render/%s", service);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme, lang);
|
||||
}
|
||||
|
||||
|
||||
|
||||
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
|
||||
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String theme) {
|
||||
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String theme, String lang) {
|
||||
|
||||
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, identifier, inPath,
|
||||
secret58, prefix, includeResourceIdInPrefix, async, "render", request, response, context);
|
||||
@@ -152,6 +152,9 @@ public class RenderResource {
|
||||
if (theme != null) {
|
||||
renderer.setTheme(theme);
|
||||
}
|
||||
if (lang != null) {
|
||||
renderer.setLang(lang);
|
||||
}
|
||||
return renderer.render();
|
||||
}
|
||||
|
||||
|
@@ -77,7 +77,9 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session));
|
||||
Boolean hasChatReference = getHasChatReference(session);
|
||||
|
||||
ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session), hasChatReference);
|
||||
|
||||
StringWriter stringWriter = new StringWriter();
|
||||
|
||||
@@ -103,4 +105,20 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
|
||||
return Encoding.valueOf(encoding);
|
||||
}
|
||||
|
||||
private Boolean getHasChatReference(Session session) {
|
||||
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
|
||||
List<String> hasChatReferenceList = queryParams.get("haschatreference");
|
||||
|
||||
// Return null if not specified
|
||||
if (hasChatReferenceList != null && hasChatReferenceList.size() == 1) {
|
||||
String value = hasChatReferenceList.get(0).toLowerCase();
|
||||
if (value.equals("true")) {
|
||||
return true;
|
||||
} else if (value.equals("false")) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return null; // Ignored if not present
|
||||
}
|
||||
|
||||
}
|
||||
|
102
src/main/java/org/qortal/api/websocket/DataMonitorSocket.java
Normal file
102
src/main/java/org/qortal/api/websocket/DataMonitorSocket.java
Normal file
@@ -0,0 +1,102 @@
|
||||
package org.qortal.api.websocket;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.eclipse.jetty.websocket.api.Session;
|
||||
import org.eclipse.jetty.websocket.api.WebSocketException;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
|
||||
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
|
||||
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.arbitrary.DataMonitorInfo;
|
||||
import org.qortal.event.DataMonitorEvent;
|
||||
import org.qortal.event.Event;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.event.Listener;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.util.List;
|
||||
|
||||
@WebSocket
|
||||
@SuppressWarnings("serial")
|
||||
public class DataMonitorSocket extends ApiWebSocket implements Listener {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(DataMonitorSocket.class);
|
||||
|
||||
@Override
|
||||
public void configure(WebSocketServletFactory factory) {
|
||||
LOGGER.info("configure");
|
||||
|
||||
factory.register(DataMonitorSocket.class);
|
||||
|
||||
EventBus.INSTANCE.addListener(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void listen(Event event) {
|
||||
if (!(event instanceof DataMonitorEvent))
|
||||
return;
|
||||
|
||||
DataMonitorEvent dataMonitorEvent = (DataMonitorEvent) event;
|
||||
|
||||
for (Session session : getSessions())
|
||||
sendDataEventSummary(session, buildInfo(dataMonitorEvent));
|
||||
}
|
||||
|
||||
private DataMonitorInfo buildInfo(DataMonitorEvent dataMonitorEvent) {
|
||||
|
||||
return new DataMonitorInfo(
|
||||
dataMonitorEvent.getTimestamp(),
|
||||
dataMonitorEvent.getIdentifier(),
|
||||
dataMonitorEvent.getName(),
|
||||
dataMonitorEvent.getService(),
|
||||
dataMonitorEvent.getDescription(),
|
||||
dataMonitorEvent.getTransactionTimestamp(),
|
||||
dataMonitorEvent.getLatestPutTimestamp()
|
||||
);
|
||||
}
|
||||
|
||||
@OnWebSocketConnect
|
||||
@Override
|
||||
public void onWebSocketConnect(Session session) {
|
||||
super.onWebSocketConnect(session);
|
||||
}
|
||||
|
||||
@OnWebSocketClose
|
||||
@Override
|
||||
public void onWebSocketClose(Session session, int statusCode, String reason) {
|
||||
super.onWebSocketClose(session, statusCode, reason);
|
||||
}
|
||||
|
||||
@OnWebSocketError
|
||||
public void onWebSocketError(Session session, Throwable throwable) {
|
||||
/* We ignore errors for now, but method here to silence log spam */
|
||||
}
|
||||
|
||||
@OnWebSocketMessage
|
||||
public void onWebSocketMessage(Session session, String message) {
|
||||
LOGGER.info("onWebSocketMessage: message = " + message);
|
||||
}
|
||||
|
||||
private void sendDataEventSummary(Session session, DataMonitorInfo dataMonitorInfo) {
|
||||
StringWriter stringWriter = new StringWriter();
|
||||
|
||||
try {
|
||||
marshall(stringWriter, dataMonitorInfo);
|
||||
|
||||
session.getRemote().sendStringByFuture(stringWriter.toString());
|
||||
} catch (IOException | WebSocketException e) {
|
||||
// No output this time
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@@ -98,7 +98,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
|
||||
byte[] codeHash = acctInfo.getKey().value;
|
||||
ACCT acct = acctInfo.getValue().get();
|
||||
|
||||
List<ATStateData> atStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
|
||||
List<ATStateData> atStates = repository.getATRepository().getMatchingFinalATStates(codeHash, null, null,
|
||||
isFinished, dataByteOffset, expectedValue, minimumFinalHeight,
|
||||
null, null, null);
|
||||
|
||||
@@ -259,7 +259,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
|
||||
ACCT acct = acctInfo.getValue().get();
|
||||
|
||||
Integer dataByteOffset = acct.getModeByteOffset();
|
||||
List<ATStateData> initialAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
|
||||
List<ATStateData> initialAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash, null, null,
|
||||
isFinished, dataByteOffset, expectedValue, minimumFinalHeight,
|
||||
null, null, null);
|
||||
|
||||
@@ -298,7 +298,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
|
||||
byte[] codeHash = acctInfo.getKey().value;
|
||||
ACCT acct = acctInfo.getValue().get();
|
||||
|
||||
List<ATStateData> historicAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
|
||||
List<ATStateData> historicAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash, null, null,
|
||||
isFinished, dataByteOffset, expectedValue, minimumFinalHeight,
|
||||
null, null, null);
|
||||
|
||||
|
@@ -0,0 +1,83 @@
|
||||
package org.qortal.api.websocket;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.eclipse.jetty.websocket.api.Session;
|
||||
import org.eclipse.jetty.websocket.api.WebSocketException;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
|
||||
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
|
||||
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
|
||||
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
|
||||
import org.qortal.data.crosschain.UnsignedFeeEvent;
|
||||
import org.qortal.event.Event;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.event.FeeWaitingEvent;
|
||||
import org.qortal.event.Listener;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
|
||||
@WebSocket
|
||||
@SuppressWarnings("serial")
|
||||
public class UnsignedFeesSocket extends ApiWebSocket implements Listener {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(UnsignedFeesSocket.class);
|
||||
|
||||
@Override
|
||||
public void configure(WebSocketServletFactory factory) {
|
||||
LOGGER.info("configure");
|
||||
|
||||
factory.register(UnsignedFeesSocket.class);
|
||||
|
||||
EventBus.INSTANCE.addListener(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void listen(Event event) {
|
||||
if (!(event instanceof FeeWaitingEvent))
|
||||
return;
|
||||
|
||||
for (Session session : getSessions()) {
|
||||
FeeWaitingEvent feeWaitingEvent = (FeeWaitingEvent) event;
|
||||
sendUnsignedFeeEvent(session, new UnsignedFeeEvent(feeWaitingEvent.isPositive(), feeWaitingEvent.getAddress()));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@OnWebSocketConnect
|
||||
@Override
|
||||
public void onWebSocketConnect(Session session) {
|
||||
super.onWebSocketConnect(session);
|
||||
}
|
||||
|
||||
@OnWebSocketClose
|
||||
@Override
|
||||
public void onWebSocketClose(Session session, int statusCode, String reason) {
|
||||
super.onWebSocketClose(session, statusCode, reason);
|
||||
}
|
||||
|
||||
@OnWebSocketError
|
||||
public void onWebSocketError(Session session, Throwable throwable) {
|
||||
/* We ignore errors for now, but method here to silence log spam */
|
||||
}
|
||||
|
||||
@OnWebSocketMessage
|
||||
public void onWebSocketMessage(Session session, String message) {
|
||||
LOGGER.info("onWebSocketMessage: message = " + message);
|
||||
}
|
||||
|
||||
private void sendUnsignedFeeEvent(Session session, UnsignedFeeEvent unsignedFeeEvent) {
|
||||
StringWriter stringWriter = new StringWriter();
|
||||
|
||||
try {
|
||||
marshall(stringWriter, unsignedFeeEvent);
|
||||
|
||||
session.getRemote().sendStringByFuture(stringWriter.toString());
|
||||
} catch (IOException | WebSocketException e) {
|
||||
// No output this time
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@@ -4,9 +4,12 @@ import org.qortal.repository.DataException;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.FileSystem;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.security.MessageDigest;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.Arrays;
|
||||
@@ -23,37 +26,53 @@ public class ArbitraryDataDigest {
|
||||
}
|
||||
|
||||
public void compute() throws IOException, DataException {
|
||||
List<Path> allPaths = Files.walk(path).filter(Files::isRegularFile).sorted().collect(Collectors.toList());
|
||||
List<Path> allPaths = Files.walk(path)
|
||||
.filter(Files::isRegularFile)
|
||||
.sorted()
|
||||
.collect(Collectors.toList());
|
||||
|
||||
Path basePathAbsolute = this.path.toAbsolutePath();
|
||||
|
||||
|
||||
MessageDigest sha256;
|
||||
try {
|
||||
sha256 = MessageDigest.getInstance("SHA-256");
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new DataException("SHA-256 hashing algorithm unavailable");
|
||||
}
|
||||
|
||||
|
||||
for (Path path : allPaths) {
|
||||
// We need to work with paths relative to the base path, to ensure the same hash
|
||||
// is generated on different systems
|
||||
Path relativePath = basePathAbsolute.relativize(path.toAbsolutePath());
|
||||
|
||||
|
||||
// Exclude Qortal folder since it can be different each time
|
||||
// We only care about hashing the actual user data
|
||||
if (relativePath.startsWith(".qortal/")) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
// Account for \ VS / : Linux VS Windows
|
||||
String pathString = relativePath.toString();
|
||||
if (relativePath.getFileSystem().toString().contains("Windows")) {
|
||||
pathString = pathString.replace("\\", "/");
|
||||
}
|
||||
|
||||
// Hash path
|
||||
byte[] filePathBytes = relativePath.toString().getBytes(StandardCharsets.UTF_8);
|
||||
byte[] filePathBytes = pathString.getBytes(StandardCharsets.UTF_8);
|
||||
sha256.update(filePathBytes);
|
||||
|
||||
// Hash contents
|
||||
byte[] fileContent = Files.readAllBytes(path);
|
||||
sha256.update(fileContent);
|
||||
|
||||
try (InputStream in = Files.newInputStream(path)) {
|
||||
byte[] buffer = new byte[65536]; // 64 KB
|
||||
int bytesRead;
|
||||
while ((bytesRead = in.read(buffer)) != -1) {
|
||||
sha256.update(buffer, 0, bytesRead);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
this.hash = sha256.digest();
|
||||
}
|
||||
|
||||
|
||||
public boolean isHashValid(byte[] hash) {
|
||||
return Arrays.equals(hash, this.hash);
|
||||
|
@@ -52,7 +52,7 @@ public class ArbitraryDataFile {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFile.class);
|
||||
|
||||
public static final long MAX_FILE_SIZE = 500 * 1024 * 1024; // 500MiB
|
||||
public static final long MAX_FILE_SIZE = 2L * 1024 * 1024 * 1024; // 2 GiB
|
||||
protected static final int MAX_CHUNK_SIZE = 1 * 1024 * 1024; // 1MiB
|
||||
public static final int CHUNK_SIZE = 512 * 1024; // 0.5MiB
|
||||
public static int SHORT_DIGEST_LENGTH = 8;
|
||||
|
@@ -439,7 +439,15 @@ public class ArbitraryDataReader {
|
||||
// Ensure the complete hash matches the joined chunks
|
||||
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
|
||||
// Delete the invalid file
|
||||
arbitraryDataFile.delete();
|
||||
LOGGER.info("Deleting invalid file: path = " + arbitraryDataFile.getFilePath());
|
||||
|
||||
if( arbitraryDataFile.delete() ) {
|
||||
LOGGER.info("Deleted invalid file successfully: path = " + arbitraryDataFile.getFilePath());
|
||||
}
|
||||
else {
|
||||
LOGGER.warn("Could not delete invalid file: path = " + arbitraryDataFile.getFilePath());
|
||||
}
|
||||
|
||||
throw new DataException("Unable to validate complete file hash");
|
||||
}
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package org.qortal.arbitrary;
|
||||
|
||||
import com.google.common.io.Resources;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang3.ArrayUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
@@ -15,11 +16,13 @@ import org.qortal.settings.Settings;
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.net.URL;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
@@ -37,6 +40,7 @@ public class ArbitraryDataRenderer {
|
||||
private final Service service;
|
||||
private final String identifier;
|
||||
private String theme = "light";
|
||||
private String lang = "en";
|
||||
private String inPath;
|
||||
private final String secret58;
|
||||
private final String prefix;
|
||||
@@ -166,9 +170,16 @@ public class ArbitraryDataRenderer {
|
||||
if (HTMLParser.isHtmlFile(filename)) {
|
||||
// HTML file - needs to be parsed
|
||||
byte[] data = Files.readAllBytes(filePath); // TODO: limit file size that can be read into memory
|
||||
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting);
|
||||
String encodedResourceId;
|
||||
|
||||
if (resourceIdType == ResourceIdType.NAME) {
|
||||
encodedResourceId = resourceId.replace(" ", "%20");
|
||||
} else {
|
||||
encodedResourceId = resourceId;
|
||||
}
|
||||
HTMLParser htmlParser = new HTMLParser(encodedResourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting, lang);
|
||||
htmlParser.addAdditionalHeaderTags();
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss:;");
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss: blob:;");
|
||||
response.setContentType(context.getMimeType(filename));
|
||||
response.setContentLength(htmlParser.getData().length);
|
||||
response.getOutputStream().write(htmlParser.getData());
|
||||
@@ -256,5 +267,8 @@ public class ArbitraryDataRenderer {
|
||||
public void setTheme(String theme) {
|
||||
this.theme = theme;
|
||||
}
|
||||
public void setLang(String lang) {
|
||||
this.lang = lang;
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -29,6 +29,7 @@ import org.qortal.utils.FilesystemUtils;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
@@ -197,7 +198,7 @@ public class ArbitraryDataTransactionBuilder {
|
||||
|
||||
// We can't use PATCH for on-chain data because this requires the .qortal directory, which can't be put on chain
|
||||
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(this.path, false);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(Files.size(path)) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
if (shouldUseOnChainData) {
|
||||
LOGGER.info("Data size is small enough to go on chain - using PUT");
|
||||
return Method.PUT;
|
||||
@@ -245,7 +246,7 @@ public class ArbitraryDataTransactionBuilder {
|
||||
|
||||
// Single file resources are handled differently, especially for very small data payloads, as these go on chain
|
||||
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(path, false);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(Files.size(path)) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
|
||||
// Use zip compression if data isn't going on chain
|
||||
Compression compression = shouldUseOnChainData ? Compression.NONE : Compression.ZIP;
|
||||
|
@@ -37,7 +37,7 @@ public enum Service {
|
||||
if (files != null && files[0] != null) {
|
||||
final String extension = FilenameUtils.getExtension(files[0].getName()).toLowerCase();
|
||||
// We must allow blank file extensions because these are used by data published from a plaintext or base64-encoded string
|
||||
final List<String> allowedExtensions = Arrays.asList("zip", "pdf", "txt", "odt", "ods", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "");
|
||||
final List<String> allowedExtensions = Arrays.asList("qortal", "zip", "pdf", "txt", "odt", "ods", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "");
|
||||
if (extension == null || !allowedExtensions.contains(extension)) {
|
||||
return ValidationResult.INVALID_FILE_EXTENSION;
|
||||
}
|
||||
@@ -62,7 +62,17 @@ public enum Service {
|
||||
|
||||
// Custom validation function to require an index HTML file in the root directory
|
||||
List<String> fileNames = ArbitraryDataRenderer.indexFiles();
|
||||
String[] files = path.toFile().list();
|
||||
List<String> files;
|
||||
|
||||
// single files are paackaged differently
|
||||
if( path.toFile().isFile() ) {
|
||||
files = new ArrayList<>(1);
|
||||
files.add(path.getFileName().toString());
|
||||
}
|
||||
else {
|
||||
files = new ArrayList<>(Arrays.asList(path.toFile().list()));
|
||||
}
|
||||
|
||||
if (files != null) {
|
||||
for (String file : files) {
|
||||
Path fileName = Paths.get(file).getFileName();
|
||||
|
@@ -23,8 +23,10 @@ import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
import org.qortal.data.block.BlockTransactionData;
|
||||
import org.qortal.data.group.GroupAdminData;
|
||||
import org.qortal.data.network.OnlineAccountData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.group.Group;
|
||||
import org.qortal.repository.*;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transaction.AtTransaction;
|
||||
@@ -37,6 +39,7 @@ import org.qortal.transform.block.BlockTransformer;
|
||||
import org.qortal.transform.transaction.TransactionTransformer;
|
||||
import org.qortal.utils.Amounts;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.Groups;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
@@ -145,10 +148,10 @@ public class Block {
|
||||
|
||||
private final Account recipientAccount;
|
||||
private final AccountData recipientAccountData;
|
||||
|
||||
|
||||
final BlockChain blockChain = BlockChain.getInstance();
|
||||
|
||||
ExpandedAccount(Repository repository, RewardShareData rewardShareData) throws DataException {
|
||||
ExpandedAccount(Repository repository, RewardShareData rewardShareData, int blockHeight) throws DataException {
|
||||
this.rewardShareData = rewardShareData;
|
||||
this.sharePercent = this.rewardShareData.getSharePercent();
|
||||
|
||||
@@ -157,7 +160,12 @@ public class Block {
|
||||
this.isMinterFounder = Account.isFounder(mintingAccountData.getFlags());
|
||||
|
||||
this.isRecipientAlsoMinter = this.rewardShareData.getRecipient().equals(this.mintingAccount.getAddress());
|
||||
this.isMinterMember = repository.getGroupRepository().memberExists(BlockChain.getInstance().getMintingGroupId(), this.mintingAccount.getAddress());
|
||||
this.isMinterMember
|
||||
= Groups.memberExistsInAnyGroup(
|
||||
repository.getGroupRepository(),
|
||||
Groups.getGroupIdsToMint(BlockChain.getInstance(), blockHeight),
|
||||
this.mintingAccount.getAddress()
|
||||
);
|
||||
|
||||
if (this.isRecipientAlsoMinter) {
|
||||
// Self-share: minter is also recipient
|
||||
@@ -170,6 +178,19 @@ public class Block {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Effective Minting Level
|
||||
*
|
||||
* @return the effective minting level, if a data exception is thrown, it catches the exception and returns a zero
|
||||
*/
|
||||
public int getEffectiveMintingLevel() {
|
||||
try {
|
||||
return this.mintingAccount.getEffectiveMintingLevel();
|
||||
} catch (DataException e) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
public Account getMintingAccount() {
|
||||
return this.mintingAccount;
|
||||
}
|
||||
@@ -186,7 +207,7 @@ public class Block {
|
||||
* @return account-level share "bin" from blockchain config, or null if founder / none found
|
||||
*/
|
||||
public AccountLevelShareBin getShareBin(int blockHeight) {
|
||||
if (this.isMinterFounder)
|
||||
if (this.isMinterFounder && blockHeight < BlockChain.getInstance().getAdminsReplaceFoundersHeight())
|
||||
return null;
|
||||
|
||||
final int accountLevel = this.mintingAccountData.getLevel();
|
||||
@@ -403,7 +424,9 @@ public class Block {
|
||||
onlineAccounts.removeIf(a -> a.getNonce() == null || a.getNonce() < 0);
|
||||
|
||||
// After feature trigger, remove any online accounts that are level 0
|
||||
if (height >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
|
||||
// but only if they are before the ignore level feature trigger
|
||||
if (height < BlockChain.getInstance().getIgnoreLevelForRewardShareHeight() &&
|
||||
height >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
|
||||
onlineAccounts.removeIf(a -> {
|
||||
try {
|
||||
return Account.getRewardShareEffectiveMintingLevel(repository, a.getPublicKey()) == 0;
|
||||
@@ -414,6 +437,21 @@ public class Block {
|
||||
});
|
||||
}
|
||||
|
||||
// After feature trigger, remove any online accounts that are not minter group member
|
||||
if (height >= BlockChain.getInstance().getGroupMemberCheckHeight()) {
|
||||
onlineAccounts.removeIf(a -> {
|
||||
try {
|
||||
List<Integer> groupIdsToMint = Groups.getGroupIdsToMint(BlockChain.getInstance(), height);
|
||||
String address = Account.getRewardShareMintingAddress(repository, a.getPublicKey());
|
||||
boolean isMinterGroupMember = Groups.memberExistsInAnyGroup(repository.getGroupRepository(), groupIdsToMint, address);
|
||||
return !isMinterGroupMember;
|
||||
} catch (DataException e) {
|
||||
// Something went wrong, so remove the account
|
||||
return true;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (onlineAccounts.isEmpty()) {
|
||||
LOGGER.debug("No online accounts - not even our own?");
|
||||
return null;
|
||||
@@ -721,19 +759,11 @@ public class Block {
|
||||
List<ExpandedAccount> expandedAccounts = new ArrayList<>();
|
||||
|
||||
for (RewardShareData rewardShare : this.cachedOnlineRewardShares) {
|
||||
if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
|
||||
expandedAccounts.add(new ExpandedAccount(repository, rewardShare));
|
||||
}
|
||||
if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight()) {
|
||||
boolean isMinterGroupMember = repository.getGroupRepository().memberExists(BlockChain.getInstance().getMintingGroupId(), rewardShare.getMinter());
|
||||
if (isMinterGroupMember) {
|
||||
expandedAccounts.add(new ExpandedAccount(repository, rewardShare));
|
||||
}
|
||||
}
|
||||
expandedAccounts.add(new ExpandedAccount(repository, rewardShare, this.blockData.getHeight()));
|
||||
}
|
||||
|
||||
|
||||
this.cachedExpandedAccounts = expandedAccounts;
|
||||
LOGGER.trace(() -> String.format("Online reward-shares after expanded accounts %s", this.cachedOnlineRewardShares));
|
||||
|
||||
return this.cachedExpandedAccounts;
|
||||
}
|
||||
@@ -1139,14 +1169,32 @@ public class Block {
|
||||
if (onlineRewardShares == null)
|
||||
return ValidationResult.ONLINE_ACCOUNT_UNKNOWN;
|
||||
|
||||
// After feature trigger, require all online account minters to be greater than level 0
|
||||
if (this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
|
||||
List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
|
||||
// After feature trigger, require all online account minters to be greater than level 0,
|
||||
// but only if it is before the feature trigger where we ignore level again
|
||||
if (this.blockData.getHeight() < BlockChain.getInstance().getIgnoreLevelForRewardShareHeight() &&
|
||||
this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
|
||||
List<ExpandedAccount> expandedAccounts
|
||||
= this.getExpandedAccounts().stream()
|
||||
.filter(expandedAccount -> expandedAccount.isMinterMember)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
for (ExpandedAccount account : expandedAccounts) {
|
||||
if (account.getMintingAccount().getEffectiveMintingLevel() == 0)
|
||||
return ValidationResult.ONLINE_ACCOUNTS_INVALID;
|
||||
|
||||
if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight()) {
|
||||
if (!account.isMinterMember)
|
||||
return ValidationResult.ONLINE_ACCOUNTS_INVALID;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (this.blockData.getHeight() >= BlockChain.getInstance().getIgnoreLevelForRewardShareHeight()){
|
||||
Optional<ExpandedAccount> anyInvalidAccount
|
||||
= this.getExpandedAccounts().stream()
|
||||
.filter(account -> !account.isMinterMember)
|
||||
.findAny();
|
||||
if( anyInvalidAccount.isPresent() ) return ValidationResult.ONLINE_ACCOUNTS_INVALID;
|
||||
}
|
||||
|
||||
// If block is past a certain age then we simply assume the signatures were correct
|
||||
long signatureRequirementThreshold = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMinLifetime();
|
||||
@@ -1273,6 +1321,7 @@ public class Block {
|
||||
|
||||
// Online Accounts
|
||||
ValidationResult onlineAccountsResult = this.areOnlineAccountsValid();
|
||||
LOGGER.trace("Accounts valid = {}", onlineAccountsResult);
|
||||
if (onlineAccountsResult != ValidationResult.OK)
|
||||
return onlineAccountsResult;
|
||||
|
||||
@@ -1361,7 +1410,7 @@ public class Block {
|
||||
// Check transaction can even be processed
|
||||
validationResult = transaction.isProcessable();
|
||||
if (validationResult != Transaction.ValidationResult.OK) {
|
||||
LOGGER.info(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
|
||||
LOGGER.debug(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
|
||||
return ValidationResult.TRANSACTION_INVALID;
|
||||
}
|
||||
|
||||
@@ -1562,6 +1611,7 @@ public class Block {
|
||||
this.blockData.setHeight(blockchainHeight + 1);
|
||||
|
||||
LOGGER.trace(() -> String.format("Processing block %d", this.blockData.getHeight()));
|
||||
LOGGER.trace(() -> String.format("Online Reward Shares in process %s", this.cachedOnlineRewardShares));
|
||||
|
||||
if (this.blockData.getHeight() > 1) {
|
||||
|
||||
@@ -1590,6 +1640,8 @@ public class Block {
|
||||
SelfSponsorshipAlgoV2Block.processAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
|
||||
SelfSponsorshipAlgoV3Block.processAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
|
||||
PrimaryNamesBlock.processNames(this.repository);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1633,7 +1685,17 @@ public class Block {
|
||||
final List<Integer> cumulativeBlocksByLevel = BlockChain.getInstance().getCumulativeBlocksByLevel();
|
||||
final int maximumLevel = cumulativeBlocksByLevel.size() - 1;
|
||||
|
||||
final List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
|
||||
final List<ExpandedAccount> expandedAccounts;
|
||||
|
||||
if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
|
||||
expandedAccounts = this.getExpandedAccounts().stream().collect(Collectors.toList());
|
||||
}
|
||||
else {
|
||||
expandedAccounts
|
||||
= this.getExpandedAccounts().stream()
|
||||
.filter(expandedAccount -> expandedAccount.isMinterMember)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
Set<AccountData> allUniqueExpandedAccounts = new HashSet<>();
|
||||
for (ExpandedAccount expandedAccount : expandedAccounts) {
|
||||
@@ -1661,11 +1723,19 @@ public class Block {
|
||||
accountData.setBlocksMinted(accountData.getBlocksMinted() + 1);
|
||||
LOGGER.trace(() -> String.format("Block minter %s up to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
|
||||
int blocksMintedAdjustment
|
||||
=
|
||||
(this.blockData.getHeight() > BlockChain.getInstance().getMintedBlocksAdjustmentRemovalHeight())
|
||||
?
|
||||
0
|
||||
:
|
||||
accountData.getBlocksMintedAdjustment();
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + blocksMintedAdjustment + accountData.getBlocksMintedPenalty();
|
||||
|
||||
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
|
||||
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
|
||||
if (newLevel > accountData.getLevel()) {
|
||||
if (newLevel != accountData.getLevel()) {
|
||||
// Account has increased in level!
|
||||
accountData.setLevel(newLevel);
|
||||
bumpedAccounts.put(accountData.getAddress(), newLevel);
|
||||
@@ -1892,6 +1962,8 @@ public class Block {
|
||||
SelfSponsorshipAlgoV2Block.orphanAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
|
||||
SelfSponsorshipAlgoV3Block.orphanAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
|
||||
PrimaryNamesBlock.orphanNames( this.repository );
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2033,7 +2105,17 @@ public class Block {
|
||||
final List<Integer> cumulativeBlocksByLevel = BlockChain.getInstance().getCumulativeBlocksByLevel();
|
||||
final int maximumLevel = cumulativeBlocksByLevel.size() - 1;
|
||||
|
||||
final List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
|
||||
final List<ExpandedAccount> expandedAccounts;
|
||||
|
||||
if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
|
||||
expandedAccounts = this.getExpandedAccounts().stream().collect(Collectors.toList());
|
||||
}
|
||||
else {
|
||||
expandedAccounts
|
||||
= this.getExpandedAccounts().stream()
|
||||
.filter(expandedAccount -> expandedAccount.isMinterMember)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
Set<AccountData> allUniqueExpandedAccounts = new HashSet<>();
|
||||
for (ExpandedAccount expandedAccount : expandedAccounts) {
|
||||
@@ -2057,11 +2139,19 @@ public class Block {
|
||||
accountData.setBlocksMinted(accountData.getBlocksMinted() - 1);
|
||||
LOGGER.trace(() -> String.format("Block minter %s down to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
|
||||
int blocksMintedAdjustment
|
||||
=
|
||||
(this.blockData.getHeight() -1 > BlockChain.getInstance().getMintedBlocksAdjustmentRemovalHeight())
|
||||
?
|
||||
0
|
||||
:
|
||||
accountData.getBlocksMintedAdjustment();
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + blocksMintedAdjustment + accountData.getBlocksMintedPenalty();
|
||||
|
||||
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
|
||||
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
|
||||
if (newLevel < accountData.getLevel()) {
|
||||
if (newLevel != accountData.getLevel()) {
|
||||
// Account has decreased in level!
|
||||
accountData.setLevel(newLevel);
|
||||
repository.getAccountRepository().setLevel(accountData);
|
||||
@@ -2237,7 +2327,17 @@ public class Block {
|
||||
List<BlockRewardCandidate> rewardCandidates = new ArrayList<>();
|
||||
|
||||
// All online accounts
|
||||
final List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
|
||||
final List<ExpandedAccount> expandedAccounts;
|
||||
|
||||
if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
|
||||
expandedAccounts = this.getExpandedAccounts().stream().collect(Collectors.toList());
|
||||
}
|
||||
else {
|
||||
expandedAccounts
|
||||
= this.getExpandedAccounts().stream()
|
||||
.filter(expandedAccount -> expandedAccount.isMinterMember)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
/*
|
||||
* Distribution rules:
|
||||
@@ -2280,7 +2380,6 @@ public class Block {
|
||||
// Select the correct set of share bins based on block height
|
||||
List<AccountLevelShareBin> accountLevelShareBinsForBlock = (this.blockData.getHeight() >= BlockChain.getInstance().getSharesByLevelV2Height()) ?
|
||||
BlockChain.getInstance().getAccountLevelShareBinsV2() : BlockChain.getInstance().getAccountLevelShareBinsV1();
|
||||
|
||||
// Determine reward candidates based on account level
|
||||
// This needs a deep copy, so the shares can be modified when tiers aren't activated yet
|
||||
List<AccountLevelShareBin> accountLevelShareBins = new ArrayList<>();
|
||||
@@ -2363,7 +2462,7 @@ public class Block {
|
||||
final long qoraHoldersShare = BlockChain.getInstance().getQoraHoldersShareAtHeight(this.blockData.getHeight());
|
||||
|
||||
// Perform account-level-based reward scaling if appropriate
|
||||
if (!haveFounders) {
|
||||
if (!haveFounders && this.blockData.getHeight() < BlockChain.getInstance().getAdminsReplaceFoundersHeight() ) {
|
||||
// Recalculate distribution ratios based on candidates
|
||||
|
||||
// Nothing shared? This shouldn't happen
|
||||
@@ -2399,18 +2498,103 @@ public class Block {
|
||||
}
|
||||
|
||||
// Add founders as reward candidate if appropriate
|
||||
if (haveFounders) {
|
||||
if (haveFounders && this.blockData.getHeight() < BlockChain.getInstance().getAdminsReplaceFoundersHeight()) {
|
||||
// Yes: add to reward candidates list
|
||||
BlockRewardDistributor founderDistributor = (distributionAmount, balanceChanges) -> distributeBlockRewardShare(distributionAmount, onlineFounderAccounts, balanceChanges);
|
||||
|
||||
final long foundersShare = 1_00000000 - totalShares;
|
||||
BlockRewardCandidate rewardCandidate = new BlockRewardCandidate("Founders", foundersShare, founderDistributor);
|
||||
rewardCandidates.add(rewardCandidate);
|
||||
LOGGER.info("logging foundersShare prior to reward modifications {}",foundersShare);
|
||||
}
|
||||
else if (this.blockData.getHeight() >= BlockChain.getInstance().getAdminsReplaceFoundersHeight()) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
GroupRepository groupRepository = repository.getGroupRepository();
|
||||
|
||||
List<Integer> mintingGroupIds = Groups.getGroupIdsToMint(BlockChain.getInstance(), this.blockData.getHeight());
|
||||
|
||||
// all minter admins
|
||||
List<String> minterAdmins = Groups.getAllAdmins(groupRepository, mintingGroupIds);
|
||||
|
||||
// all minter admins that are online
|
||||
List<ExpandedAccount> onlineMinterAdminAccounts
|
||||
= expandedAccounts.stream()
|
||||
.filter(expandedAccount -> minterAdmins.contains(expandedAccount.getMintingAccount().getAddress()))
|
||||
.collect(Collectors.toList());
|
||||
|
||||
long minterAdminShare;
|
||||
|
||||
if( onlineMinterAdminAccounts.isEmpty() ) {
|
||||
minterAdminShare = 0;
|
||||
}
|
||||
else {
|
||||
BlockRewardDistributor minterAdminDistributor
|
||||
= (distributionAmount, balanceChanges)
|
||||
->
|
||||
distributeBlockRewardShare(distributionAmount, onlineMinterAdminAccounts, balanceChanges);
|
||||
|
||||
long adminShare = 1_00000000 - totalShares;
|
||||
LOGGER.info("initial total Shares: {}", totalShares);
|
||||
LOGGER.info("logging adminShare after hardfork, this is the primary reward that will be split {}", adminShare);
|
||||
|
||||
minterAdminShare = adminShare / 2;
|
||||
BlockRewardCandidate minterAdminRewardCandidate
|
||||
= new BlockRewardCandidate("Minter Admins", minterAdminShare, minterAdminDistributor);
|
||||
rewardCandidates.add(minterAdminRewardCandidate);
|
||||
|
||||
totalShares += minterAdminShare;
|
||||
}
|
||||
|
||||
LOGGER.info("MINTER ADMIN SHARE: {}",minterAdminShare);
|
||||
|
||||
// all dev admins
|
||||
List<String> devAdminAddresses
|
||||
= groupRepository.getGroupAdmins(1).stream()
|
||||
.map(GroupAdminData::getAdmin)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
LOGGER.info("Removing NULL Account Address, Dev Admin Count = {}", devAdminAddresses.size());
|
||||
devAdminAddresses.removeIf( address -> Group.NULL_OWNER_ADDRESS.equals(address) );
|
||||
LOGGER.info("Removed NULL Account Address, Dev Admin Count = {}", devAdminAddresses.size());
|
||||
|
||||
BlockRewardDistributor devAdminDistributor
|
||||
= (distributionAmount, balanceChanges) -> distributeToAccounts(distributionAmount, devAdminAddresses, balanceChanges);
|
||||
|
||||
long devAdminShare = 1_00000000 - totalShares;
|
||||
LOGGER.info("DEV ADMIN SHARE: {}",devAdminShare);
|
||||
BlockRewardCandidate devAdminRewardCandidate
|
||||
= new BlockRewardCandidate("Dev Admins", devAdminShare,devAdminDistributor);
|
||||
rewardCandidates.add(devAdminRewardCandidate);
|
||||
}
|
||||
}
|
||||
|
||||
return rewardCandidates;
|
||||
}
|
||||
|
||||
/**
|
||||
* Distribute To Accounts
|
||||
*
|
||||
* Merges distribute shares to a map of distribution shares.
|
||||
*
|
||||
* @param distributionAmount the amount to distribute
|
||||
* @param accountAddressess the addresses to distribute to
|
||||
* @param balanceChanges the map of distribution shares, this gets appended to
|
||||
*
|
||||
* @return the total amount mapped to addresses for distribution
|
||||
*/
|
||||
public static long distributeToAccounts(long distributionAmount, List<String> accountAddressess, Map<String, Long> balanceChanges) {
|
||||
|
||||
if( accountAddressess.isEmpty() ) return 0;
|
||||
|
||||
long distibutionShare = distributionAmount / accountAddressess.size();
|
||||
|
||||
for(String accountAddress : accountAddressess ) {
|
||||
balanceChanges.merge(accountAddress, distibutionShare, Long::sum);
|
||||
}
|
||||
|
||||
return distibutionShare * accountAddressess.size();
|
||||
}
|
||||
|
||||
private static long distributeBlockRewardShare(long distributionAmount, List<ExpandedAccount> accounts, Map<String, Long> balanceChanges) {
|
||||
// Collate all expanded accounts by minting account
|
||||
Map<String, List<ExpandedAccount>> accountsByMinter = new HashMap<>();
|
||||
@@ -2570,9 +2754,11 @@ public class Block {
|
||||
return;
|
||||
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(this.repository, this.getMinter().getPublicKey());
|
||||
String minterAddress = Account.getRewardShareMintingAddress(this.repository, this.getMinter().getPublicKey());
|
||||
|
||||
LOGGER.debug(String.format("======= BLOCK %d (%.8s) =======", this.getBlockData().getHeight(), Base58.encode(this.getSignature())));
|
||||
LOGGER.debug(String.format("Timestamp: %d", this.getBlockData().getTimestamp()));
|
||||
LOGGER.debug(String.format("Minter address: %s", minterAddress));
|
||||
LOGGER.debug(String.format("Minter level: %d", minterLevel));
|
||||
LOGGER.debug(String.format("Online accounts: %d", this.getBlockData().getOnlineAccountsCount()));
|
||||
LOGGER.debug(String.format("AT count: %d", this.getBlockData().getATCount()));
|
||||
|
@@ -88,7 +88,13 @@ public class BlockChain {
|
||||
onlyMintWithNameHeight,
|
||||
removeOnlyMintWithNameHeight,
|
||||
groupMemberCheckHeight,
|
||||
fixBatchRewardHeight
|
||||
fixBatchRewardHeight,
|
||||
adminsReplaceFoundersHeight,
|
||||
nullGroupMembershipHeight,
|
||||
ignoreLevelForRewardShareHeight,
|
||||
adminQueryFixHeight,
|
||||
multipleNamesPerAccountHeight,
|
||||
mintedBlocksAdjustmentRemovalHeight
|
||||
}
|
||||
|
||||
// Custom transaction fees
|
||||
@@ -108,7 +114,8 @@ public class BlockChain {
|
||||
/** Whether to use legacy, broken RIPEMD160 implementation when converting public keys to addresses. */
|
||||
private boolean useBrokenMD160ForAddresses = false;
|
||||
|
||||
/** Whether only one registered name is allowed per account. */
|
||||
/** This should get ignored and overwritten in the oneNamePerAccount(int blockchainHeight) method,
|
||||
* because it is based on block height, not based on the genesis block.*/
|
||||
private boolean oneNamePerAccount = false;
|
||||
|
||||
/** Checkpoints */
|
||||
@@ -208,7 +215,13 @@ public class BlockChain {
|
||||
private int minAccountLevelToRewardShare;
|
||||
private int maxRewardSharesPerFounderMintingAccount;
|
||||
private int founderEffectiveMintingLevel;
|
||||
private int mintingGroupId;
|
||||
|
||||
public static class IdsForHeight {
|
||||
public int height;
|
||||
public List<Integer> ids;
|
||||
}
|
||||
|
||||
private List<IdsForHeight> mintingGroupIds;
|
||||
|
||||
/** Minimum time to retain online account signatures (ms) for block validity checks. */
|
||||
private long onlineAccountSignaturesMinLifetime;
|
||||
@@ -464,8 +477,9 @@ public class BlockChain {
|
||||
return this.useBrokenMD160ForAddresses;
|
||||
}
|
||||
|
||||
public boolean oneNamePerAccount() {
|
||||
return this.oneNamePerAccount;
|
||||
public boolean oneNamePerAccount(int blockchainHeight) {
|
||||
// this is not set on a simple blockchain setting, it is based on a feature trigger height
|
||||
return blockchainHeight < this.getMultipleNamesPerAccountHeight();
|
||||
}
|
||||
|
||||
public List<Checkpoint> getCheckpoints() {
|
||||
@@ -540,8 +554,8 @@ public class BlockChain {
|
||||
return this.onlineAccountSignaturesMaxLifetime;
|
||||
}
|
||||
|
||||
public int getMintingGroupId() {
|
||||
return this.mintingGroupId;
|
||||
public List<IdsForHeight> getMintingGroupIds() {
|
||||
return mintingGroupIds;
|
||||
}
|
||||
|
||||
public CiyamAtSettings getCiyamAtSettings() {
|
||||
@@ -662,6 +676,30 @@ public class BlockChain {
|
||||
return this.featureTriggers.get(FeatureTrigger.fixBatchRewardHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getAdminsReplaceFoundersHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.adminsReplaceFoundersHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getNullGroupMembershipHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.nullGroupMembershipHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getIgnoreLevelForRewardShareHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.ignoreLevelForRewardShareHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getAdminQueryFixHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.adminQueryFixHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getMultipleNamesPerAccountHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.multipleNamesPerAccountHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getMintedBlocksAdjustmentRemovalHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.mintedBlocksAdjustmentRemovalHeight.name()).intValue();
|
||||
}
|
||||
|
||||
// More complex getters for aspects that change by height or timestamp
|
||||
|
||||
public long getRewardAtHeight(int ourHeight) {
|
||||
|
47
src/main/java/org/qortal/block/PrimaryNamesBlock.java
Normal file
47
src/main/java/org/qortal/block/PrimaryNamesBlock.java
Normal file
@@ -0,0 +1,47 @@
|
||||
package org.qortal.block;
|
||||
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.TransactionsResource;
|
||||
import org.qortal.data.naming.NameData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Class PrimaryNamesBlock
|
||||
*/
|
||||
public class PrimaryNamesBlock {
|
||||
|
||||
/**
|
||||
* Process Primary Names
|
||||
*
|
||||
* @param repository
|
||||
* @throws DataException
|
||||
*/
|
||||
public static void processNames(Repository repository) throws DataException {
|
||||
|
||||
Set<String> addressesWithNames
|
||||
= repository.getNameRepository().getAllNames().stream()
|
||||
.map(NameData::getOwner).collect(Collectors.toSet());
|
||||
|
||||
// for each address with a name, set primary name to the address
|
||||
for( String address : addressesWithNames ) {
|
||||
|
||||
Account account = new Account(repository, address);
|
||||
account.resetPrimaryName(TransactionsResource.ConfirmationStatus.CONFIRMED);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Orphan the Primary Names Block
|
||||
*
|
||||
* @param repository
|
||||
* @throws DataException
|
||||
*/
|
||||
public static void orphanNames(Repository repository) throws DataException {
|
||||
|
||||
repository.getNameRepository().clearPrimaryNames();
|
||||
}
|
||||
}
|
@@ -97,364 +97,375 @@ public class BlockMinter extends Thread {
|
||||
|
||||
final boolean isSingleNodeTestnet = Settings.getInstance().isSingleNodeTestnet();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Going to need this a lot...
|
||||
BlockRepository blockRepository = repository.getBlockRepository();
|
||||
|
||||
// Flags for tracking change in whether minting is possible,
|
||||
// so we can notify Controller, and further update SysTray, etc.
|
||||
boolean isMintingPossible = false;
|
||||
boolean wasMintingPossible = isMintingPossible;
|
||||
// Flags for tracking change in whether minting is possible,
|
||||
// so we can notify Controller, and further update SysTray, etc.
|
||||
boolean isMintingPossible = false;
|
||||
boolean wasMintingPossible = isMintingPossible;
|
||||
try {
|
||||
while (running) {
|
||||
if (isMintingPossible != wasMintingPossible)
|
||||
Controller.getInstance().onMintingPossibleChange(isMintingPossible);
|
||||
// recreate repository for new loop iteration
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
wasMintingPossible = isMintingPossible;
|
||||
// Going to need this a lot...
|
||||
BlockRepository blockRepository = repository.getBlockRepository();
|
||||
|
||||
try {
|
||||
// Free up any repository locks
|
||||
repository.discardChanges();
|
||||
if (isMintingPossible != wasMintingPossible)
|
||||
Controller.getInstance().onMintingPossibleChange(isMintingPossible);
|
||||
|
||||
// Sleep for a while.
|
||||
// It's faster on single node testnets, to allow lots of blocks to be minted quickly.
|
||||
Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
|
||||
|
||||
isMintingPossible = false;
|
||||
|
||||
final Long now = NTP.getTime();
|
||||
if (now == null)
|
||||
continue;
|
||||
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null)
|
||||
continue;
|
||||
|
||||
List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
|
||||
// No minting accounts?
|
||||
if (mintingAccountsData.isEmpty())
|
||||
continue;
|
||||
|
||||
// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
|
||||
// Note that minting accounts are actually reward-shares in Qortal
|
||||
Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
|
||||
while (madi.hasNext()) {
|
||||
MintingAccountData mintingAccountData = madi.next();
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
|
||||
if (rewardShareData == null) {
|
||||
// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
Account mintingAccount = new Account(repository, rewardShareData.getMinter());
|
||||
if (!mintingAccount.canMint(true)) {
|
||||
// Minting-account component of reward-share can no longer mint - disregard
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Optional (non-validated) prevention of block submissions below a defined level.
|
||||
// This is an unvalidated version of Blockchain.minAccountLevelToMint
|
||||
// and exists only to reduce block candidates by default.
|
||||
int level = mintingAccount.getEffectiveMintingLevel();
|
||||
if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
|
||||
madi.remove();
|
||||
}
|
||||
}
|
||||
|
||||
// Needs a mutable copy of the unmodifiableList
|
||||
List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
|
||||
BlockData lastBlockData = blockRepository.getLastBlock();
|
||||
|
||||
// Disregard peers that have "misbehaved" recently
|
||||
peers.removeIf(Controller.hasMisbehaved);
|
||||
|
||||
// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
|
||||
// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
|
||||
if (!Synchronizer.getInstance().getRecoveryMode())
|
||||
peers.removeIf(Controller.hasNoRecentBlock);
|
||||
|
||||
// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
|
||||
if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
|
||||
continue;
|
||||
|
||||
// If we are stuck on an invalid block, we should allow an alternative to be minted
|
||||
boolean recoverInvalidBlock = false;
|
||||
if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
|
||||
// We've had at least one invalid block
|
||||
long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
|
||||
long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
|
||||
if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
// Last valid block was more than 10 mins ago, but we've had an invalid block since then
|
||||
// Assume that the chain has stalled because there is no alternative valid candidate
|
||||
// Enter recovery mode to allow alternative, valid candidates to be minted
|
||||
recoverInvalidBlock = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
|
||||
if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
|
||||
if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
|
||||
continue;
|
||||
|
||||
// There are enough peers with a recent block and our latest block is recent
|
||||
// so go ahead and mint a block if possible.
|
||||
isMintingPossible = true;
|
||||
|
||||
// Check blockchain hasn't changed
|
||||
if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
|
||||
previousBlockData = lastBlockData;
|
||||
newBlocks.clear();
|
||||
|
||||
// Reduce log timeout
|
||||
logTimeout = 10 * 1000L;
|
||||
|
||||
// Last low weight block is no longer valid
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
}
|
||||
|
||||
// Discard accounts we have already built blocks with
|
||||
mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
|
||||
|
||||
// Do we need to build any potential new blocks?
|
||||
List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
|
||||
|
||||
// We might need to sit the next block out, if one of our minting accounts signed the previous one
|
||||
// Skip this check for single node testnets, since they definitely need to mint every block
|
||||
byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
|
||||
boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
|
||||
if (mintedLastBlock && !isSingleNodeTestnet) {
|
||||
LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (parentSignatureForLastLowWeightBlock != null) {
|
||||
// The last iteration found a higher weight block in the network, so sleep for a while
|
||||
// to allow is to sync the higher weight chain. We are sleeping here rather than when
|
||||
// detected as we don't want to hold the blockchain lock open.
|
||||
LOGGER.info("Sleeping for 10 seconds...");
|
||||
Thread.sleep(10 * 1000L);
|
||||
}
|
||||
|
||||
for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
|
||||
// First block does the AT heavy-lifting
|
||||
if (newBlocks.isEmpty()) {
|
||||
Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
|
||||
continue;
|
||||
}
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
} else {
|
||||
// The blocks for other minters require less effort...
|
||||
Block newBlock = newBlocks.get(0).remint(mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
|
||||
continue;
|
||||
}
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
}
|
||||
}
|
||||
|
||||
// No potential block candidates?
|
||||
if (newBlocks.isEmpty())
|
||||
continue;
|
||||
|
||||
// Make sure we're the only thread modifying the blockchain
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
|
||||
LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
|
||||
continue;
|
||||
}
|
||||
|
||||
boolean newBlockMinted = false;
|
||||
Block newBlock = null;
|
||||
wasMintingPossible = isMintingPossible;
|
||||
|
||||
try {
|
||||
// Clear repository session state so we have latest view of data
|
||||
// reset the repository, to the repository recreated for this loop iteration
|
||||
for( Block newBlock : newBlocks ) newBlock.setRepository(repository);
|
||||
|
||||
// Free up any repository locks
|
||||
repository.discardChanges();
|
||||
|
||||
// Now that we have blockchain lock, do final check that chain hasn't changed
|
||||
BlockData latestBlockData = blockRepository.getLastBlock();
|
||||
if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
|
||||
// Sleep for a while.
|
||||
// It's faster on single node testnets, to allow lots of blocks to be minted quickly.
|
||||
Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
|
||||
|
||||
isMintingPossible = false;
|
||||
|
||||
final Long now = NTP.getTime();
|
||||
if (now == null)
|
||||
continue;
|
||||
|
||||
List<Block> goodBlocks = new ArrayList<>();
|
||||
boolean wasInvalidBlockDiscarded = false;
|
||||
Iterator<Block> newBlocksIterator = newBlocks.iterator();
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null)
|
||||
continue;
|
||||
|
||||
while (newBlocksIterator.hasNext()) {
|
||||
Block testBlock = newBlocksIterator.next();
|
||||
List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
|
||||
// No minting accounts?
|
||||
if (mintingAccountsData.isEmpty())
|
||||
continue;
|
||||
|
||||
// Is new block's timestamp valid yet?
|
||||
// We do a separate check as some timestamp checks are skipped for testchains
|
||||
if (testBlock.isTimestampValid() != ValidationResult.OK)
|
||||
// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
|
||||
// Note that minting accounts are actually reward-shares in Qortal
|
||||
Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
|
||||
while (madi.hasNext()) {
|
||||
MintingAccountData mintingAccountData = madi.next();
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
|
||||
if (rewardShareData == null) {
|
||||
// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
Account mintingAccount = new Account(repository, rewardShareData.getMinter());
|
||||
if (!mintingAccount.canMint(true)) {
|
||||
// Minting-account component of reward-share can no longer mint - disregard
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Optional (non-validated) prevention of block submissions below a defined level.
|
||||
// This is an unvalidated version of Blockchain.minAccountLevelToMint
|
||||
// and exists only to reduce block candidates by default.
|
||||
int level = mintingAccount.getEffectiveMintingLevel();
|
||||
if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
|
||||
madi.remove();
|
||||
}
|
||||
}
|
||||
|
||||
// Needs a mutable copy of the unmodifiableList
|
||||
List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
|
||||
BlockData lastBlockData = blockRepository.getLastBlock();
|
||||
|
||||
// Disregard peers that have "misbehaved" recently
|
||||
peers.removeIf(Controller.hasMisbehaved);
|
||||
|
||||
// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
|
||||
// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
|
||||
if (!Synchronizer.getInstance().getRecoveryMode())
|
||||
peers.removeIf(Controller.hasNoRecentBlock);
|
||||
|
||||
// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
|
||||
if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
|
||||
continue;
|
||||
|
||||
// If we are stuck on an invalid block, we should allow an alternative to be minted
|
||||
boolean recoverInvalidBlock = false;
|
||||
if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
|
||||
// We've had at least one invalid block
|
||||
long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
|
||||
long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
|
||||
if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
// Last valid block was more than 10 mins ago, but we've had an invalid block since then
|
||||
// Assume that the chain has stalled because there is no alternative valid candidate
|
||||
// Enter recovery mode to allow alternative, valid candidates to be minted
|
||||
recoverInvalidBlock = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
|
||||
if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
|
||||
if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
|
||||
continue;
|
||||
|
||||
testBlock.preProcess();
|
||||
// There are enough peers with a recent block and our latest block is recent
|
||||
// so go ahead and mint a block if possible.
|
||||
isMintingPossible = true;
|
||||
|
||||
// Is new block valid yet? (Before adding unconfirmed transactions)
|
||||
ValidationResult result = testBlock.isValid();
|
||||
if (result != ValidationResult.OK) {
|
||||
moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
|
||||
// Check blockchain hasn't changed
|
||||
if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
|
||||
previousBlockData = lastBlockData;
|
||||
newBlocks.clear();
|
||||
|
||||
newBlocksIterator.remove();
|
||||
wasInvalidBlockDiscarded = true;
|
||||
/*
|
||||
* Bail out fast so that we loop around from the top again.
|
||||
* This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
|
||||
* via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
|
||||
* Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
|
||||
*/
|
||||
break;
|
||||
}
|
||||
// Reduce log timeout
|
||||
logTimeout = 10 * 1000L;
|
||||
|
||||
goodBlocks.add(testBlock);
|
||||
// Last low weight block is no longer valid
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
}
|
||||
|
||||
if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
|
||||
// Discard accounts we have already built blocks with
|
||||
mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
|
||||
|
||||
// Do we need to build any potential new blocks?
|
||||
List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
|
||||
|
||||
// We might need to sit the next block out, if one of our minting accounts signed the previous one
|
||||
// Skip this check for single node testnets, since they definitely need to mint every block
|
||||
byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
|
||||
boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
|
||||
if (mintedLastBlock && !isSingleNodeTestnet) {
|
||||
LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
|
||||
continue;
|
||||
|
||||
// Pick best block
|
||||
final int parentHeight = previousBlockData.getHeight();
|
||||
final byte[] parentBlockSignature = previousBlockData.getSignature();
|
||||
|
||||
BigInteger bestWeight = null;
|
||||
|
||||
for (int bi = 0; bi < goodBlocks.size(); ++bi) {
|
||||
BlockData blockData = goodBlocks.get(bi).getBlockData();
|
||||
|
||||
BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
|
||||
blockSummaryData.setMinterLevel(minterLevel);
|
||||
|
||||
BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
|
||||
|
||||
if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
|
||||
newBlock = goodBlocks.get(bi);
|
||||
bestWeight = blockWeight;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
if (this.higherWeightChainExists(repository, bestWeight)) {
|
||||
if (parentSignatureForLastLowWeightBlock != null) {
|
||||
// The last iteration found a higher weight block in the network, so sleep for a while
|
||||
// to allow is to sync the higher weight chain. We are sleeping here rather than when
|
||||
// detected as we don't want to hold the blockchain lock open.
|
||||
LOGGER.info("Sleeping for 10 seconds...");
|
||||
Thread.sleep(10 * 1000L);
|
||||
}
|
||||
|
||||
// Check if the base block has updated since the last time we were here
|
||||
if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
|
||||
!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
|
||||
// We've switched to a different chain, so reset the timer
|
||||
timeOfLastLowWeightBlock = NTP.getTime();
|
||||
}
|
||||
parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
|
||||
|
||||
// If less than 30 seconds has passed since first detection the higher weight chain,
|
||||
// we should skip our block submission to give us the opportunity to sync to the better chain
|
||||
if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
|
||||
LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
|
||||
LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
|
||||
for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
|
||||
// First block does the AT heavy-lifting
|
||||
if (newBlocks.isEmpty()) {
|
||||
Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
|
||||
continue;
|
||||
} else {
|
||||
// More than 30 seconds have passed, so we should submit our block candidate anyway.
|
||||
LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
|
||||
}
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
} else {
|
||||
LOGGER.debug("No higher weight chain found in peers");
|
||||
// The blocks for other minters require less effort...
|
||||
Block newBlock = newBlocks.get(0).remint(mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
|
||||
continue;
|
||||
}
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
|
||||
}
|
||||
|
||||
// Discard any uncommitted changes as a result of the higher weight chain detection
|
||||
repository.discardChanges();
|
||||
// No potential block candidates?
|
||||
if (newBlocks.isEmpty())
|
||||
continue;
|
||||
|
||||
// Clear variables that track low weight blocks
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
timeOfLastLowWeightBlock = null;
|
||||
|
||||
Long unconfirmedStartTime = NTP.getTime();
|
||||
|
||||
// Add unconfirmed transactions
|
||||
addUnconfirmedTransactions(repository, newBlock);
|
||||
|
||||
LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime()-unconfirmedStartTime)));
|
||||
|
||||
// Sign to create block's signature
|
||||
newBlock.sign();
|
||||
|
||||
// Is newBlock still valid?
|
||||
ValidationResult validationResult = newBlock.isValid();
|
||||
if (validationResult != ValidationResult.OK) {
|
||||
// No longer valid? Report and discard
|
||||
LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
|
||||
|
||||
// Rebuild block candidates, just to be sure
|
||||
newBlocks.clear();
|
||||
// Make sure we're the only thread modifying the blockchain
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
|
||||
LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
|
||||
continue;
|
||||
}
|
||||
|
||||
// Add to blockchain - something else will notice and broadcast new block to network
|
||||
boolean newBlockMinted = false;
|
||||
Block newBlock = null;
|
||||
|
||||
try {
|
||||
newBlock.process();
|
||||
// Clear repository session state so we have latest view of data
|
||||
repository.discardChanges();
|
||||
|
||||
repository.saveChanges();
|
||||
// Now that we have blockchain lock, do final check that chain hasn't changed
|
||||
BlockData latestBlockData = blockRepository.getLastBlock();
|
||||
if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
|
||||
continue;
|
||||
|
||||
LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
|
||||
List<Block> goodBlocks = new ArrayList<>();
|
||||
boolean wasInvalidBlockDiscarded = false;
|
||||
Iterator<Block> newBlocksIterator = newBlocks.iterator();
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
|
||||
while (newBlocksIterator.hasNext()) {
|
||||
Block testBlock = newBlocksIterator.next();
|
||||
|
||||
if (rewardShareData != null) {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
rewardShareData.getMinter(),
|
||||
rewardShareData.getRecipient()));
|
||||
} else {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
newBlock.getMinter().getAddress()));
|
||||
// Is new block's timestamp valid yet?
|
||||
// We do a separate check as some timestamp checks are skipped for testchains
|
||||
if (testBlock.isTimestampValid() != ValidationResult.OK)
|
||||
continue;
|
||||
|
||||
testBlock.preProcess();
|
||||
|
||||
// Is new block valid yet? (Before adding unconfirmed transactions)
|
||||
ValidationResult result = testBlock.isValid();
|
||||
if (result != ValidationResult.OK) {
|
||||
moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
|
||||
|
||||
newBlocksIterator.remove();
|
||||
wasInvalidBlockDiscarded = true;
|
||||
/*
|
||||
* Bail out fast so that we loop around from the top again.
|
||||
* This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
|
||||
* via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
|
||||
* Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
goodBlocks.add(testBlock);
|
||||
}
|
||||
|
||||
// Notify network after we're released blockchain lock
|
||||
newBlockMinted = true;
|
||||
if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
|
||||
continue;
|
||||
|
||||
// Notify Controller
|
||||
repository.discardChanges(); // clear transaction status to prevent deadlocks
|
||||
Controller.getInstance().onNewBlock(newBlock.getBlockData());
|
||||
} catch (DataException e) {
|
||||
// Unable to process block - report and discard
|
||||
LOGGER.error("Unable to process newly minted block?", e);
|
||||
newBlocks.clear();
|
||||
} catch (ArithmeticException e) {
|
||||
// Unable to process block - report and discard
|
||||
LOGGER.error("Unable to process newly minted block?", e);
|
||||
newBlocks.clear();
|
||||
// Pick best block
|
||||
final int parentHeight = previousBlockData.getHeight();
|
||||
final byte[] parentBlockSignature = previousBlockData.getSignature();
|
||||
|
||||
BigInteger bestWeight = null;
|
||||
|
||||
for (int bi = 0; bi < goodBlocks.size(); ++bi) {
|
||||
BlockData blockData = goodBlocks.get(bi).getBlockData();
|
||||
|
||||
BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
|
||||
blockSummaryData.setMinterLevel(minterLevel);
|
||||
|
||||
BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
|
||||
|
||||
if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
|
||||
newBlock = goodBlocks.get(bi);
|
||||
bestWeight = blockWeight;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
if (this.higherWeightChainExists(repository, bestWeight)) {
|
||||
|
||||
// Check if the base block has updated since the last time we were here
|
||||
if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
|
||||
!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
|
||||
// We've switched to a different chain, so reset the timer
|
||||
timeOfLastLowWeightBlock = NTP.getTime();
|
||||
}
|
||||
parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
|
||||
|
||||
// If less than 30 seconds has passed since first detection the higher weight chain,
|
||||
// we should skip our block submission to give us the opportunity to sync to the better chain
|
||||
if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
|
||||
LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
|
||||
LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
|
||||
continue;
|
||||
} else {
|
||||
// More than 30 seconds have passed, so we should submit our block candidate anyway.
|
||||
LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
|
||||
}
|
||||
} else {
|
||||
LOGGER.debug("No higher weight chain found in peers");
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
|
||||
}
|
||||
|
||||
// Discard any uncommitted changes as a result of the higher weight chain detection
|
||||
repository.discardChanges();
|
||||
|
||||
// Clear variables that track low weight blocks
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
timeOfLastLowWeightBlock = null;
|
||||
|
||||
Long unconfirmedStartTime = NTP.getTime();
|
||||
|
||||
// Add unconfirmed transactions
|
||||
addUnconfirmedTransactions(repository, newBlock);
|
||||
|
||||
LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime() - unconfirmedStartTime)));
|
||||
|
||||
// Sign to create block's signature
|
||||
newBlock.sign();
|
||||
|
||||
// Is newBlock still valid?
|
||||
ValidationResult validationResult = newBlock.isValid();
|
||||
if (validationResult != ValidationResult.OK) {
|
||||
// No longer valid? Report and discard
|
||||
LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
|
||||
|
||||
// Rebuild block candidates, just to be sure
|
||||
newBlocks.clear();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Add to blockchain - something else will notice and broadcast new block to network
|
||||
try {
|
||||
newBlock.process();
|
||||
|
||||
repository.saveChanges();
|
||||
|
||||
LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
|
||||
|
||||
if (rewardShareData != null) {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
rewardShareData.getMinter(),
|
||||
rewardShareData.getRecipient()));
|
||||
} else {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
newBlock.getMinter().getAddress()));
|
||||
}
|
||||
|
||||
// Notify network after we're released blockchain lock
|
||||
newBlockMinted = true;
|
||||
|
||||
// Notify Controller
|
||||
repository.discardChanges(); // clear transaction status to prevent deadlocks
|
||||
Controller.getInstance().onNewBlock(newBlock.getBlockData());
|
||||
} catch (DataException e) {
|
||||
// Unable to process block - report and discard
|
||||
LOGGER.error("Unable to process newly minted block?", e);
|
||||
newBlocks.clear();
|
||||
} catch (ArithmeticException e) {
|
||||
// Unable to process block - report and discard
|
||||
LOGGER.error("Unable to process newly minted block?", e);
|
||||
newBlocks.clear();
|
||||
}
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
|
||||
if (newBlockMinted) {
|
||||
// Broadcast our new chain to network
|
||||
Network.getInstance().broadcastOurChain();
|
||||
}
|
||||
if (newBlockMinted) {
|
||||
// Broadcast our new chain to network
|
||||
Network.getInstance().broadcastOurChain();
|
||||
}
|
||||
|
||||
} catch (InterruptedException e) {
|
||||
// We've been interrupted - time to exit
|
||||
return;
|
||||
} catch (InterruptedException e) {
|
||||
// We've been interrupted - time to exit
|
||||
return;
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -13,6 +13,7 @@ import org.qortal.block.Block;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.block.BlockChain.BlockTimingByHeight;
|
||||
import org.qortal.controller.arbitrary.*;
|
||||
import org.qortal.controller.hsqldb.HSQLDBBalanceRecorder;
|
||||
import org.qortal.controller.hsqldb.HSQLDBDataCacheManager;
|
||||
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
|
||||
import org.qortal.controller.repository.PruneManager;
|
||||
@@ -36,7 +37,6 @@ import org.qortal.network.Peer;
|
||||
import org.qortal.network.PeerAddress;
|
||||
import org.qortal.network.message.*;
|
||||
import org.qortal.repository.*;
|
||||
import org.qortal.repository.hsqldb.HSQLDBRepository;
|
||||
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transaction.Transaction;
|
||||
@@ -46,6 +46,7 @@ import org.qortal.utils.*;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
||||
import java.awt.TrayIcon.MessageType;
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
@@ -53,6 +54,7 @@ import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.security.SecureRandom;
|
||||
@@ -70,6 +72,7 @@ import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
public class Controller extends Thread {
|
||||
|
||||
@@ -394,6 +397,9 @@ public class Controller extends Thread {
|
||||
|
||||
Controller.newInstance(args);
|
||||
|
||||
|
||||
cleanChunkUploadTempDir(); // cleanup leftover chunks from streaming to disk
|
||||
|
||||
LOGGER.info("Starting NTP");
|
||||
Long ntpOffset = Settings.getInstance().getTestNtpOffset();
|
||||
if (ntpOffset != null)
|
||||
@@ -403,23 +409,44 @@ public class Controller extends Thread {
|
||||
|
||||
LOGGER.info("Starting repository");
|
||||
try {
|
||||
RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
|
||||
HSQLDBRepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
|
||||
RepositoryManager.setRepositoryFactory(repositoryFactory);
|
||||
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// RepositoryManager.rebuildTransactionSequences(repository);
|
||||
ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, false);
|
||||
}
|
||||
|
||||
if( Settings.getInstance().isDbCacheEnabled() ) {
|
||||
LOGGER.info("Db Cache Starting ...");
|
||||
HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager((HSQLDBRepository) repositoryFactory.getRepository());
|
||||
hsqldbDataCacheManager.start();
|
||||
if( Settings.getInstance().isDbCacheEnabled() ) {
|
||||
LOGGER.info("Db Cache Starting ...");
|
||||
HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager();
|
||||
hsqldbDataCacheManager.start();
|
||||
}
|
||||
else {
|
||||
LOGGER.info("Db Cache Disabled");
|
||||
}
|
||||
|
||||
LOGGER.info("Arbitrary Indexing Starting ...");
|
||||
ArbitraryIndexUtils.startCaching(
|
||||
Settings.getInstance().getArbitraryIndexingPriority(),
|
||||
Settings.getInstance().getArbitraryIndexingFrequency()
|
||||
);
|
||||
|
||||
if( Settings.getInstance().isBalanceRecorderEnabled() ) {
|
||||
Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();
|
||||
|
||||
if( recorder.isPresent() ) {
|
||||
LOGGER.info("Balance Recorder Starting ...");
|
||||
recorder.get().start();
|
||||
}
|
||||
else {
|
||||
LOGGER.info("Db Cache Disabled");
|
||||
LOGGER.info("Balance Recorder won't start.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
LOGGER.info("Balance Recorder Disabled");
|
||||
}
|
||||
} catch (DataException e) {
|
||||
// If exception has no cause or message then repository is in use by some other process.
|
||||
if (e.getCause() == null && e.getMessage() == null) {
|
||||
@@ -524,9 +551,25 @@ public class Controller extends Thread {
|
||||
ArbitraryDataStorageManager.getInstance().start();
|
||||
ArbitraryDataRenderManager.getInstance().start();
|
||||
|
||||
// start rebuild arbitrary resource cache timer task
|
||||
if( Settings.getInstance().isRebuildArbitraryResourceCacheTaskEnabled() ) {
|
||||
new Timer().schedule(
|
||||
new RebuildArbitraryResourceCacheTask(),
|
||||
Settings.getInstance().getRebuildArbitraryResourceCacheTaskDelay() * RebuildArbitraryResourceCacheTask.MILLIS_IN_MINUTE,
|
||||
Settings.getInstance().getRebuildArbitraryResourceCacheTaskPeriod() * RebuildArbitraryResourceCacheTask.MILLIS_IN_HOUR
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
LOGGER.info("Starting online accounts manager");
|
||||
OnlineAccountsManager.getInstance().start();
|
||||
|
||||
LOGGER.info("Starting foreign fees manager");
|
||||
ForeignFeesManager.getInstance().start();
|
||||
|
||||
LOGGER.info("Starting follower");
|
||||
Follower.getInstance().start();
|
||||
|
||||
LOGGER.info("Starting transaction importer");
|
||||
TransactionImporter.getInstance().start();
|
||||
|
||||
@@ -639,10 +682,8 @@ public class Controller extends Thread {
|
||||
boolean canBootstrap = Settings.getInstance().getBootstrap();
|
||||
boolean needsArchiveRebuild = false;
|
||||
int checkHeight = 0;
|
||||
Repository repository = null;
|
||||
|
||||
try {
|
||||
repository = RepositoryManager.getRepository();
|
||||
try (final Repository repository = RepositoryManager.getRepository()){
|
||||
needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null);
|
||||
checkHeight = repository.getBlockRepository().getBlockchainHeight();
|
||||
} catch (DataException e) {
|
||||
@@ -1099,6 +1140,9 @@ public class Controller extends Thread {
|
||||
LOGGER.info("Shutting down online accounts manager");
|
||||
OnlineAccountsManager.getInstance().shutdown();
|
||||
|
||||
LOGGER.info("Shutting down foreign fees manager");
|
||||
ForeignFeesManager.getInstance().shutdown();
|
||||
|
||||
LOGGER.info("Shutting down transaction importer");
|
||||
TransactionImporter.getInstance().shutdown();
|
||||
|
||||
@@ -1443,6 +1487,14 @@ public class Controller extends Thread {
|
||||
OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV3Message(peer, message);
|
||||
break;
|
||||
|
||||
case GET_FOREIGN_FEES:
|
||||
ForeignFeesManager.getInstance().onNetworkGetForeignFeesMessage(peer, message);
|
||||
break;
|
||||
|
||||
case FOREIGN_FEES:
|
||||
ForeignFeesManager.getInstance().onNetworkForeignFeesMessage(peer, message);
|
||||
break;
|
||||
|
||||
case GET_ARBITRARY_DATA:
|
||||
// Not currently supported
|
||||
break;
|
||||
@@ -2129,6 +2181,24 @@ public class Controller extends Thread {
|
||||
return now - offset;
|
||||
}
|
||||
|
||||
private static void cleanChunkUploadTempDir() {
|
||||
Path uploadsTemp = Paths.get("uploads-temp");
|
||||
if (!Files.exists(uploadsTemp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
try (Stream<Path> paths = Files.walk(uploadsTemp)) {
|
||||
paths.sorted(Comparator.reverseOrder())
|
||||
.map(Path::toFile)
|
||||
.forEach(File::delete);
|
||||
|
||||
LOGGER.info("Cleaned up all temporary uploads in {}", uploadsTemp);
|
||||
} catch (IOException e) {
|
||||
LOGGER.warn("Failed to clean up uploads-temp directory", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public StatsSnapshot getStatsSnapshot() {
|
||||
return this.stats;
|
||||
}
|
||||
|
1202
src/main/java/org/qortal/controller/ForeignFeesManager.java
Normal file
1202
src/main/java/org/qortal/controller/ForeignFeesManager.java
Normal file
File diff suppressed because it is too large
Load Diff
@@ -25,6 +25,7 @@ import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.Groups;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.NamedThreadFactory;
|
||||
|
||||
@@ -225,11 +226,14 @@ public class OnlineAccountsManager {
|
||||
Set<OnlineAccountData> onlineAccountsToAdd = new HashSet<>();
|
||||
Set<OnlineAccountData> onlineAccountsToRemove = new HashSet<>();
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
int blockHeight = repository.getBlockRepository().getBlockchainHeight();
|
||||
|
||||
List<String> mintingGroupMemberAddresses
|
||||
= repository.getGroupRepository()
|
||||
.getGroupMembers(BlockChain.getInstance().getMintingGroupId()).stream()
|
||||
.map(GroupMemberData::getMember)
|
||||
.collect(Collectors.toList());
|
||||
= Groups.getAllMembers(
|
||||
repository.getGroupRepository(),
|
||||
Groups.getGroupIdsToMint(BlockChain.getInstance(), blockHeight)
|
||||
);
|
||||
|
||||
for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) {
|
||||
if (isStopping)
|
||||
|
@@ -2,6 +2,7 @@ package org.qortal.controller;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.arbitrary.PeerMessage;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.network.Network;
|
||||
@@ -20,7 +21,11 @@ import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class TransactionImporter extends Thread {
|
||||
@@ -50,6 +55,10 @@ public class TransactionImporter extends Thread {
|
||||
/** Cached list of unconfirmed transactions, used when counting per creator. This is replaced regularly */
|
||||
public static List<TransactionData> unconfirmedTransactionsCache = null;
|
||||
|
||||
public TransactionImporter() {
|
||||
signatureMessageScheduler.scheduleAtFixedRate(this::processNetworkTransactionSignaturesMessage, 60, 1, TimeUnit.SECONDS);
|
||||
getTransactionMessageScheduler.scheduleAtFixedRate(this::processNetworkGetTransactionMessages, 60, 1, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public static synchronized TransactionImporter getInstance() {
|
||||
if (instance == null) {
|
||||
@@ -371,36 +380,104 @@ public class TransactionImporter extends Thread {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> getTransactionMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object getTransactionMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService getTransactionMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkGetTransactionMessage(Peer peer, Message message) {
|
||||
GetTransactionMessage getTransactionMessage = (GetTransactionMessage) message;
|
||||
byte[] signature = getTransactionMessage.getSignature();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
synchronized (getTransactionMessageLock) {
|
||||
getTransactionMessageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
private void processNetworkGetTransactionMessages() {
|
||||
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (getTransactionMessageLock) {
|
||||
messagesToProcess = new ArrayList<>(getTransactionMessageList);
|
||||
getTransactionMessageList.clear();
|
||||
}
|
||||
|
||||
if( messagesToProcess.isEmpty() ) return;
|
||||
|
||||
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
for( PeerMessage peerMessage : messagesToProcess ) {
|
||||
GetTransactionMessage getTransactionMessage = (GetTransactionMessage) peerMessage.getMessage();
|
||||
byte[] signature = getTransactionMessage.getSignature();
|
||||
|
||||
peerMessageBySignature58.put(Base58.encode(signature), peerMessage);
|
||||
}
|
||||
|
||||
// Firstly check the sig-valid transactions that are currently queued for import
|
||||
TransactionData transactionData = this.getCachedSigValidTransactions().stream()
|
||||
.filter(t -> Arrays.equals(signature, t.getSignature()))
|
||||
.findFirst().orElse(null);
|
||||
Map<String, TransactionData> transactionsCachedBySignature58
|
||||
= this.getCachedSigValidTransactions().stream()
|
||||
.collect(Collectors.toMap(t -> Base58.encode(t.getSignature()), Function.identity()));
|
||||
|
||||
if (transactionData == null) {
|
||||
Map<Boolean, List<Map.Entry<String, PeerMessage>>> transactionsCachedBySignature58Partition
|
||||
= peerMessageBySignature58.entrySet().stream()
|
||||
.collect(Collectors.partitioningBy(entry -> transactionsCachedBySignature58.containsKey(entry.getKey())));
|
||||
|
||||
List<byte[]> signaturesNeeded
|
||||
= transactionsCachedBySignature58Partition.get(false).stream()
|
||||
.map(Map.Entry::getValue)
|
||||
.map(PeerMessage::getMessage)
|
||||
.map(message -> (GetTransactionMessage) message)
|
||||
.map(GetTransactionMessage::getSignature)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// transaction found in the import queue
|
||||
Map<String, TransactionData> transactionsToSendBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
for( Map.Entry<String, PeerMessage> entry : transactionsCachedBySignature58Partition.get(true)) {
|
||||
transactionsToSendBySignature58.put(entry.getKey(), transactionsCachedBySignature58.get(entry.getKey()));
|
||||
}
|
||||
|
||||
if( !signaturesNeeded.isEmpty() ) {
|
||||
// Not found in import queue, so try the database
|
||||
transactionData = repository.getTransactionRepository().fromSignature(signature);
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
transactionsToSendBySignature58.putAll(
|
||||
repository.getTransactionRepository().fromSignatures(signaturesNeeded).stream()
|
||||
.collect(Collectors.toMap(transactionData -> Base58.encode(transactionData.getSignature()), Function.identity()))
|
||||
);
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
if (transactionData == null) {
|
||||
// Still not found - so we don't have this transaction
|
||||
LOGGER.debug(() -> String.format("Ignoring GET_TRANSACTION request from peer %s for unknown transaction %s", peer, Base58.encode(signature)));
|
||||
// Send no response at all???
|
||||
return;
|
||||
}
|
||||
for( final Map.Entry<String, TransactionData> entry : transactionsToSendBySignature58.entrySet() ) {
|
||||
|
||||
Message transactionMessage = new TransactionMessage(transactionData);
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(entry.getKey());
|
||||
final Message message = peerMessage.getMessage();
|
||||
final Peer peer = peerMessage.getPeer();
|
||||
|
||||
Runnable sendTransactionMessageRunner = () -> sendTransactionMessage(entry.getKey(), entry.getValue(), message, peer);
|
||||
Thread sendTransactionMessageThread = new Thread(sendTransactionMessageRunner);
|
||||
sendTransactionMessageThread.start();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(),e);
|
||||
}
|
||||
}
|
||||
|
||||
private static void sendTransactionMessage(String signature58, TransactionData data, Message message, Peer peer) {
|
||||
try {
|
||||
Message transactionMessage = new TransactionMessage(data);
|
||||
transactionMessage.setId(message.getId());
|
||||
|
||||
if (!peer.sendMessage(transactionMessage))
|
||||
peer.disconnect("failed to send transaction");
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
|
||||
} catch (TransformationException e) {
|
||||
LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
|
||||
}
|
||||
catch (TransformationException e) {
|
||||
LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", signature58, peer), e);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -421,44 +498,86 @@ public class TransactionImporter extends Thread {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> signatureMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object signatureMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService signatureMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkTransactionSignaturesMessage(Peer peer, Message message) {
|
||||
TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) message;
|
||||
List<byte[]> signatures = transactionSignaturesMessage.getSignatures();
|
||||
synchronized (signatureMessageLock) {
|
||||
signatureMessageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
for (byte[] signature : signatures) {
|
||||
String signature58 = Base58.encode(signature);
|
||||
if (invalidUnconfirmedTransactions.containsKey(signature58)) {
|
||||
// Previously invalid transaction - don't keep requesting it
|
||||
// It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
|
||||
continue;
|
||||
}
|
||||
public void processNetworkTransactionSignaturesMessage() {
|
||||
|
||||
// Ignore if this transaction is in the queue
|
||||
if (incomingTransactionQueueContains(signature)) {
|
||||
LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peer));
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (signatureMessageLock) {
|
||||
messagesToProcess = new ArrayList<>(signatureMessageList);
|
||||
signatureMessageList.clear();
|
||||
}
|
||||
|
||||
// Do we have it already? (Before requesting transaction data itself)
|
||||
if (repository.getTransactionRepository().exists(signature)) {
|
||||
LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(signature), peer));
|
||||
continue;
|
||||
}
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size() * 10);
|
||||
Map<String, Peer> peerBySignature58 = new HashMap<>( messagesToProcess.size() * 10 );
|
||||
|
||||
// Check isInterrupted() here and exit fast
|
||||
if (Thread.currentThread().isInterrupted())
|
||||
return;
|
||||
for( PeerMessage peerMessage : messagesToProcess ) {
|
||||
|
||||
// Fetch actual transaction data from peer
|
||||
Message getTransactionMessage = new GetTransactionMessage(signature);
|
||||
if (!peer.sendMessage(getTransactionMessage)) {
|
||||
peer.disconnect("failed to request transaction");
|
||||
return;
|
||||
TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) peerMessage.getMessage();
|
||||
List<byte[]> signatures = transactionSignaturesMessage.getSignatures();
|
||||
|
||||
for (byte[] signature : signatures) {
|
||||
String signature58 = Base58.encode(signature);
|
||||
if (invalidUnconfirmedTransactions.containsKey(signature58)) {
|
||||
// Previously invalid transaction - don't keep requesting it
|
||||
// It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
|
||||
continue;
|
||||
}
|
||||
|
||||
// Ignore if this transaction is in the queue
|
||||
if (incomingTransactionQueueContains(signature)) {
|
||||
LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peerMessage.getPeer()));
|
||||
continue;
|
||||
}
|
||||
|
||||
signatureBySignature58.put(signature58, signature);
|
||||
peerBySignature58.put(signature58, peerMessage.getPeer());
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer %s", peer), e);
|
||||
|
||||
if( !signatureBySignature58.isEmpty() ) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// remove signatures in db already
|
||||
repository.getTransactionRepository()
|
||||
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
|
||||
.map(TransactionData::getSignature)
|
||||
.map(signature -> Base58.encode(signature))
|
||||
.forEach(signature58 -> signatureBySignature58.remove(signature58));
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer"), e);
|
||||
}
|
||||
}
|
||||
|
||||
// Check isInterrupted() here and exit fast
|
||||
if (Thread.currentThread().isInterrupted())
|
||||
return;
|
||||
|
||||
for (Map.Entry<String, byte[]> entry : signatureBySignature58.entrySet()) {
|
||||
|
||||
Peer peer = peerBySignature58.get(entry.getKey());
|
||||
|
||||
// Fetch actual transaction data from peer
|
||||
Message getTransactionMessage = new GetTransactionMessage(entry.getValue());
|
||||
if (peer != null && !peer.sendMessage(getTransactionMessage)) {
|
||||
peer.disconnect("failed to request transaction");
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -2,22 +2,30 @@ package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.api.resource.TransactionsResource;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.arbitrary.ArbitraryResourceData;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.event.DataMonitorEvent;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.gui.SplashFrame;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transaction.ArbitraryTransaction;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
import java.text.NumberFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class ArbitraryDataCacheManager extends Thread {
|
||||
|
||||
@@ -29,6 +37,11 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
/** Queue of arbitrary transactions that require cache updates */
|
||||
private final List<ArbitraryTransactionData> updateQueue = Collections.synchronizedList(new ArrayList<>());
|
||||
|
||||
private static final NumberFormat FORMATTER = NumberFormat.getNumberInstance();
|
||||
|
||||
static {
|
||||
FORMATTER.setGroupingUsed(true);
|
||||
}
|
||||
|
||||
public static synchronized ArbitraryDataCacheManager getInstance() {
|
||||
if (instance == null) {
|
||||
@@ -45,17 +58,22 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
|
||||
try {
|
||||
while (!Controller.isStopping()) {
|
||||
Thread.sleep(500L);
|
||||
try {
|
||||
Thread.sleep(500L);
|
||||
|
||||
// Process queue
|
||||
processResourceQueue();
|
||||
// Process queue
|
||||
processResourceQueue();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
Thread.sleep(600_000L); // wait 10 minutes to continue
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// Fall through to exit thread
|
||||
}
|
||||
|
||||
// Clear queue before terminating thread
|
||||
processResourceQueue();
|
||||
// Clear queue before terminating thread
|
||||
processResourceQueue();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
@@ -85,14 +103,25 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
// Update arbitrary resource caches
|
||||
try {
|
||||
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
|
||||
arbitraryTransaction.updateArbitraryResourceCache(repository);
|
||||
arbitraryTransaction.updateArbitraryMetadataCache(repository);
|
||||
arbitraryTransaction.updateArbitraryResourceCacheIncludingMetadata(repository, new HashSet<>(0), new HashMap<>(0));
|
||||
repository.saveChanges();
|
||||
|
||||
// Update status as separate commit, as this is more prone to failure
|
||||
arbitraryTransaction.updateArbitraryResourceStatus(repository);
|
||||
repository.saveChanges();
|
||||
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
transactionData.getIdentifier(),
|
||||
transactionData.getName(),
|
||||
transactionData.getService().name(),
|
||||
"updated resource cache and status, queue",
|
||||
transactionData.getTimestamp(),
|
||||
transactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
|
||||
LOGGER.debug(() -> String.format("Finished processing transaction %.8s in arbitrary resource queue...", Base58.encode(transactionData.getSignature())));
|
||||
|
||||
} catch (DataException e) {
|
||||
@@ -103,6 +132,9 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue while processing arbitrary resource cache updates", e);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
public void addToUpdateQueue(ArbitraryTransactionData transactionData) {
|
||||
@@ -148,34 +180,66 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
LOGGER.info("Building arbitrary resources cache...");
|
||||
SplashFrame.getInstance().updateStatus("Building QDN cache - please wait...");
|
||||
|
||||
final int batchSize = 100;
|
||||
final int batchSize = Settings.getInstance().getBuildArbitraryResourcesBatchSize();
|
||||
int offset = 0;
|
||||
|
||||
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder
|
||||
= repository.getArbitraryRepository().getLatestArbitraryTransactions();
|
||||
|
||||
LOGGER.info("arbitrary transactions: count = " + allArbitraryTransactionsInDescendingOrder.size());
|
||||
|
||||
List<ArbitraryResourceData> resources = repository.getArbitraryRepository().getArbitraryResources(null, null, true);
|
||||
|
||||
Map<ArbitraryTransactionDataHashWrapper, ArbitraryResourceData> resourceByWrapper = new HashMap<>(resources.size());
|
||||
for( ArbitraryResourceData resource : resources ) {
|
||||
resourceByWrapper.put(
|
||||
new ArbitraryTransactionDataHashWrapper(resource.service.value, resource.name, resource.identifier),
|
||||
resource
|
||||
);
|
||||
}
|
||||
|
||||
LOGGER.info("arbitrary resources: count = " + resourceByWrapper.size());
|
||||
|
||||
Set<ArbitraryTransactionDataHashWrapper> latestTransactionsWrapped = new HashSet<>(allArbitraryTransactionsInDescendingOrder.size());
|
||||
|
||||
// Loop through all ARBITRARY transactions, and determine latest state
|
||||
while (!Controller.isStopping()) {
|
||||
LOGGER.info("Fetching arbitrary transactions {} - {}", offset, offset+batchSize-1);
|
||||
LOGGER.info(
|
||||
"Fetching arbitrary transactions {} - {} / {} Total",
|
||||
FORMATTER.format(offset),
|
||||
FORMATTER.format(offset+batchSize-1),
|
||||
FORMATTER.format(allArbitraryTransactionsInDescendingOrder.size())
|
||||
);
|
||||
|
||||
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, List.of(Transaction.TransactionType.ARBITRARY), null, null, null, TransactionsResource.ConfirmationStatus.BOTH, batchSize, offset, false);
|
||||
if (signatures.isEmpty()) {
|
||||
List<ArbitraryTransactionData> transactionsToProcess
|
||||
= allArbitraryTransactionsInDescendingOrder.stream()
|
||||
.skip(offset)
|
||||
.limit(batchSize)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
if (transactionsToProcess.isEmpty()) {
|
||||
// Complete
|
||||
break;
|
||||
}
|
||||
|
||||
// Expand signatures to transactions
|
||||
for (byte[] signature : signatures) {
|
||||
ArbitraryTransactionData transactionData = (ArbitraryTransactionData) repository
|
||||
.getTransactionRepository().fromSignature(signature);
|
||||
try {
|
||||
for( ArbitraryTransactionData transactionData : transactionsToProcess) {
|
||||
if (transactionData.getService() == null) {
|
||||
// Unsupported service - ignore this resource
|
||||
continue;
|
||||
}
|
||||
|
||||
if (transactionData.getService() == null) {
|
||||
// Unsupported service - ignore this resource
|
||||
continue;
|
||||
latestTransactionsWrapped.add(new ArbitraryTransactionDataHashWrapper(transactionData));
|
||||
|
||||
// Update arbitrary resource caches
|
||||
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
|
||||
arbitraryTransaction.updateArbitraryResourceCacheIncludingMetadata(repository, latestTransactionsWrapped, resourceByWrapper);
|
||||
}
|
||||
|
||||
// Update arbitrary resource caches
|
||||
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
|
||||
arbitraryTransaction.updateArbitraryResourceCache(repository);
|
||||
arbitraryTransaction.updateArbitraryMetadataCache(repository);
|
||||
repository.saveChanges();
|
||||
} catch (DataException e) {
|
||||
repository.discardChanges();
|
||||
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
offset += batchSize;
|
||||
}
|
||||
@@ -193,6 +257,11 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
repository.discardChanges();
|
||||
throw new DataException("Build of arbitrary resources cache failed.");
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean refreshArbitraryStatuses(Repository repository) throws DataException {
|
||||
@@ -200,27 +269,48 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
LOGGER.info("Refreshing arbitrary resource statuses for locally hosted transactions...");
|
||||
SplashFrame.getInstance().updateStatus("Refreshing statuses - please wait...");
|
||||
|
||||
final int batchSize = 100;
|
||||
final int batchSize = Settings.getInstance().getBuildArbitraryResourcesBatchSize();
|
||||
int offset = 0;
|
||||
|
||||
List<ArbitraryTransactionData> allHostedTransactions
|
||||
= ArbitraryDataStorageManager.getInstance()
|
||||
.listAllHostedTransactions(repository, null, null);
|
||||
|
||||
// Loop through all ARBITRARY transactions, and determine latest state
|
||||
while (!Controller.isStopping()) {
|
||||
LOGGER.info("Fetching hosted transactions {} - {}", offset, offset+batchSize-1);
|
||||
LOGGER.info(
|
||||
"Fetching hosted transactions {} - {} / {} Total",
|
||||
FORMATTER.format(offset),
|
||||
FORMATTER.format(offset+batchSize-1),
|
||||
FORMATTER.format(allHostedTransactions.size())
|
||||
);
|
||||
|
||||
List<ArbitraryTransactionData> hostedTransactions
|
||||
= allHostedTransactions.stream()
|
||||
.skip(offset)
|
||||
.limit(batchSize)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
List<ArbitraryTransactionData> hostedTransactions = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository, batchSize, offset);
|
||||
if (hostedTransactions.isEmpty()) {
|
||||
// Complete
|
||||
break;
|
||||
}
|
||||
|
||||
// Loop through hosted transactions
|
||||
for (ArbitraryTransactionData transactionData : hostedTransactions) {
|
||||
try {
|
||||
// Loop through hosted transactions
|
||||
for (ArbitraryTransactionData transactionData : hostedTransactions) {
|
||||
|
||||
// Determine status and update cache
|
||||
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
|
||||
arbitraryTransaction.updateArbitraryResourceStatus(repository);
|
||||
// Determine status and update cache
|
||||
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
|
||||
arbitraryTransaction.updateArbitraryResourceStatus(repository);
|
||||
}
|
||||
repository.saveChanges();
|
||||
} catch (DataException e) {
|
||||
repository.discardChanges();
|
||||
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
|
||||
offset += batchSize;
|
||||
}
|
||||
|
||||
@@ -234,6 +324,11 @@ public class ArbitraryDataCacheManager extends Thread {
|
||||
repository.discardChanges();
|
||||
throw new DataException("Refresh of arbitrary resource statuses failed.");
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -2,9 +2,10 @@ package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.event.DataMonitorEvent;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
@@ -21,8 +22,12 @@ import java.nio.file.Paths;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.qortal.controller.arbitrary.ArbitraryDataStorageManager.DELETION_THRESHOLD;
|
||||
|
||||
@@ -77,6 +82,19 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
final int limit = 100;
|
||||
int offset = 0;
|
||||
|
||||
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
allArbitraryTransactionsInDescendingOrder
|
||||
= repository.getArbitraryRepository()
|
||||
.getLatestArbitraryTransactions();
|
||||
} catch( Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
|
||||
}
|
||||
|
||||
Set<ArbitraryTransactionData> processedTransactions = new HashSet<>();
|
||||
|
||||
try {
|
||||
while (!isStopping) {
|
||||
Thread.sleep(30000);
|
||||
@@ -107,27 +125,31 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
|
||||
// Any arbitrary transactions we want to fetch data for?
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true);
|
||||
// LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
|
||||
List<ArbitraryTransactionData> transactions = allArbitraryTransactionsInDescendingOrder.stream().skip(offset).limit(limit).collect(Collectors.toList());
|
||||
if (isStopping) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (signatures == null || signatures.isEmpty()) {
|
||||
if (transactions == null || transactions.isEmpty()) {
|
||||
offset = 0;
|
||||
continue;
|
||||
allArbitraryTransactionsInDescendingOrder
|
||||
= repository.getArbitraryRepository()
|
||||
.getLatestArbitraryTransactions();
|
||||
transactions = allArbitraryTransactionsInDescendingOrder.stream().limit(limit).collect(Collectors.toList());
|
||||
processedTransactions.clear();
|
||||
}
|
||||
|
||||
offset += limit;
|
||||
now = NTP.getTime();
|
||||
|
||||
// Loop through the signatures in this batch
|
||||
for (int i=0; i<signatures.size(); i++) {
|
||||
for (int i=0; i<transactions.size(); i++) {
|
||||
if (isStopping) {
|
||||
return;
|
||||
}
|
||||
|
||||
byte[] signature = signatures.get(i);
|
||||
if (signature == null) {
|
||||
ArbitraryTransactionData arbitraryTransactionData = transactions.get(i);
|
||||
if (arbitraryTransactionData == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -136,9 +158,7 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
Thread.sleep(5000);
|
||||
}
|
||||
|
||||
// Fetch the transaction data
|
||||
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
|
||||
if (arbitraryTransactionData == null || arbitraryTransactionData.getService() == null) {
|
||||
if (arbitraryTransactionData.getService() == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -147,6 +167,8 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
continue;
|
||||
}
|
||||
|
||||
boolean mostRecentTransaction = processedTransactions.add(arbitraryTransactionData);
|
||||
|
||||
// Check if we have the complete file
|
||||
boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData);
|
||||
|
||||
@@ -167,20 +189,54 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
LOGGER.info("Deleting transaction {} because we can't host its data",
|
||||
Base58.encode(arbitraryTransactionData.getSignature()));
|
||||
ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
|
||||
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"can't store data, deleting",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check to see if we have had a more recent PUT
|
||||
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
|
||||
if (hasMoreRecentPutTransaction) {
|
||||
if (!mostRecentTransaction) {
|
||||
// There is a more recent PUT transaction than the one we are currently processing.
|
||||
// When a PUT is issued, it replaces any layers that would have been there before.
|
||||
// Therefore any data relating to this older transaction is no longer needed.
|
||||
LOGGER.info(String.format("Newer PUT found for %s %s since transaction %s. " +
|
||||
"Deleting all files associated with the earlier transaction.", arbitraryTransactionData.getService(),
|
||||
arbitraryTransactionData.getName(), Base58.encode(signature)));
|
||||
arbitraryTransactionData.getName(), Base58.encode(arbitraryTransactionData.getSignature())));
|
||||
|
||||
ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
|
||||
|
||||
Optional<ArbitraryTransactionData> moreRecentPutTransaction
|
||||
= processedTransactions.stream()
|
||||
.filter(data -> data.equals(arbitraryTransactionData))
|
||||
.findAny();
|
||||
|
||||
if( moreRecentPutTransaction.isPresent() ) {
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"deleting data due to replacement",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
moreRecentPutTransaction.get().getTimestamp()
|
||||
)
|
||||
);
|
||||
}
|
||||
else {
|
||||
LOGGER.warn("Something went wrong with the most recent put transaction determination!");
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -199,7 +255,21 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
LOGGER.debug(String.format("Transaction %s has complete file and all chunks",
|
||||
Base58.encode(arbitraryTransactionData.getSignature())));
|
||||
|
||||
ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
|
||||
boolean wasDeleted = ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
|
||||
|
||||
if( wasDeleted ) {
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"deleting file, retaining chunks",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -237,17 +307,6 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
this.storageLimitReached(repository);
|
||||
}
|
||||
|
||||
// Delete random data associated with name if we're over our storage limit for this name
|
||||
// Use the DELETION_THRESHOLD, for the same reasons as above
|
||||
for (String followedName : ListUtils.followedNames()) {
|
||||
if (isStopping) {
|
||||
return;
|
||||
}
|
||||
if (!storageManager.isStorageSpaceAvailableForName(repository, followedName, DELETION_THRESHOLD)) {
|
||||
this.storageLimitReachedForName(repository, followedName);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue when cleaning up arbitrary transaction data", e);
|
||||
}
|
||||
@@ -326,25 +385,6 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
// FUTURE: consider reducing the expiry time of the reader cache
|
||||
}
|
||||
|
||||
public void storageLimitReachedForName(Repository repository, String name) throws InterruptedException {
|
||||
// We think that the storage limit has been reached for supplied name - but we should double check
|
||||
if (ArbitraryDataStorageManager.getInstance().isStorageSpaceAvailableForName(repository, name, DELETION_THRESHOLD)) {
|
||||
// We have space available for this name, so don't delete anything
|
||||
return;
|
||||
}
|
||||
|
||||
// Delete a batch of random chunks associated with this name
|
||||
// This reduces the chance of too many nodes deleting the same chunk
|
||||
// when they reach their storage limit
|
||||
Path dataPath = Paths.get(Settings.getInstance().getDataPath());
|
||||
for (int i=0; i<CHUNK_DELETION_BATCH_SIZE; i++) {
|
||||
if (isStopping) {
|
||||
return;
|
||||
}
|
||||
this.deleteRandomFile(repository, dataPath.toFile(), name);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Iteratively walk through given directory and delete a single random file
|
||||
*
|
||||
@@ -423,6 +463,7 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
}
|
||||
|
||||
LOGGER.info("Deleting random file {} because we have reached max storage capacity...", randomItem.toString());
|
||||
fireRandomItemDeletionNotification(randomItem, repository, "Deleting random file, because we have reached max storage capacity");
|
||||
boolean success = randomItem.delete();
|
||||
if (success) {
|
||||
try {
|
||||
@@ -437,6 +478,35 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
return false;
|
||||
}
|
||||
|
||||
private void fireRandomItemDeletionNotification(File randomItem, Repository repository, String reason) {
|
||||
try {
|
||||
Path parentFileNamePath = randomItem.toPath().toAbsolutePath().getParent().getFileName();
|
||||
if (parentFileNamePath != null) {
|
||||
String signature58 = parentFileNamePath.toString();
|
||||
byte[] signature = Base58.decode(signature58);
|
||||
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
|
||||
if (transactionData != null && transactionData.getType() == Transaction.TransactionType.ARBITRARY) {
|
||||
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
|
||||
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
reason,
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
private void cleanupTempDirectory(String folder, long now, long minAge) {
|
||||
String baseDir = Settings.getInstance().getTempDataPath();
|
||||
Path tempDir = Paths.get(baseDir, folder);
|
||||
|
@@ -0,0 +1,21 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
public class ArbitraryDataExamination {
|
||||
|
||||
private boolean pass;
|
||||
|
||||
private String notes;
|
||||
|
||||
public ArbitraryDataExamination(boolean pass, String notes) {
|
||||
this.pass = pass;
|
||||
this.notes = notes;
|
||||
}
|
||||
|
||||
public boolean isPass() {
|
||||
return pass;
|
||||
}
|
||||
|
||||
public String getNotes() {
|
||||
return notes;
|
||||
}
|
||||
}
|
@@ -25,6 +25,10 @@ import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.qortal.controller.arbitrary.ArbitraryDataFileManager.MAX_FILE_HASH_RESPONSES;
|
||||
|
||||
@@ -73,6 +77,8 @@ public class ArbitraryDataFileListManager {
|
||||
|
||||
|
||||
private ArbitraryDataFileListManager() {
|
||||
getArbitraryDataFileListMessageScheduler.scheduleAtFixedRate(this::processNetworkGetArbitraryDataFileListMessage, 60, 1, TimeUnit.SECONDS);
|
||||
arbitraryDataFileListMessageScheduler.scheduleAtFixedRate(this::processNetworkArbitraryDataFileListMessage, 60, 1, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public static ArbitraryDataFileListManager getInstance() {
|
||||
@@ -118,8 +124,8 @@ public class ArbitraryDataFileListManager {
|
||||
if (timeSinceLastAttempt > 15 * 1000L) {
|
||||
// We haven't tried for at least 15 seconds
|
||||
|
||||
if (networkBroadcastCount < 3) {
|
||||
// We've made less than 3 total attempts
|
||||
if (networkBroadcastCount < 12) {
|
||||
// We've made less than 12 total attempts
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -128,8 +134,8 @@ public class ArbitraryDataFileListManager {
|
||||
if (timeSinceLastAttempt > 60 * 1000L) {
|
||||
// We haven't tried for at least 1 minute
|
||||
|
||||
if (networkBroadcastCount < 8) {
|
||||
// We've made less than 8 total attempts
|
||||
if (networkBroadcastCount < 40) {
|
||||
// We've made less than 40 total attempts
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -396,11 +402,11 @@ public class ArbitraryDataFileListManager {
|
||||
return true;
|
||||
}
|
||||
|
||||
public void deleteFileListRequestsForSignature(byte[] signature) {
|
||||
String signature58 = Base58.encode(signature);
|
||||
public void deleteFileListRequestsForSignature(String signature58) {
|
||||
|
||||
for (Iterator<Map.Entry<Integer, Triple<String, Peer, Long>>> it = arbitraryDataFileListRequests.entrySet().iterator(); it.hasNext();) {
|
||||
Map.Entry<Integer, Triple<String, Peer, Long>> entry = it.next();
|
||||
if (entry == null || entry.getKey() == null || entry.getValue() != null) {
|
||||
if (entry == null || entry.getKey() == null || entry.getValue() == null) {
|
||||
continue;
|
||||
}
|
||||
if (Objects.equals(entry.getValue().getA(), signature58)) {
|
||||
@@ -413,70 +419,116 @@ public class ArbitraryDataFileListManager {
|
||||
|
||||
// Network handlers
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> arbitraryDataFileListMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object arbitraryDataFileListMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService arbitraryDataFileListMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkArbitraryDataFileListMessage(Peer peer, Message message) {
|
||||
// Don't process if QDN is disabled
|
||||
if (!Settings.getInstance().isQdnEnabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
|
||||
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
|
||||
|
||||
if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
|
||||
long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
|
||||
LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
|
||||
totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
|
||||
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
|
||||
synchronized (arbitraryDataFileListMessageLock) {
|
||||
arbitraryDataFileListMessageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
// Do we have a pending request for this data?
|
||||
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
|
||||
if (request == null || request.getA() == null) {
|
||||
return;
|
||||
}
|
||||
boolean isRelayRequest = (request.getB() != null);
|
||||
private void processNetworkArbitraryDataFileListMessage() {
|
||||
|
||||
// Does this message's signature match what we're expecting?
|
||||
byte[] signature = arbitraryDataFileListMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
if (!request.getA().equals(signature58)) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (arbitraryDataFileListMessageLock) {
|
||||
messagesToProcess = new ArrayList<>(arbitraryDataFileListMessageList);
|
||||
arbitraryDataFileListMessageList.clear();
|
||||
}
|
||||
|
||||
List<byte[]> hashes = arbitraryDataFileListMessage.getHashes();
|
||||
if (hashes == null || hashes.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
if (messagesToProcess.isEmpty()) return;
|
||||
|
||||
ArbitraryTransactionData arbitraryTransactionData = null;
|
||||
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String, Boolean> isRelayRequestBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String, List<byte[]>> hashesBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String, Triple<String, Peer, Long>> requestBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
// Check transaction exists and hashes are correct
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
|
||||
if (!(transactionData instanceof ArbitraryTransactionData))
|
||||
for (PeerMessage peerMessage : messagesToProcess) {
|
||||
Peer peer = peerMessage.getPeer();
|
||||
Message message = peerMessage.getMessage();
|
||||
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
|
||||
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
|
||||
|
||||
if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
|
||||
long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
|
||||
LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
|
||||
totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
|
||||
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
|
||||
}
|
||||
|
||||
// Do we have a pending request for this data?
|
||||
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
|
||||
if (request == null || request.getA() == null) {
|
||||
continue;
|
||||
}
|
||||
boolean isRelayRequest = (request.getB() != null);
|
||||
|
||||
// Does this message's signature match what we're expecting?
|
||||
byte[] signature = arbitraryDataFileListMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
if (!request.getA().equals(signature58)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
List<byte[]> hashes = arbitraryDataFileListMessage.getHashes();
|
||||
if (hashes == null || hashes.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
peerMessageBySignature58.put(signature58, peerMessage);
|
||||
signatureBySignature58.put(signature58, signature);
|
||||
isRelayRequestBySignature58.put(signature58, isRelayRequest);
|
||||
hashesBySignature58.put(signature58, hashes);
|
||||
requestBySignature58.put(signature58, request);
|
||||
}
|
||||
|
||||
if (signatureBySignature58.isEmpty()) return;
|
||||
|
||||
List<ArbitraryTransactionData> arbitraryTransactionDataList;
|
||||
|
||||
// Check transaction exists and hashes are correct
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
arbitraryTransactionDataList
|
||||
= repository.getTransactionRepository()
|
||||
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
|
||||
.filter(data -> data instanceof ArbitraryTransactionData)
|
||||
.map(data -> (ArbitraryTransactionData) data)
|
||||
.collect(Collectors.toList());
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list"), e);
|
||||
return;
|
||||
}
|
||||
|
||||
arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
|
||||
for (ArbitraryTransactionData arbitraryTransactionData : arbitraryTransactionDataList) {
|
||||
|
||||
// // Load data file(s)
|
||||
// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
|
||||
//
|
||||
// // Check all hashes exist
|
||||
// for (byte[] hash : hashes) {
|
||||
// //LOGGER.debug("Received hash {}", Base58.encode(hash));
|
||||
// if (!arbitraryDataFile.containsChunk(hash)) {
|
||||
// // Check the hash against the complete file
|
||||
// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
|
||||
// LOGGER.info("Received non-matching chunk hash {} for signature {}. This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58);
|
||||
// return;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
byte[] signature = arbitraryTransactionData.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
|
||||
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
|
||||
Long now = NTP.getTime();
|
||||
List<byte[]> hashes = hashesBySignature58.get(signature58);
|
||||
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
|
||||
Peer peer = peerMessage.getPeer();
|
||||
Message message = peerMessage.getMessage();
|
||||
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
|
||||
|
||||
Boolean isRelayRequest = isRelayRequestBySignature58.get(signature58);
|
||||
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
|
||||
Long now = NTP.getTime();
|
||||
|
||||
if (ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.size() < MAX_FILE_HASH_RESPONSES) {
|
||||
// Keep track of the hashes this peer reports to have access to
|
||||
for (byte[] hash : hashes) {
|
||||
String hash58 = Base58.encode(hash);
|
||||
@@ -487,233 +539,300 @@ public class ArbitraryDataFileListManager {
|
||||
ArbitraryFileListResponseInfo responseInfo = new ArbitraryFileListResponseInfo(hash58, signature58,
|
||||
peer, now, arbitraryDataFileListMessage.getRequestTime(), requestHops);
|
||||
|
||||
ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.add(responseInfo);
|
||||
ArbitraryDataFileManager.getInstance().addResponse(responseInfo);
|
||||
}
|
||||
|
||||
// Keep track of the source peer, for direct connections
|
||||
if (arbitraryDataFileListMessage.getPeerAddress() != null) {
|
||||
ArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
|
||||
new ArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
|
||||
}
|
||||
}
|
||||
|
||||
// Keep track of the source peer, for direct connections
|
||||
if (arbitraryDataFileListMessage.getPeerAddress() != null) {
|
||||
ArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
|
||||
new ArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
|
||||
}
|
||||
}
|
||||
// Forwarding
|
||||
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list for peer %s", peer), e);
|
||||
}
|
||||
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
|
||||
if (!isBlocked) {
|
||||
Triple<String, Peer, Long> request = requestBySignature58.get(signature58);
|
||||
Peer requestingPeer = request.getB();
|
||||
if (requestingPeer != null) {
|
||||
Long requestTime = arbitraryDataFileListMessage.getRequestTime();
|
||||
Integer requestHops = arbitraryDataFileListMessage.getRequestHops();
|
||||
|
||||
// Forwarding
|
||||
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
|
||||
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
|
||||
if (!isBlocked) {
|
||||
Peer requestingPeer = request.getB();
|
||||
if (requestingPeer != null) {
|
||||
Long requestTime = arbitraryDataFileListMessage.getRequestTime();
|
||||
Integer requestHops = arbitraryDataFileListMessage.getRequestHops();
|
||||
// Add each hash to our local mapping so we know who to ask later
|
||||
Long now = NTP.getTime();
|
||||
for (byte[] hash : hashes) {
|
||||
String hash58 = Base58.encode(hash);
|
||||
ArbitraryRelayInfo relayInfo = new ArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
|
||||
ArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
|
||||
}
|
||||
|
||||
// Add each hash to our local mapping so we know who to ask later
|
||||
Long now = NTP.getTime();
|
||||
for (byte[] hash : hashes) {
|
||||
String hash58 = Base58.encode(hash);
|
||||
ArbitraryRelayInfo relayInfo = new ArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
|
||||
ArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
|
||||
}
|
||||
// Bump requestHops if it exists
|
||||
if (requestHops != null) {
|
||||
requestHops++;
|
||||
}
|
||||
|
||||
// Bump requestHops if it exists
|
||||
if (requestHops != null) {
|
||||
requestHops++;
|
||||
}
|
||||
ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
|
||||
|
||||
ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
|
||||
// Remove optional parameters if the requesting peer doesn't support it yet
|
||||
// A message with less statistical data is better than no message at all
|
||||
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
|
||||
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
|
||||
} else {
|
||||
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
|
||||
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
|
||||
}
|
||||
forwardArbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
// Remove optional parameters if the requesting peer doesn't support it yet
|
||||
// A message with less statistical data is better than no message at all
|
||||
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
|
||||
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
|
||||
} else {
|
||||
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
|
||||
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
|
||||
}
|
||||
forwardArbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
// Forward to requesting peer
|
||||
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
|
||||
if (!requestingPeer.sendMessage(forwardArbitraryDataFileListMessage)) {
|
||||
requestingPeer.disconnect("failed to forward arbitrary data file list");
|
||||
// Forward to requesting peer
|
||||
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
|
||||
requestingPeer.sendMessage(forwardArbitraryDataFileListMessage);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> getArbitraryDataFileListMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object getArbitraryDataFileListMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService getArbitraryDataFileListMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkGetArbitraryDataFileListMessage(Peer peer, Message message) {
|
||||
// Don't respond if QDN is disabled
|
||||
if (!Settings.getInstance().isQdnEnabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();
|
||||
|
||||
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
|
||||
byte[] signature = getArbitraryDataFileListMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
Long now = NTP.getTime();
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
|
||||
|
||||
// If we've seen this request recently, then ignore
|
||||
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
|
||||
LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
|
||||
return;
|
||||
synchronized (getArbitraryDataFileListMessageLock) {
|
||||
getArbitraryDataFileListMessageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
|
||||
int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
|
||||
String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();
|
||||
private void processNetworkGetArbitraryDataFileListMessage() {
|
||||
|
||||
if (requestingPeer != null) {
|
||||
LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
|
||||
}
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (getArbitraryDataFileListMessageLock) {
|
||||
messagesToProcess = new ArrayList<>(getArbitraryDataFileListMessageList);
|
||||
getArbitraryDataFileListMessageList.clear();
|
||||
}
|
||||
|
||||
List<byte[]> hashes = new ArrayList<>();
|
||||
ArbitraryTransactionData transactionData = null;
|
||||
boolean allChunksExist = false;
|
||||
boolean hasMetadata = false;
|
||||
if (messagesToProcess.isEmpty()) return;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String, List<byte[]>> requestedHashesBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String, String> requestingPeerBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String, Long> nowBySignature58 = new HashMap<>((messagesToProcess.size()));
|
||||
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
// Firstly we need to lookup this file on chain to get a list of its hashes
|
||||
transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
|
||||
if (transactionData instanceof ArbitraryTransactionData) {
|
||||
for (PeerMessage messagePeer : messagesToProcess) {
|
||||
Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();
|
||||
|
||||
Message message = messagePeer.message;
|
||||
Peer peer = messagePeer.peer;
|
||||
|
||||
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
|
||||
byte[] signature = getArbitraryDataFileListMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
Long now = NTP.getTime();
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
|
||||
|
||||
// If we've seen this request recently, then ignore
|
||||
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
|
||||
LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
|
||||
continue;
|
||||
}
|
||||
|
||||
List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
|
||||
int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
|
||||
String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();
|
||||
|
||||
if (requestingPeer != null) {
|
||||
LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
|
||||
} else {
|
||||
LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
|
||||
}
|
||||
|
||||
signatureBySignature58.put(signature58, signature);
|
||||
requestedHashesBySignature58.put(signature58, requestedHashes);
|
||||
requestingPeerBySignature58.put(signature58, requestingPeer);
|
||||
nowBySignature58.put(signature58, now);
|
||||
peerMessageBySignature58.put(signature58, messagePeer);
|
||||
}
|
||||
|
||||
if (signatureBySignature58.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
List<byte[]> hashes = new ArrayList<>();
|
||||
boolean allChunksExist = false;
|
||||
boolean hasMetadata = false;
|
||||
|
||||
List<ArbitraryTransactionData> transactionDataList;
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Firstly we need to lookup this file on chain to get a list of its hashes
|
||||
transactionDataList
|
||||
= repository.getTransactionRepository()
|
||||
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
|
||||
.filter(data -> data instanceof ArbitraryTransactionData)
|
||||
.map(data -> (ArbitraryTransactionData) data)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer"), e);
|
||||
return;
|
||||
}
|
||||
|
||||
for (ArbitraryTransactionData transactionData : transactionDataList) {
|
||||
byte[] signature = transactionData.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
List<byte[]> requestedHashes = requestedHashesBySignature58.get(signature58);
|
||||
|
||||
// Check if we're even allowed to serve data for this transaction
|
||||
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
|
||||
|
||||
// Load file(s) and add any that exist to the list of hashes
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
|
||||
try {
|
||||
// Load file(s) and add any that exist to the list of hashes
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
|
||||
|
||||
// If the peer didn't supply a hash list, we need to return all hashes for this transaction
|
||||
if (requestedHashes == null || requestedHashes.isEmpty()) {
|
||||
requestedHashes = new ArrayList<>();
|
||||
// If the peer didn't supply a hash list, we need to return all hashes for this transaction
|
||||
if (requestedHashes == null || requestedHashes.isEmpty()) {
|
||||
requestedHashes = new ArrayList<>();
|
||||
|
||||
// Add the metadata file
|
||||
if (arbitraryDataFile.getMetadataHash() != null) {
|
||||
requestedHashes.add(arbitraryDataFile.getMetadataHash());
|
||||
hasMetadata = true;
|
||||
// Add the metadata file
|
||||
if (arbitraryDataFile.getMetadataHash() != null) {
|
||||
requestedHashes.add(arbitraryDataFile.getMetadataHash());
|
||||
hasMetadata = true;
|
||||
}
|
||||
|
||||
// Add the chunk hashes
|
||||
if (!arbitraryDataFile.getChunkHashes().isEmpty()) {
|
||||
requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
|
||||
}
|
||||
// Add complete file if there are no hashes
|
||||
else {
|
||||
requestedHashes.add(arbitraryDataFile.getHash());
|
||||
}
|
||||
}
|
||||
|
||||
// Add the chunk hashes
|
||||
if (!arbitraryDataFile.getChunkHashes().isEmpty()) {
|
||||
requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
|
||||
}
|
||||
// Add complete file if there are no hashes
|
||||
else {
|
||||
requestedHashes.add(arbitraryDataFile.getHash());
|
||||
|
||||
// Assume all chunks exists, unless one can't be found below
|
||||
allChunksExist = true;
|
||||
|
||||
for (byte[] requestedHash : requestedHashes) {
|
||||
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
|
||||
if (chunk.exists()) {
|
||||
hashes.add(chunk.getHash());
|
||||
//LOGGER.trace("Added hash {}", chunk.getHash58());
|
||||
} else {
|
||||
LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
|
||||
allChunksExist = false;
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
// If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
|
||||
// or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
|
||||
if (hasMetadata && hashes.size() == 1) {
|
||||
hashes.clear();
|
||||
}
|
||||
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
|
||||
Peer peer = peerMessage.getPeer();
|
||||
Message message = peerMessage.getMessage();
|
||||
|
||||
Long now = nowBySignature58.get(signature58);
|
||||
|
||||
// We should only respond if we have at least one hash
|
||||
String requestingPeer = requestingPeerBySignature58.get(signature58);
|
||||
if (!hashes.isEmpty()) {
|
||||
|
||||
// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
|
||||
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);
|
||||
|
||||
// We have all the chunks, so update requests map to reflect that we've sent it
|
||||
// There is no need to keep track of the request, as we can serve all the chunks
|
||||
if (allChunksExist) {
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(null, null, now);
|
||||
arbitraryDataFileListRequests.put(message.getId(), newEntry);
|
||||
}
|
||||
|
||||
// Assume all chunks exists, unless one can't be found below
|
||||
allChunksExist = true;
|
||||
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage;
|
||||
|
||||
// Remove optional parameters if the requesting peer doesn't support it yet
|
||||
// A message with less statistical data is better than no message at all
|
||||
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
|
||||
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
|
||||
} else {
|
||||
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
|
||||
hashes, NTP.getTime(), 0, ourAddress, true);
|
||||
}
|
||||
|
||||
arbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
if (!peer.sendMessage(arbitraryDataFileListMessage)) {
|
||||
LOGGER.debug("Couldn't send list of hashes");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (allChunksExist) {
|
||||
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
|
||||
LOGGER.debug("No need for any forwarding because file list request is fully served");
|
||||
continue;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// We may need to forward this request on
|
||||
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
|
||||
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
|
||||
// In relay mode - so ask our other peers if they have it
|
||||
|
||||
|
||||
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
|
||||
|
||||
long requestTime = getArbitraryDataFileListMessage.getRequestTime();
|
||||
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
|
||||
long totalRequestTime = now - requestTime;
|
||||
|
||||
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
|
||||
// Relay request hasn't timed out yet, so can potentially be rebroadcast
|
||||
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
|
||||
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
|
||||
|
||||
Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
|
||||
relayGetArbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
|
||||
Network.getInstance().broadcast(
|
||||
broadcastPeer ->
|
||||
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
|
||||
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
|
||||
);
|
||||
|
||||
for (byte[] requestedHash : requestedHashes) {
|
||||
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
|
||||
if (chunk.exists()) {
|
||||
hashes.add(chunk.getHash());
|
||||
//LOGGER.trace("Added hash {}", chunk.getHash58());
|
||||
} else {
|
||||
LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
|
||||
allChunksExist = false;
|
||||
// This relay request has reached the maximum number of allowed hops
|
||||
}
|
||||
} else {
|
||||
// This relay request has timed out
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e);
|
||||
}
|
||||
|
||||
// If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
|
||||
// or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
|
||||
if (hasMetadata && hashes.size() == 1) {
|
||||
hashes.clear();
|
||||
}
|
||||
|
||||
// We should only respond if we have at least one hash
|
||||
if (!hashes.isEmpty()) {
|
||||
|
||||
// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
|
||||
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);
|
||||
|
||||
// We have all the chunks, so update requests map to reflect that we've sent it
|
||||
// There is no need to keep track of the request, as we can serve all the chunks
|
||||
if (allChunksExist) {
|
||||
newEntry = new Triple<>(null, null, now);
|
||||
arbitraryDataFileListRequests.put(message.getId(), newEntry);
|
||||
}
|
||||
|
||||
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage;
|
||||
|
||||
// Remove optional parameters if the requesting peer doesn't support it yet
|
||||
// A message with less statistical data is better than no message at all
|
||||
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
|
||||
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
|
||||
} else {
|
||||
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
|
||||
hashes, NTP.getTime(), 0, ourAddress, true);
|
||||
}
|
||||
|
||||
arbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
if (!peer.sendMessage(arbitraryDataFileListMessage)) {
|
||||
LOGGER.debug("Couldn't send list of hashes");
|
||||
peer.disconnect("failed to send list of hashes");
|
||||
return;
|
||||
}
|
||||
LOGGER.debug("Sent list of hashes (count: {})", hashes.size());
|
||||
|
||||
if (allChunksExist) {
|
||||
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
|
||||
LOGGER.debug("No need for any forwarding because file list request is fully served");
|
||||
return;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// We may need to forward this request on
|
||||
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
|
||||
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
|
||||
// In relay mode - so ask our other peers if they have it
|
||||
|
||||
long requestTime = getArbitraryDataFileListMessage.getRequestTime();
|
||||
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
|
||||
long totalRequestTime = now - requestTime;
|
||||
|
||||
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
|
||||
// Relay request hasn't timed out yet, so can potentially be rebroadcast
|
||||
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
|
||||
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
|
||||
|
||||
Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
|
||||
relayGetArbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
|
||||
Network.getInstance().broadcast(
|
||||
broadcastPeer ->
|
||||
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
|
||||
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
|
||||
);
|
||||
|
||||
}
|
||||
else {
|
||||
// This relay request has reached the maximum number of allowed hops
|
||||
}
|
||||
}
|
||||
else {
|
||||
// This relay request has timed out
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import com.google.common.net.InetAddresses;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile;
|
||||
@@ -12,6 +13,7 @@ import org.qortal.data.network.PeerData;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.PeerSendManagement;
|
||||
import org.qortal.network.message.*;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
@@ -23,12 +25,16 @@ import org.qortal.utils.NTP;
|
||||
|
||||
import java.security.SecureRandom;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
public static final int SEND_TIMEOUT_MS = 500;
|
||||
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileManager.class);
|
||||
|
||||
private static ArbitraryDataFileManager instance;
|
||||
@@ -48,7 +54,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
/**
|
||||
* List to keep track of any arbitrary data file hash responses
|
||||
*/
|
||||
public final List<ArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());
|
||||
private final List<ArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());
|
||||
|
||||
/**
|
||||
* List to keep track of peers potentially available for direct connections, based on recent requests
|
||||
@@ -65,8 +71,9 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
public static int MAX_FILE_HASH_RESPONSES = 1000;
|
||||
|
||||
|
||||
private ArbitraryDataFileManager() {
|
||||
this.arbitraryDataFileHashResponseScheduler.scheduleAtFixedRate( this::processResponses, 60, 1, TimeUnit.SECONDS);
|
||||
this.arbitraryDataFileHashResponseScheduler.scheduleAtFixedRate(this::handleFileListRequestProcess, 60, 1, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public static ArbitraryDataFileManager getInstance() {
|
||||
@@ -76,18 +83,13 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
return instance;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Arbitrary Data File Manager");
|
||||
|
||||
try {
|
||||
// Use a fixed thread pool to execute the arbitrary data file requests
|
||||
int threadCount = 5;
|
||||
ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount);
|
||||
for (int i = 0; i < threadCount; i++) {
|
||||
arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread());
|
||||
}
|
||||
|
||||
while (!isStopping) {
|
||||
// Nothing to do yet
|
||||
Thread.sleep(1000);
|
||||
@@ -112,7 +114,6 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT;
|
||||
arbitraryRelayMap.removeIf(entry -> entry == null || entry.getTimestamp() == null || entry.getTimestamp() < relayMinimumTimestamp);
|
||||
arbitraryDataFileHashResponses.removeIf(entry -> entry.getTimestamp() < relayMinimumTimestamp);
|
||||
|
||||
final long directConnectionInfoMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT;
|
||||
directConnectionInfo.removeIf(entry -> entry.getTimestamp() < directConnectionInfoMinimumTimestamp);
|
||||
@@ -125,8 +126,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
// Fetch data files by hash
|
||||
|
||||
public boolean fetchArbitraryDataFiles(Repository repository,
|
||||
Peer peer,
|
||||
public boolean fetchArbitraryDataFiles(Peer peer,
|
||||
byte[] signature,
|
||||
ArbitraryTransactionData arbitraryTransactionData,
|
||||
List<byte[]> hashes) throws DataException {
|
||||
@@ -146,21 +146,15 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
|
||||
LOGGER.debug("Requesting data file {} from peer {}", hash58, peer);
|
||||
Long startTime = NTP.getTime();
|
||||
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, null, arbitraryTransactionData, signature, hash, null);
|
||||
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, arbitraryTransactionData, signature, hash);
|
||||
Long endTime = NTP.getTime();
|
||||
if (receivedArbitraryDataFile != null) {
|
||||
LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFile.getHash58(), peer, (endTime-startTime));
|
||||
receivedAtLeastOneFile = true;
|
||||
|
||||
// Remove this hash from arbitraryDataFileHashResponses now that we have received it
|
||||
arbitraryDataFileHashResponses.remove(hash58);
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("Peer {} didn't respond with data file {} for signature {}. Time taken: {} ms", peer, Base58.encode(hash), Base58.encode(signature), (endTime-startTime));
|
||||
|
||||
// Remove this hash from arbitraryDataFileHashResponses now that we have failed to receive it
|
||||
arbitraryDataFileHashResponses.remove(hash58);
|
||||
|
||||
// Stop asking for files from this peer
|
||||
break;
|
||||
}
|
||||
@@ -169,10 +163,6 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
LOGGER.trace("Already requesting data file {} for signature {} from peer {}", arbitraryDataFile, Base58.encode(signature), peer);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Remove this hash from arbitraryDataFileHashResponses because we have a local copy
|
||||
arbitraryDataFileHashResponses.remove(hash58);
|
||||
}
|
||||
}
|
||||
|
||||
if (receivedAtLeastOneFile) {
|
||||
@@ -191,14 +181,103 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
return receivedAtLeastOneFile;
|
||||
}
|
||||
|
||||
private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, Peer requestingPeer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
|
||||
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
|
||||
boolean fileAlreadyExists = existingFile.exists();
|
||||
String hash58 = Base58.encode(hash);
|
||||
// Lock to synchronize access to the list
|
||||
private final Object arbitraryDataFileHashResponseLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService arbitraryDataFileHashResponseScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
|
||||
public void addResponse( ArbitraryFileListResponseInfo responseInfo ) {
|
||||
|
||||
synchronized (arbitraryDataFileHashResponseLock) {
|
||||
this.arbitraryDataFileHashResponses.add(responseInfo);
|
||||
}
|
||||
}
|
||||
|
||||
private void processResponses() {
|
||||
try {
|
||||
List<ArbitraryFileListResponseInfo> responsesToProcess;
|
||||
synchronized (arbitraryDataFileHashResponseLock) {
|
||||
responsesToProcess = new ArrayList<>(arbitraryDataFileHashResponses);
|
||||
arbitraryDataFileHashResponses.clear();
|
||||
}
|
||||
|
||||
if (responsesToProcess.isEmpty()) return;
|
||||
|
||||
Long now = NTP.getTime();
|
||||
|
||||
ArbitraryDataFileRequestThread.getInstance().processFileHashes(now, responsesToProcess, this);
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash) throws DataException {
|
||||
ArbitraryDataFile arbitraryDataFile;
|
||||
|
||||
// Fetch the file if it doesn't exist locally
|
||||
if (!fileAlreadyExists) {
|
||||
try {
|
||||
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
|
||||
boolean fileAlreadyExists = existingFile.exists();
|
||||
String hash58 = Base58.encode(hash);
|
||||
|
||||
// Fetch the file if it doesn't exist locally
|
||||
if (!fileAlreadyExists) {
|
||||
LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
|
||||
arbitraryDataFileRequests.put(hash58, NTP.getTime());
|
||||
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
|
||||
|
||||
Message response = null;
|
||||
try {
|
||||
response = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
|
||||
} catch (InterruptedException e) {
|
||||
// Will return below due to null response
|
||||
}
|
||||
arbitraryDataFileRequests.remove(hash58);
|
||||
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
|
||||
|
||||
if (response == null) {
|
||||
LOGGER.debug("Received null response from peer {}", peer);
|
||||
return null;
|
||||
}
|
||||
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
|
||||
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
|
||||
return null;
|
||||
}
|
||||
|
||||
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
|
||||
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
|
||||
} else {
|
||||
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
|
||||
arbitraryDataFile = existingFile;
|
||||
}
|
||||
|
||||
if (arbitraryDataFile != null) {
|
||||
|
||||
arbitraryDataFile.save();
|
||||
|
||||
// If this is a metadata file then we need to update the cache
|
||||
if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) {
|
||||
if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) {
|
||||
ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
|
||||
}
|
||||
}
|
||||
|
||||
// We may need to remove the file list request, if we have all the files for this transaction
|
||||
this.handleFileListRequests(signature);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
arbitraryDataFile = null;
|
||||
}
|
||||
|
||||
return arbitraryDataFile;
|
||||
}
|
||||
|
||||
private void fetchFileForRelay(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
|
||||
try {
|
||||
String hash58 = Base58.encode(hash);
|
||||
|
||||
LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
|
||||
arbitraryDataFileRequests.put(hash58, NTP.getTime());
|
||||
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
|
||||
@@ -212,73 +291,73 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
arbitraryDataFileRequests.remove(hash58);
|
||||
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
|
||||
|
||||
// We may need to remove the file list request, if we have all the files for this transaction
|
||||
this.handleFileListRequests(signature);
|
||||
|
||||
if (response == null) {
|
||||
LOGGER.debug("Received null response from peer {}", peer);
|
||||
return null;
|
||||
return;
|
||||
}
|
||||
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
|
||||
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
|
||||
return null;
|
||||
}
|
||||
|
||||
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
|
||||
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
|
||||
} else {
|
||||
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
|
||||
arbitraryDataFile = existingFile;
|
||||
}
|
||||
|
||||
if (arbitraryDataFile == null) {
|
||||
// We don't have a file, so give up here
|
||||
return null;
|
||||
}
|
||||
|
||||
// We might want to forward the request to the peer that originally requested it
|
||||
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
|
||||
|
||||
boolean isRelayRequest = (requestingPeer != null);
|
||||
if (isRelayRequest) {
|
||||
if (!fileAlreadyExists) {
|
||||
// File didn't exist locally before the request, and it's a forwarding request, so delete it if it exists.
|
||||
// It shouldn't exist on the filesystem yet, but leaving this here just in case.
|
||||
arbitraryDataFile.delete(10);
|
||||
}
|
||||
}
|
||||
else {
|
||||
arbitraryDataFile.save();
|
||||
}
|
||||
|
||||
// If this is a metadata file then we need to update the cache
|
||||
if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) {
|
||||
if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) {
|
||||
ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
|
||||
}
|
||||
}
|
||||
|
||||
return arbitraryDataFile;
|
||||
}
|
||||
|
||||
private void handleFileListRequests(byte[] signature) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Fetch the transaction data
|
||||
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
|
||||
if (arbitraryTransactionData == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(arbitraryTransactionData);
|
||||
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
|
||||
ArbitraryDataFile arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
|
||||
|
||||
if (allChunksExist) {
|
||||
// Update requests map to reflect that we've received all chunks
|
||||
ArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature);
|
||||
if (arbitraryDataFile != null) {
|
||||
|
||||
// We might want to forward the request to the peer that originally requested it
|
||||
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>();
|
||||
|
||||
// Lock to synchronize access to the list
|
||||
private final Object handleFileListRequestsLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService handleFileListRequestsScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
private void handleFileListRequests(byte[] signature) {
|
||||
|
||||
synchronized (handleFileListRequestsLock) {
|
||||
signatureBySignature58.put(Base58.encode(signature), signature);
|
||||
}
|
||||
}
|
||||
|
||||
private void handleFileListRequestProcess() {
|
||||
|
||||
Map<String, byte[]> signaturesToProcess;
|
||||
|
||||
synchronized (handleFileListRequestsLock) {
|
||||
signaturesToProcess = new HashMap<>(signatureBySignature58);
|
||||
signatureBySignature58.clear();
|
||||
}
|
||||
|
||||
if( signaturesToProcess.isEmpty() ) return;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Fetch the transaction data
|
||||
List<ArbitraryTransactionData> arbitraryTransactionDataList
|
||||
= ArbitraryTransactionUtils.fetchTransactionDataList(repository, new ArrayList<>(signaturesToProcess.values()));
|
||||
|
||||
for( ArbitraryTransactionData arbitraryTransactionData : arbitraryTransactionDataList ) {
|
||||
boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData);
|
||||
|
||||
if (completeFileExists) {
|
||||
String signature58 = Base58.encode(arbitraryTransactionData.getSignature());
|
||||
LOGGER.debug("All chunks or complete file exist for transaction {}", signature58);
|
||||
|
||||
ArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature58);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to handle file list requests: {}", e.getMessage());
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,15 +374,14 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
LOGGER.debug("Received arbitrary data file - forwarding is needed");
|
||||
|
||||
// The ID needs to match that of the original request
|
||||
message.setId(originalMessage.getId());
|
||||
try {
|
||||
// The ID needs to match that of the original request
|
||||
message.setId(originalMessage.getId());
|
||||
|
||||
if (!requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
|
||||
LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
|
||||
requestingPeer.disconnect("failed to forward arbitrary data file");
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer);
|
||||
PeerSendManagement.getInstance().getOrCreateSendManager(requestingPeer).queueMessage(message, SEND_TIMEOUT_MS);
|
||||
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -577,13 +655,9 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
LOGGER.debug("Sending file {}...", arbitraryDataFile);
|
||||
ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
|
||||
arbitraryDataFileMessage.setId(message.getId());
|
||||
if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
|
||||
LOGGER.debug("Couldn't send file {}", arbitraryDataFile);
|
||||
peer.disconnect("failed to send file");
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("Sent file {}", arbitraryDataFile);
|
||||
}
|
||||
|
||||
PeerSendManagement.getInstance().getOrCreateSendManager(peer).queueMessage(arbitraryDataFileMessage, SEND_TIMEOUT_MS);
|
||||
|
||||
}
|
||||
else if (relayInfo != null) {
|
||||
LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
|
||||
@@ -595,7 +669,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58);
|
||||
// No need to pass arbitraryTransactionData below because this is only used for metadata caching,
|
||||
// and metadata isn't retained when relaying.
|
||||
this.fetchArbitraryDataFile(peerToAsk, peer, null, signature, hash, message);
|
||||
this.fetchFileForRelay(peerToAsk, peer, signature, hash, message);
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("Peer {} not found in relay info", peer);
|
||||
@@ -617,7 +691,6 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
fileUnknownMessage.setId(message.getId());
|
||||
if (!peer.sendMessage(fileUnknownMessage)) {
|
||||
LOGGER.debug("Couldn't sent file-unknown response");
|
||||
peer.disconnect("failed to send file-unknown response");
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile);
|
||||
|
@@ -4,125 +4,186 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
|
||||
import org.qortal.data.arbitrary.ArbitraryResourceData;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.message.MessageType;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.ArbitraryTransactionUtils;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.NamedThreadFactory;
|
||||
|
||||
import java.net.http.HttpResponse;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static java.lang.Thread.NORM_PRIORITY;
|
||||
|
||||
public class ArbitraryDataFileRequestThread implements Runnable {
|
||||
public class ArbitraryDataFileRequestThread {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileRequestThread.class);
|
||||
|
||||
public ArbitraryDataFileRequestThread() {
|
||||
private static final Integer FETCHER_LIMIT_PER_PEER = Settings.getInstance().getMaxThreadsForMessageType(MessageType.GET_ARBITRARY_DATA_FILE);
|
||||
private static final String FETCHER_THREAD_PREFIX = "Arbitrary Data Fetcher ";
|
||||
|
||||
private ConcurrentHashMap<String, ExecutorService> executorByPeer = new ConcurrentHashMap<>();
|
||||
|
||||
private ArbitraryDataFileRequestThread() {
|
||||
cleanupExecutorByPeerScheduler.scheduleAtFixedRate(this::cleanupExecutorsByPeer, 1, 1, TimeUnit.MINUTES);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Arbitrary Data File Request Thread");
|
||||
Thread.currentThread().setPriority(NORM_PRIORITY);
|
||||
private static ArbitraryDataFileRequestThread instance = null;
|
||||
|
||||
public static ArbitraryDataFileRequestThread getInstance() {
|
||||
|
||||
if( instance == null ) {
|
||||
instance = new ArbitraryDataFileRequestThread();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
private final ScheduledExecutorService cleanupExecutorByPeerScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
private void cleanupExecutorsByPeer() {
|
||||
|
||||
try {
|
||||
while (!Controller.isStopping()) {
|
||||
Long now = NTP.getTime();
|
||||
this.processFileHashes(now);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// Fall-through to exit thread...
|
||||
this.executorByPeer.forEach((key, value) -> {
|
||||
if (value instanceof ThreadPoolExecutor) {
|
||||
ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) value;
|
||||
if (threadPoolExecutor.getActiveCount() == 0) {
|
||||
threadPoolExecutor.shutdown();
|
||||
if (this.executorByPeer.computeIfPresent(key, (k, v) -> null) == null) {
|
||||
LOGGER.trace("removed executor: peer = " + key);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
LOGGER.warn("casting issue in cleanup");
|
||||
}
|
||||
});
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
private void processFileHashes(Long now) throws InterruptedException {
|
||||
public void processFileHashes(Long now, List<ArbitraryFileListResponseInfo> responseInfos, ArbitraryDataFileManager arbitraryDataFileManager) {
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ArbitraryDataFileManager arbitraryDataFileManager = ArbitraryDataFileManager.getInstance();
|
||||
String signature58 = null;
|
||||
String hash58 = null;
|
||||
Peer peer = null;
|
||||
boolean shouldProcess = false;
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>(responseInfos.size());
|
||||
Map<String, List<ArbitraryFileListResponseInfo>> responseInfoBySignature58 = new HashMap<>();
|
||||
|
||||
synchronized (arbitraryDataFileManager.arbitraryDataFileHashResponses) {
|
||||
if (!arbitraryDataFileManager.arbitraryDataFileHashResponses.isEmpty()) {
|
||||
for( ArbitraryFileListResponseInfo responseInfo : responseInfos) {
|
||||
|
||||
// Sort by lowest number of node hops first
|
||||
Comparator<ArbitraryFileListResponseInfo> lowestHopsFirstComparator =
|
||||
Comparator.comparingInt(ArbitraryFileListResponseInfo::getRequestHops);
|
||||
arbitraryDataFileManager.arbitraryDataFileHashResponses.sort(lowestHopsFirstComparator);
|
||||
if( responseInfo == null ) continue;
|
||||
|
||||
Iterator iterator = arbitraryDataFileManager.arbitraryDataFileHashResponses.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ArbitraryFileListResponseInfo responseInfo = (ArbitraryFileListResponseInfo) iterator.next();
|
||||
if (responseInfo == null) {
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
hash58 = responseInfo.getHash58();
|
||||
peer = responseInfo.getPeer();
|
||||
signature58 = responseInfo.getSignature58();
|
||||
Long timestamp = responseInfo.getTimestamp();
|
||||
|
||||
if (now - timestamp >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || signature58 == null || peer == null) {
|
||||
// Ignore - to be deleted
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if already requesting, but don't remove, as we might want to retry later
|
||||
if (arbitraryDataFileManager.arbitraryDataFileRequests.containsKey(hash58)) {
|
||||
// Already requesting - leave this attempt for later
|
||||
continue;
|
||||
}
|
||||
|
||||
// We want to process this file
|
||||
shouldProcess = true;
|
||||
iterator.remove();
|
||||
break;
|
||||
}
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Peer peer = responseInfo.getPeer();
|
||||
|
||||
// if relay timeout, then move on
|
||||
if (now - responseInfo.getTimestamp() >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || responseInfo.getSignature58() == null || peer == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if already requesting, but don't remove, as we might want to retry later
|
||||
if (arbitraryDataFileManager.arbitraryDataFileRequests.containsKey(responseInfo.getHash58())) {
|
||||
// Already requesting - leave this attempt for later
|
||||
arbitraryDataFileManager.addResponse(responseInfo); // don't remove -> adding back, beacause it was removed already above
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
byte[] hash = Base58.decode(responseInfo.getHash58());
|
||||
byte[] signature = Base58.decode(responseInfo.getSignature58());
|
||||
|
||||
// check for null
|
||||
if (signature == null || hash == null || peer == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// We want to process this file, store and map data to process later
|
||||
signatureBySignature58.put(responseInfo.getSignature58(), signature);
|
||||
responseInfoBySignature58
|
||||
.computeIfAbsent(responseInfo.getSignature58(), signature58 -> new ArrayList<>())
|
||||
.add(responseInfo);
|
||||
}
|
||||
|
||||
if (!shouldProcess) {
|
||||
// Nothing to do
|
||||
Thread.sleep(1000L);
|
||||
return;
|
||||
}
|
||||
// if there are no signatures, then there is nothing to process and nothing query the database
|
||||
if( signatureBySignature58.isEmpty() ) return;
|
||||
|
||||
byte[] hash = Base58.decode(hash58);
|
||||
byte[] signature = Base58.decode(signature58);
|
||||
List<ArbitraryTransactionData> arbitraryTransactionDataList = new ArrayList<>();
|
||||
|
||||
// Fetch the transaction data
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
|
||||
if (arbitraryTransactionData == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (signature == null || hash == null || peer == null || arbitraryTransactionData == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.trace("Fetching file {} from peer {} via request thread...", hash58, peer);
|
||||
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, Arrays.asList(hash));
|
||||
|
||||
arbitraryTransactionDataList.addAll(
|
||||
ArbitraryTransactionUtils.fetchTransactionDataList(repository, new ArrayList<>(signatureBySignature58.values())));
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to process file hashes: {}", e.getMessage());
|
||||
LOGGER.warn("Unable to fetch transaction data: {}", e.getMessage());
|
||||
}
|
||||
|
||||
if( !arbitraryTransactionDataList.isEmpty() ) {
|
||||
long start = System.currentTimeMillis();
|
||||
|
||||
for(ArbitraryTransactionData data : arbitraryTransactionDataList ) {
|
||||
String signature58 = Base58.encode(data.getSignature());
|
||||
for( ArbitraryFileListResponseInfo responseInfo : responseInfoBySignature58.get(signature58)) {
|
||||
Runnable fetcher = () -> arbitraryDataFileFetcher(arbitraryDataFileManager, responseInfo, data);
|
||||
this.executorByPeer
|
||||
.computeIfAbsent(
|
||||
responseInfo.getPeer().toString(),
|
||||
peer -> Executors.newFixedThreadPool(
|
||||
FETCHER_LIMIT_PER_PEER,
|
||||
new NamedThreadFactory(FETCHER_THREAD_PREFIX + responseInfo.getPeer().toString(), NORM_PRIORITY)
|
||||
)
|
||||
)
|
||||
.execute(fetcher);
|
||||
}
|
||||
}
|
||||
long timeLapse = System.currentTimeMillis() - start;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void arbitraryDataFileFetcher(ArbitraryDataFileManager arbitraryDataFileManager, ArbitraryFileListResponseInfo responseInfo, ArbitraryTransactionData arbitraryTransactionData) {
|
||||
try {
|
||||
Long now = NTP.getTime();
|
||||
|
||||
if (now - responseInfo.getTimestamp() >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT ) {
|
||||
|
||||
Peer peer = responseInfo.getPeer();
|
||||
String hash58 = responseInfo.getHash58();
|
||||
String signature58 = responseInfo.getSignature58();
|
||||
LOGGER.debug("Peer {} version {} didn't fetch data file {} for signature {} due to relay timeout.", peer, peer.getPeersVersionString(), hash58, signature58);
|
||||
return;
|
||||
}
|
||||
|
||||
arbitraryDataFileManager.fetchArbitraryDataFiles(
|
||||
responseInfo.getPeer(),
|
||||
arbitraryTransactionData.getSignature(),
|
||||
arbitraryTransactionData,
|
||||
Arrays.asList(Base58.decode(responseInfo.getHash58()))
|
||||
);
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn("Unable to process file hashes: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
@@ -10,6 +10,8 @@ import org.qortal.arbitrary.misc.Service;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.event.DataMonitorEvent;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.repository.DataException;
|
||||
@@ -28,6 +30,7 @@ import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class ArbitraryDataManager extends Thread {
|
||||
|
||||
@@ -39,10 +42,10 @@ public class ArbitraryDataManager extends Thread {
|
||||
private int powDifficulty = 14; // Must not be final, as unit tests need to reduce this value
|
||||
|
||||
/** Request timeout when transferring arbitrary data */
|
||||
public static final long ARBITRARY_REQUEST_TIMEOUT = 12 * 1000L; // ms
|
||||
public static final long ARBITRARY_REQUEST_TIMEOUT = 24 * 1000L; // ms
|
||||
|
||||
/** Maximum time to hold information about an in-progress relay */
|
||||
public static final long ARBITRARY_RELAY_TIMEOUT = 60 * 1000L; // ms
|
||||
public static final long ARBITRARY_RELAY_TIMEOUT = 120 * 1000L; // ms
|
||||
|
||||
/** Maximum time to hold direct peer connection information */
|
||||
public static final long ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT = 2 * 60 * 1000L; // ms
|
||||
@@ -195,13 +198,35 @@ public class ArbitraryDataManager extends Thread {
|
||||
final int limit = 100;
|
||||
int offset = 0;
|
||||
|
||||
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
if( name == null ) {
|
||||
allArbitraryTransactionsInDescendingOrder
|
||||
= repository.getArbitraryRepository()
|
||||
.getLatestArbitraryTransactions();
|
||||
}
|
||||
else {
|
||||
allArbitraryTransactionsInDescendingOrder
|
||||
= repository.getArbitraryRepository()
|
||||
.getLatestArbitraryTransactionsByName(name);
|
||||
}
|
||||
} catch( Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
|
||||
}
|
||||
|
||||
// collect processed transactions in a set to ensure outdated data transactions do not get fetched
|
||||
Set<ArbitraryTransactionDataHashWrapper> processedTransactions = new HashSet<>();
|
||||
|
||||
while (!isStopping) {
|
||||
Thread.sleep(1000L);
|
||||
|
||||
// Any arbitrary transactions we want to fetch data for?
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, name, null, ConfirmationStatus.BOTH, limit, offset, true);
|
||||
// LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
|
||||
List<byte[]> signatures = processTransactionsForSignatures(limit, offset, allArbitraryTransactionsInDescendingOrder, processedTransactions);
|
||||
|
||||
if (signatures == null || signatures.isEmpty()) {
|
||||
offset = 0;
|
||||
break;
|
||||
@@ -223,14 +248,38 @@ public class ArbitraryDataManager extends Thread {
|
||||
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) arbitraryTransaction.getTransactionData();
|
||||
|
||||
// Skip transactions that we don't need to proactively store data for
|
||||
if (!storageManager.shouldPreFetchData(repository, arbitraryTransactionData)) {
|
||||
ArbitraryDataExamination arbitraryDataExamination = storageManager.shouldPreFetchData(repository, arbitraryTransactionData);
|
||||
if (!arbitraryDataExamination.isPass()) {
|
||||
iterator.remove();
|
||||
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
arbitraryDataExamination.getNotes(),
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Remove transactions that we already have local data for
|
||||
if (hasLocalData(arbitraryTransaction)) {
|
||||
iterator.remove();
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"already have local data, skipping",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -248,8 +297,21 @@ public class ArbitraryDataManager extends Thread {
|
||||
|
||||
// Check to see if we have had a more recent PUT
|
||||
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
|
||||
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
|
||||
if (hasMoreRecentPutTransaction) {
|
||||
|
||||
Optional<ArbitraryTransactionData> moreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
|
||||
|
||||
if (moreRecentPutTransaction.isPresent()) {
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"not fetching old data",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
moreRecentPutTransaction.get().getTimestamp()
|
||||
)
|
||||
);
|
||||
// There is a more recent PUT transaction than the one we are currently processing.
|
||||
// When a PUT is issued, it replaces any layers that would have been there before.
|
||||
// Therefore any data relating to this older transaction is no longer needed and we
|
||||
@@ -257,10 +319,34 @@ public class ArbitraryDataManager extends Thread {
|
||||
continue;
|
||||
}
|
||||
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"fetching data",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
|
||||
// Ask our connected peers if they have files for this signature
|
||||
// This process automatically then fetches the files themselves if a peer is found
|
||||
fetchData(arbitraryTransactionData);
|
||||
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"fetched data",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
|
||||
}
|
||||
@@ -274,6 +360,20 @@ public class ArbitraryDataManager extends Thread {
|
||||
final int limit = 100;
|
||||
int offset = 0;
|
||||
|
||||
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
allArbitraryTransactionsInDescendingOrder
|
||||
= repository.getArbitraryRepository()
|
||||
.getLatestArbitraryTransactions();
|
||||
} catch( Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
|
||||
}
|
||||
|
||||
// collect processed transactions in a set to ensure outdated data transactions do not get fetched
|
||||
Set<ArbitraryTransactionDataHashWrapper> processedTransactions = new HashSet<>();
|
||||
|
||||
while (!isStopping) {
|
||||
final int minSeconds = 3;
|
||||
final int maxSeconds = 10;
|
||||
@@ -282,8 +382,8 @@ public class ArbitraryDataManager extends Thread {
|
||||
|
||||
// Any arbitrary transactions we want to fetch data for?
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true);
|
||||
// LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
|
||||
List<byte[]> signatures = processTransactionsForSignatures(limit, offset, allArbitraryTransactionsInDescendingOrder, processedTransactions);
|
||||
|
||||
if (signatures == null || signatures.isEmpty()) {
|
||||
offset = 0;
|
||||
break;
|
||||
@@ -328,26 +428,74 @@ public class ArbitraryDataManager extends Thread {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check to see if we have had a more recent PUT
|
||||
// No longer need to see if we have had a more recent PUT since we compared the transactions to process
|
||||
// to the transactions previously processed, so we can fetch the transactiondata, notify the event bus,
|
||||
// fetch the metadata and notify the event bus again
|
||||
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
|
||||
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
|
||||
if (hasMoreRecentPutTransaction) {
|
||||
// There is a more recent PUT transaction than the one we are currently processing.
|
||||
// When a PUT is issued, it replaces any layers that would have been there before.
|
||||
// Therefore any data relating to this older transaction is no longer needed and we
|
||||
// shouldn't fetch it from the network.
|
||||
continue;
|
||||
}
|
||||
|
||||
// Ask our connected peers if they have metadata for this signature
|
||||
fetchMetadata(arbitraryTransactionData);
|
||||
|
||||
EventBus.INSTANCE.notify(
|
||||
new DataMonitorEvent(
|
||||
System.currentTimeMillis(),
|
||||
arbitraryTransactionData.getIdentifier(),
|
||||
arbitraryTransactionData.getName(),
|
||||
arbitraryTransactionData.getService().name(),
|
||||
"fetched metadata",
|
||||
arbitraryTransactionData.getTimestamp(),
|
||||
arbitraryTransactionData.getTimestamp()
|
||||
)
|
||||
);
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static List<byte[]> processTransactionsForSignatures(
|
||||
int limit,
|
||||
int offset,
|
||||
List<ArbitraryTransactionData> transactionsInDescendingOrder,
|
||||
Set<ArbitraryTransactionDataHashWrapper> processedTransactions) {
|
||||
// these transactions are in descending order, latest transactions come first
|
||||
List<ArbitraryTransactionData> transactions
|
||||
= transactionsInDescendingOrder.stream()
|
||||
.skip(offset)
|
||||
.limit(limit)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// wrap the transactions, so they can be used for hashing and comparing
|
||||
// Class ArbitraryTransactionDataHashWrapper supports hashCode() and equals(...) for this purpose
|
||||
List<ArbitraryTransactionDataHashWrapper> wrappedTransactions
|
||||
= transactions.stream()
|
||||
.map(transaction -> new ArbitraryTransactionDataHashWrapper(transaction))
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// create a set of wrappers and populate it first to last, so that all outdated transactions get rejected
|
||||
Set<ArbitraryTransactionDataHashWrapper> transactionsToProcess = new HashSet<>(wrappedTransactions.size());
|
||||
for(ArbitraryTransactionDataHashWrapper wrappedTransaction : wrappedTransactions) {
|
||||
transactionsToProcess.add(wrappedTransaction);
|
||||
}
|
||||
|
||||
// remove the matches for previously processed transactions,
|
||||
// because these transactions have had updates that have already been processed
|
||||
transactionsToProcess.removeAll(processedTransactions);
|
||||
|
||||
// add to processed transactions to compare and remove matches from future processing iterations
|
||||
processedTransactions.addAll(transactionsToProcess);
|
||||
|
||||
List<byte[]> signatures
|
||||
= transactionsToProcess.stream()
|
||||
.map(transactionToProcess -> transactionToProcess.getData()
|
||||
.getSignature())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
return signatures;
|
||||
}
|
||||
|
||||
private ArbitraryTransaction fetchTransaction(final Repository repository, byte[] signature) {
|
||||
try {
|
||||
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
|
||||
|
@@ -47,15 +47,15 @@ public class ArbitraryDataStorageManager extends Thread {
|
||||
|
||||
private static final long DIRECTORY_SIZE_CHECK_INTERVAL = 10 * 60 * 1000L; // 10 minutes
|
||||
|
||||
/** Treat storage as full at 90% usage, to reduce risk of going over the limit.
|
||||
/** Treat storage as full at 80% usage, to reduce risk of going over the limit.
|
||||
* This is necessary because we don't calculate total storage values before every write.
|
||||
* It also helps avoid a fetch/delete loop, as we will stop fetching before the hard limit.
|
||||
* This must be lower than DELETION_THRESHOLD. */
|
||||
private static final double STORAGE_FULL_THRESHOLD = 0.90f; // 90%
|
||||
private static final double STORAGE_FULL_THRESHOLD = 0.8f; // 80%
|
||||
|
||||
/** Start deleting files once we reach 98% usage.
|
||||
/** Start deleting files once we reach 90% usage.
|
||||
* This must be higher than STORAGE_FULL_THRESHOLD in order to avoid a fetch/delete loop. */
|
||||
public static final double DELETION_THRESHOLD = 0.98f; // 98%
|
||||
public static final double DELETION_THRESHOLD = 0.9f; // 90%
|
||||
|
||||
private static final long PER_NAME_STORAGE_MULTIPLIER = 4L;
|
||||
|
||||
@@ -155,31 +155,24 @@ public class ArbitraryDataStorageManager extends Thread {
|
||||
* @param arbitraryTransactionData - the transaction
|
||||
* @return boolean - whether to prefetch or not
|
||||
*/
|
||||
public boolean shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
|
||||
public ArbitraryDataExamination shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
|
||||
String name = arbitraryTransactionData.getName();
|
||||
|
||||
// Only fetch data associated with hashes, as we already have RAW_DATA
|
||||
if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) {
|
||||
return false;
|
||||
return new ArbitraryDataExamination(false, "Only fetch data associated with hashes");
|
||||
}
|
||||
|
||||
// Don't fetch anything more if we're (nearly) out of space
|
||||
// Make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
|
||||
// avoid a fetch/delete loop
|
||||
if (!this.isStorageSpaceAvailable(STORAGE_FULL_THRESHOLD)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Don't fetch anything if we're (nearly) out of space for this name
|
||||
// Again, make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
|
||||
// avoid a fetch/delete loop
|
||||
if (!this.isStorageSpaceAvailableForName(repository, arbitraryTransactionData.getName(), STORAGE_FULL_THRESHOLD)) {
|
||||
return false;
|
||||
return new ArbitraryDataExamination(false,"Don't fetch anything more if we're (nearly) out of space");
|
||||
}
|
||||
|
||||
// Don't store data unless it's an allowed type (public/private)
|
||||
if (!this.isDataTypeAllowed(arbitraryTransactionData)) {
|
||||
return false;
|
||||
return new ArbitraryDataExamination(false, "Don't store data unless it's an allowed type (public/private)");
|
||||
}
|
||||
|
||||
// Handle transactions without names differently
|
||||
@@ -189,21 +182,21 @@ public class ArbitraryDataStorageManager extends Thread {
|
||||
|
||||
// Never fetch data from blocked names, even if they are followed
|
||||
if (ListUtils.isNameBlocked(name)) {
|
||||
return false;
|
||||
return new ArbitraryDataExamination(false, "blocked name");
|
||||
}
|
||||
|
||||
switch (Settings.getInstance().getStoragePolicy()) {
|
||||
case FOLLOWED:
|
||||
case FOLLOWED_OR_VIEWED:
|
||||
return ListUtils.isFollowingName(name);
|
||||
return new ArbitraryDataExamination(ListUtils.isFollowingName(name), Settings.getInstance().getStoragePolicy().name());
|
||||
|
||||
case ALL:
|
||||
return true;
|
||||
return new ArbitraryDataExamination(true, Settings.getInstance().getStoragePolicy().name());
|
||||
|
||||
case NONE:
|
||||
case VIEWED:
|
||||
default:
|
||||
return false;
|
||||
return new ArbitraryDataExamination(false, Settings.getInstance().getStoragePolicy().name());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,17 +207,17 @@ public class ArbitraryDataStorageManager extends Thread {
|
||||
*
|
||||
* @return boolean - whether the storage policy allows for unnamed data
|
||||
*/
|
||||
private boolean shouldPreFetchDataWithoutName() {
|
||||
private ArbitraryDataExamination shouldPreFetchDataWithoutName() {
|
||||
switch (Settings.getInstance().getStoragePolicy()) {
|
||||
case ALL:
|
||||
return true;
|
||||
return new ArbitraryDataExamination(true, "Fetching all data");
|
||||
|
||||
case NONE:
|
||||
case VIEWED:
|
||||
case FOLLOWED:
|
||||
case FOLLOWED_OR_VIEWED:
|
||||
default:
|
||||
return false;
|
||||
return new ArbitraryDataExamination(false, Settings.getInstance().getStoragePolicy().name());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -484,51 +477,6 @@ public class ArbitraryDataStorageManager extends Thread {
|
||||
return true;
|
||||
}
|
||||
|
||||
public boolean isStorageSpaceAvailableForName(Repository repository, String name, double threshold) {
|
||||
if (!this.isStorageSpaceAvailable(threshold)) {
|
||||
// No storage space available at all, so no need to check this name
|
||||
return false;
|
||||
}
|
||||
|
||||
if (Settings.getInstance().getStoragePolicy() == StoragePolicy.ALL) {
|
||||
// Using storage policy ALL, so don't limit anything per name
|
||||
return true;
|
||||
}
|
||||
|
||||
if (name == null) {
|
||||
// This transaction doesn't have a name, so fall back to total space limitations
|
||||
return true;
|
||||
}
|
||||
|
||||
int followedNamesCount = ListUtils.followedNamesCount();
|
||||
if (followedNamesCount == 0) {
|
||||
// Not following any names, so we have space
|
||||
return true;
|
||||
}
|
||||
|
||||
long totalSizeForName = 0;
|
||||
long maxStoragePerName = this.storageCapacityPerName(threshold);
|
||||
|
||||
// Fetch all hosted transactions
|
||||
List<ArbitraryTransactionData> hostedTransactions = this.listAllHostedTransactions(repository, null, null);
|
||||
for (ArbitraryTransactionData transactionData : hostedTransactions) {
|
||||
String transactionName = transactionData.getName();
|
||||
if (!Objects.equals(name, transactionName)) {
|
||||
// Transaction relates to a different name
|
||||
continue;
|
||||
}
|
||||
|
||||
totalSizeForName += transactionData.getSize();
|
||||
}
|
||||
|
||||
// Have we reached the limit for this name?
|
||||
if (totalSizeForName > maxStoragePerName) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
public long storageCapacityPerName(double threshold) {
|
||||
int followedNamesCount = ListUtils.followedNamesCount();
|
||||
if (followedNamesCount == 0) {
|
||||
|
@@ -24,6 +24,11 @@ import org.qortal.utils.Triple;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.*;
|
||||
|
||||
@@ -61,6 +66,7 @@ public class ArbitraryMetadataManager {
|
||||
|
||||
|
||||
private ArbitraryMetadataManager() {
|
||||
scheduler.scheduleAtFixedRate(this::processNetworkGetArbitraryMetadataMessage, 60, 1, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public static ArbitraryMetadataManager getInstance() {
|
||||
@@ -354,9 +360,8 @@ public class ArbitraryMetadataManager {
|
||||
|
||||
// Forward to requesting peer
|
||||
LOGGER.debug("Forwarding metadata to requesting peer: {}", requestingPeer);
|
||||
if (!requestingPeer.sendMessage(forwardArbitraryMetadataMessage)) {
|
||||
requestingPeer.disconnect("failed to forward arbitrary metadata");
|
||||
}
|
||||
requestingPeer.sendMessage(forwardArbitraryMetadataMessage);
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -371,107 +376,159 @@ public class ArbitraryMetadataManager {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> messageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object lock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkGetArbitraryMetadataMessage(Peer peer, Message message) {
|
||||
|
||||
// Don't respond if QDN is disabled
|
||||
if (!Settings.getInstance().isQdnEnabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet();
|
||||
|
||||
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) message;
|
||||
byte[] signature = getArbitraryMetadataMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
Long now = NTP.getTime();
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
|
||||
|
||||
// If we've seen this request recently, then ignore
|
||||
if (arbitraryMetadataRequests.putIfAbsent(message.getId(), newEntry) != null) {
|
||||
LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peer, signature58);
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.debug("Received metadata request from peer {} for signature {}", peer, signature58);
|
||||
|
||||
ArbitraryTransactionData transactionData = null;
|
||||
ArbitraryDataFile metadataFile = null;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Firstly we need to lookup this file on chain to get its metadata hash
|
||||
transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
|
||||
if (transactionData instanceof ArbitraryTransactionData) {
|
||||
|
||||
// Check if we're even allowed to serve metadata for this transaction
|
||||
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
|
||||
|
||||
byte[] metadataHash = transactionData.getMetadataHash();
|
||||
if (metadataHash != null) {
|
||||
|
||||
// Load metadata file
|
||||
metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary metadata for peer %s", peer), e);
|
||||
}
|
||||
|
||||
// We should only respond if we have the metadata file
|
||||
if (metadataFile != null && metadataFile.exists()) {
|
||||
|
||||
// We have the metadata file, so update requests map to reflect that we've sent it
|
||||
newEntry = new Triple<>(null, null, now);
|
||||
arbitraryMetadataRequests.put(message.getId(), newEntry);
|
||||
|
||||
ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, metadataFile);
|
||||
arbitraryMetadataMessage.setId(message.getId());
|
||||
if (!peer.sendMessage(arbitraryMetadataMessage)) {
|
||||
LOGGER.debug("Couldn't send metadata");
|
||||
peer.disconnect("failed to send metadata");
|
||||
return;
|
||||
}
|
||||
LOGGER.debug("Sent metadata");
|
||||
|
||||
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
|
||||
LOGGER.debug("No need for any forwarding because metadata request is fully served");
|
||||
return;
|
||||
|
||||
}
|
||||
|
||||
// We may need to forward this request on
|
||||
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
|
||||
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
|
||||
// In relay mode - so ask our other peers if they have it
|
||||
|
||||
long requestTime = getArbitraryMetadataMessage.getRequestTime();
|
||||
int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1;
|
||||
long totalRequestTime = now - requestTime;
|
||||
|
||||
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
|
||||
// Relay request hasn't timed out yet, so can potentially be rebroadcast
|
||||
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
|
||||
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
|
||||
|
||||
Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
|
||||
relayGetArbitraryMetadataMessage.setId(message.getId());
|
||||
|
||||
LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
|
||||
Network.getInstance().broadcast(
|
||||
broadcastPeer ->
|
||||
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
|
||||
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);
|
||||
|
||||
}
|
||||
else {
|
||||
// This relay request has reached the maximum number of allowed hops
|
||||
}
|
||||
}
|
||||
else {
|
||||
// This relay request has timed out
|
||||
}
|
||||
synchronized (lock) {
|
||||
messageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
private void processNetworkGetArbitraryMetadataMessage() {
|
||||
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (lock) {
|
||||
messagesToProcess = new ArrayList<>(messageList);
|
||||
messageList.clear();
|
||||
}
|
||||
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>((messagesToProcess.size()));
|
||||
Map<String, Long> nowBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String,PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
for( PeerMessage peerMessage : messagesToProcess) {
|
||||
Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet();
|
||||
|
||||
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) peerMessage.message;
|
||||
byte[] signature = getArbitraryMetadataMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
Long now = NTP.getTime();
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peerMessage.peer, now);
|
||||
|
||||
// If we've seen this request recently, then ignore
|
||||
if (arbitraryMetadataRequests.putIfAbsent(peerMessage.message.getId(), newEntry) != null) {
|
||||
LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peerMessage.peer, signature58);
|
||||
continue;
|
||||
}
|
||||
|
||||
LOGGER.debug("Received metadata request from peer {} for signature {}", peerMessage.peer, signature58);
|
||||
|
||||
signatureBySignature58.put(signature58, signature);
|
||||
nowBySignature58.put(signature58, now);
|
||||
peerMessageBySignature58.put(signature58, peerMessage);
|
||||
}
|
||||
|
||||
if( signatureBySignature58.isEmpty() ) return;
|
||||
|
||||
List<TransactionData> transactionDataList;
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Firstly we need to lookup this file on chain to get its metadata hash
|
||||
transactionDataList = repository.getTransactionRepository().fromSignatures(new ArrayList(signatureBySignature58.values()));
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary transactions"), e);
|
||||
return;
|
||||
}
|
||||
|
||||
Map<String, ArbitraryTransactionData> dataBySignature58
|
||||
= transactionDataList.stream()
|
||||
.filter(data -> data instanceof ArbitraryTransactionData)
|
||||
.map(ArbitraryTransactionData.class::cast)
|
||||
.collect(Collectors.toMap(data -> Base58.encode(data.getSignature()), Function.identity()));
|
||||
|
||||
for(Map.Entry<String, ArbitraryTransactionData> entry : dataBySignature58.entrySet()) {
|
||||
String signature58 = entry.getKey();
|
||||
ArbitraryTransactionData transactionData = entry.getValue();
|
||||
|
||||
try {
|
||||
|
||||
// Check if we're even allowed to serve metadata for this transaction
|
||||
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
|
||||
|
||||
byte[] metadataHash = transactionData.getMetadataHash();
|
||||
if (metadataHash != null) {
|
||||
|
||||
// Load metadata file
|
||||
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, transactionData.getSignature());
|
||||
// We should only respond if we have the metadata file
|
||||
if (metadataFile != null && metadataFile.exists()) {
|
||||
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
|
||||
Message message = peerMessage.message;
|
||||
Peer peer = peerMessage.peer;
|
||||
|
||||
// We have the metadata file, so update requests map to reflect that we've sent it
|
||||
Triple newEntry = new Triple<>(null, null, nowBySignature58.get(signature58));
|
||||
arbitraryMetadataRequests.put(message.getId(), newEntry);
|
||||
|
||||
ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(entry.getValue().getSignature(), metadataFile);
|
||||
arbitraryMetadataMessage.setId(message.getId());
|
||||
if (!peer.sendMessage(arbitraryMetadataMessage)) {
|
||||
LOGGER.debug("Couldn't send metadata");
|
||||
continue;
|
||||
}
|
||||
LOGGER.debug("Sent metadata");
|
||||
|
||||
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
|
||||
LOGGER.debug("No need for any forwarding because metadata request is fully served");
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary metadata"), e);
|
||||
}
|
||||
|
||||
// We may need to forward this request on
|
||||
boolean isBlocked = (transactionDataList == null || ListUtils.isNameBlocked(transactionData.getName()));
|
||||
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
|
||||
// In relay mode - so ask our other peers if they have it
|
||||
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
|
||||
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) peerMessage.message;
|
||||
long requestTime = getArbitraryMetadataMessage.getRequestTime();
|
||||
int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1;
|
||||
long totalRequestTime = nowBySignature58.get(signature58) - requestTime;
|
||||
|
||||
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
|
||||
// Relay request hasn't timed out yet, so can potentially be rebroadcast
|
||||
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
|
||||
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
|
||||
|
||||
byte[] signature = signatureBySignature58.get(signature58);
|
||||
Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
|
||||
relayGetArbitraryMetadataMessage.setId(getArbitraryMetadataMessage.getId());
|
||||
|
||||
Peer peer = peerMessage.peer;
|
||||
LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
|
||||
Network.getInstance().broadcast(
|
||||
broadcastPeer ->
|
||||
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
|
||||
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);
|
||||
|
||||
} else {
|
||||
// This relay request has reached the maximum number of allowed hops
|
||||
}
|
||||
} else {
|
||||
// This relay request has timed out
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -0,0 +1,48 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.qortal.arbitrary.misc.Service;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
public class ArbitraryTransactionDataHashWrapper {
|
||||
|
||||
private ArbitraryTransactionData data;
|
||||
|
||||
private int service;
|
||||
|
||||
private String name;
|
||||
|
||||
private String identifier;
|
||||
|
||||
public ArbitraryTransactionDataHashWrapper(ArbitraryTransactionData data) {
|
||||
this.data = data;
|
||||
|
||||
this.service = data.getService().value;
|
||||
this.name = data.getName();
|
||||
this.identifier = data.getIdentifier();
|
||||
}
|
||||
|
||||
public ArbitraryTransactionDataHashWrapper(int service, String name, String identifier) {
|
||||
this.service = service;
|
||||
this.name = name;
|
||||
this.identifier = identifier;
|
||||
}
|
||||
|
||||
public ArbitraryTransactionData getData() {
|
||||
return data;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
ArbitraryTransactionDataHashWrapper that = (ArbitraryTransactionDataHashWrapper) o;
|
||||
return service == that.service && name.equals(that.name) && Objects.equals(identifier, that.identifier);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(service, name, identifier);
|
||||
}
|
||||
}
|
130
src/main/java/org/qortal/controller/arbitrary/Follower.java
Normal file
130
src/main/java/org/qortal/controller/arbitrary/Follower.java
Normal file
@@ -0,0 +1,130 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.ListUtils;
|
||||
import org.qortal.utils.NamedThreadFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.OptionalInt;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class Follower {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(Follower.class);
|
||||
|
||||
private ScheduledExecutorService service
|
||||
= Executors.newScheduledThreadPool(2, new NamedThreadFactory("Follower", Thread.NORM_PRIORITY));
|
||||
|
||||
private Follower() {
|
||||
|
||||
}
|
||||
|
||||
private static Follower instance;
|
||||
|
||||
public static Follower getInstance() {
|
||||
|
||||
if( instance == null ) {
|
||||
instance = new Follower();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
public void start() {
|
||||
|
||||
// fetch arbitrary transactions from followed names from the last 100 blocks every 2 minutes
|
||||
service.scheduleWithFixedDelay(() -> fetch(OptionalInt.of(100)), 10, 2, TimeUnit.MINUTES);
|
||||
|
||||
// fetch arbitrary transaction from followed names from any block every 24 hours
|
||||
service.scheduleWithFixedDelay(() -> fetch(OptionalInt.empty()), 4, 24, TimeUnit.HOURS);
|
||||
}
|
||||
|
||||
private void fetch(OptionalInt limit) {
|
||||
|
||||
try {
|
||||
// for each followed name, get arbitraty transactions, then examine those transactions before fetching
|
||||
for (String name : ListUtils.followedNames()) {
|
||||
|
||||
List<ArbitraryTransactionData> transactionsInReverseOrder;
|
||||
|
||||
// open database to get the transactions in reverse order for the followed name
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
List<ArbitraryTransactionData> latestArbitraryTransactionsByName
|
||||
= repository.getArbitraryRepository().getLatestArbitraryTransactionsByName(name);
|
||||
|
||||
if (limit.isPresent()) {
|
||||
final int blockHeightThreshold = repository.getBlockRepository().getBlockchainHeight() - limit.getAsInt();
|
||||
|
||||
transactionsInReverseOrder
|
||||
= latestArbitraryTransactionsByName.stream().filter(tx -> tx.getBlockHeight() > blockHeightThreshold)
|
||||
.collect(Collectors.toList());
|
||||
} else {
|
||||
transactionsInReverseOrder = latestArbitraryTransactionsByName;
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
transactionsInReverseOrder = new ArrayList<>(0);
|
||||
}
|
||||
|
||||
// collect process transaction hashes, so we don't fetch outdated transactions
|
||||
Set<ArbitraryTransactionDataHashWrapper> processedTransactions = new HashSet<>();
|
||||
|
||||
ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance();
|
||||
|
||||
// for each arbitrary transaction for the followed name process, evaluate, fetch
|
||||
for (ArbitraryTransactionData arbitraryTransaction : transactionsInReverseOrder) {
|
||||
|
||||
boolean examined = false;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// if not processed
|
||||
if (!processedTransactions.contains(new ArbitraryTransactionDataHashWrapper(arbitraryTransaction))) {
|
||||
boolean isLocal = repository.getArbitraryRepository().isDataLocal(arbitraryTransaction.getSignature());
|
||||
|
||||
// if not local, then continue to evaluate
|
||||
if (!isLocal) {
|
||||
|
||||
// evaluate fetching status for this transaction on this node
|
||||
ArbitraryDataExamination examination = storageManager.shouldPreFetchData(repository, arbitraryTransaction);
|
||||
|
||||
// if the evaluation passed, then fetch
|
||||
examined = examination.isPass();
|
||||
}
|
||||
// if locally stored, then nothing needs to be done
|
||||
|
||||
// add to processed transactions
|
||||
processedTransactions.add(new ArbitraryTransactionDataHashWrapper(arbitraryTransaction));
|
||||
}
|
||||
}
|
||||
|
||||
// if passed examination for fetching, then fetch
|
||||
if (examined) {
|
||||
LOGGER.info("for {} on {}, fetching {}", name, arbitraryTransaction.getService(), arbitraryTransaction.getIdentifier());
|
||||
boolean fetched = ArbitraryDataFileListManager.getInstance().fetchArbitraryDataFileList(arbitraryTransaction);
|
||||
|
||||
LOGGER.info("fetched = " + fetched);
|
||||
}
|
||||
|
||||
// pause a second before moving on to another transaction
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,22 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.message.Message;
|
||||
|
||||
public class PeerMessage {
|
||||
Peer peer;
|
||||
Message message;
|
||||
|
||||
public PeerMessage(Peer peer, Message message) {
|
||||
this.peer = peer;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public Peer getPeer() {
|
||||
return peer;
|
||||
}
|
||||
|
||||
public Message getMessage() {
|
||||
return message;
|
||||
}
|
||||
}
|
@@ -0,0 +1,33 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
|
||||
import java.util.TimerTask;
|
||||
|
||||
public class RebuildArbitraryResourceCacheTask extends TimerTask {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(RebuildArbitraryResourceCacheTask.class);
|
||||
|
||||
public static final long MILLIS_IN_HOUR = 60 * 60 * 1000;
|
||||
|
||||
public static final long MILLIS_IN_MINUTE = 60 * 1000;
|
||||
|
||||
private static final String REBUILD_ARBITRARY_RESOURCE_CACHE_TASK = "Rebuild Arbitrary Resource Cache Task";
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
Thread.currentThread().setName(REBUILD_ARBITRARY_RESOURCE_CACHE_TASK);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, true);
|
||||
}
|
||||
catch( DataException e ) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,139 @@
|
||||
package org.qortal.controller.hsqldb;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.util.PropertySource;
|
||||
import org.qortal.data.account.AccountBalanceData;
|
||||
import org.qortal.data.account.BlockHeightRange;
|
||||
import org.qortal.data.account.BlockHeightRangeAddressAmounts;
|
||||
import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.BalanceRecorderUtils;
|
||||
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class HSQLDBBalanceRecorder extends Thread{
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(HSQLDBBalanceRecorder.class);
|
||||
|
||||
private static HSQLDBBalanceRecorder SINGLETON = null;
|
||||
|
||||
private ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight = new ConcurrentHashMap<>();
|
||||
|
||||
private ConcurrentHashMap<String, List<AccountBalanceData>> balancesByAddress = new ConcurrentHashMap<>();
|
||||
|
||||
private CopyOnWriteArrayList<BlockHeightRangeAddressAmounts> balanceDynamics = new CopyOnWriteArrayList<>();
|
||||
|
||||
private int priorityRequested;
|
||||
private int frequency;
|
||||
private int capacity;
|
||||
|
||||
private HSQLDBBalanceRecorder( int priorityRequested, int frequency, int capacity) {
|
||||
|
||||
super("Balance Recorder");
|
||||
|
||||
this.priorityRequested = priorityRequested;
|
||||
this.frequency = frequency;
|
||||
this.capacity = capacity;
|
||||
}
|
||||
|
||||
public static Optional<HSQLDBBalanceRecorder> getInstance() {
|
||||
|
||||
if( SINGLETON == null ) {
|
||||
|
||||
SINGLETON
|
||||
= new HSQLDBBalanceRecorder(
|
||||
Settings.getInstance().getBalanceRecorderPriority(),
|
||||
Settings.getInstance().getBalanceRecorderFrequency(),
|
||||
Settings.getInstance().getBalanceRecorderCapacity()
|
||||
);
|
||||
|
||||
}
|
||||
else if( SINGLETON == null ) {
|
||||
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
return Optional.of(SINGLETON);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
Thread.currentThread().setName("Balance Recorder");
|
||||
|
||||
HSQLDBCacheUtils.startRecordingBalances(this.balancesByHeight, this.balanceDynamics, this.priorityRequested, this.frequency, this.capacity);
|
||||
}
|
||||
|
||||
public List<BlockHeightRangeAddressAmounts> getLatestDynamics(int limit, long offset) {
|
||||
|
||||
List<BlockHeightRangeAddressAmounts> latest = this.balanceDynamics.stream()
|
||||
.sorted(BalanceRecorderUtils.BLOCK_HEIGHT_RANGE_ADDRESS_AMOUNTS_COMPARATOR.reversed())
|
||||
.skip(offset)
|
||||
.limit(limit)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
return latest;
|
||||
}
|
||||
|
||||
public List<BlockHeightRange> getRanges(Integer offset, Integer limit, Boolean reverse) {
|
||||
|
||||
if( reverse ) {
|
||||
return this.balanceDynamics.stream()
|
||||
.map(BlockHeightRangeAddressAmounts::getRange)
|
||||
.sorted(BalanceRecorderUtils.BLOCK_HEIGHT_RANGE_COMPARATOR.reversed())
|
||||
.skip(offset)
|
||||
.limit(limit)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
else {
|
||||
return this.balanceDynamics.stream()
|
||||
.map(BlockHeightRangeAddressAmounts::getRange)
|
||||
.sorted(BalanceRecorderUtils.BLOCK_HEIGHT_RANGE_COMPARATOR)
|
||||
.skip(offset)
|
||||
.limit(limit)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
}
|
||||
|
||||
public Optional<BlockHeightRangeAddressAmounts> getAddressAmounts(BlockHeightRange range) {
|
||||
|
||||
return this.balanceDynamics.stream()
|
||||
.filter( dynamic -> dynamic.getRange().equals(range))
|
||||
.findAny();
|
||||
}
|
||||
|
||||
public Optional<BlockHeightRange> getRange( int height ) {
|
||||
return this.balanceDynamics.stream()
|
||||
.map(BlockHeightRangeAddressAmounts::getRange)
|
||||
.filter( range -> range.getBegin() < height && range.getEnd() >= height )
|
||||
.findAny();
|
||||
}
|
||||
|
||||
private Optional<Integer> getLastHeight() {
|
||||
return this.balancesByHeight.keySet().stream().sorted(Comparator.reverseOrder()).findFirst();
|
||||
}
|
||||
|
||||
public List<Integer> getBlocksRecorded() {
|
||||
|
||||
return this.balancesByHeight.keySet().stream().collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public List<AccountBalanceData> getAccountBalanceRecordings(String address) {
|
||||
return this.balancesByAddress.get(address);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "HSQLDBBalanceRecorder{" +
|
||||
"priorityRequested=" + priorityRequested +
|
||||
", frequency=" + frequency +
|
||||
", capacity=" + capacity +
|
||||
'}';
|
||||
}
|
||||
}
|
@@ -8,11 +8,7 @@ import org.qortal.settings.Settings;
|
||||
|
||||
public class HSQLDBDataCacheManager extends Thread{
|
||||
|
||||
private HSQLDBRepository respository;
|
||||
|
||||
public HSQLDBDataCacheManager(HSQLDBRepository respository) {
|
||||
this.respository = respository;
|
||||
}
|
||||
public HSQLDBDataCacheManager() {}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
@@ -20,8 +16,7 @@ public class HSQLDBDataCacheManager extends Thread{
|
||||
|
||||
HSQLDBCacheUtils.startCaching(
|
||||
Settings.getInstance().getDbCacheThreadPriority(),
|
||||
Settings.getInstance().getDbCacheFrequency(),
|
||||
this.respository
|
||||
Settings.getInstance().getDbCacheFrequency()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -39,15 +39,24 @@ public class AtStatesPruner implements Runnable {
|
||||
}
|
||||
}
|
||||
|
||||
int pruneStartHeight;
|
||||
int maxLatestAtStatesHeight;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int pruneStartHeight = repository.getATRepository().getAtPruneHeight();
|
||||
int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
|
||||
pruneStartHeight = repository.getATRepository().getAtPruneHeight();
|
||||
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
|
||||
|
||||
repository.discardChanges();
|
||||
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
|
||||
repository.saveChanges();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
return;
|
||||
}
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try {
|
||||
repository.discardChanges();
|
||||
|
||||
@@ -102,28 +111,25 @@ public class AtStatesPruner implements Runnable {
|
||||
|
||||
final int finalPruneStartHeight = pruneStartHeight;
|
||||
LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// We've pruned up to the upper prunable height
|
||||
// Back off for a while to save CPU for syncing
|
||||
repository.discardChanges();
|
||||
Thread.sleep(5*60*1000L);
|
||||
Thread.sleep(5 * 60 * 1000L);
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
if(Controller.isStopping()) {
|
||||
if (Controller.isStopping()) {
|
||||
LOGGER.info("AT States Pruning Shutting Down");
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
LOGGER.warn("AT States Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("AT States Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch(Exception e){
|
||||
LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -26,15 +26,23 @@ public class AtStatesTrimmer implements Runnable {
|
||||
return;
|
||||
}
|
||||
|
||||
int trimStartHeight;
|
||||
int maxLatestAtStatesHeight;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int trimStartHeight = repository.getATRepository().getAtTrimHeight();
|
||||
int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
|
||||
trimStartHeight = repository.getATRepository().getAtTrimHeight();
|
||||
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
|
||||
|
||||
repository.discardChanges();
|
||||
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
|
||||
repository.saveChanges();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
return;
|
||||
}
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
while (!Controller.isStopping()) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
try {
|
||||
repository.discardChanges();
|
||||
|
||||
@@ -92,9 +100,9 @@ public class AtStatesTrimmer implements Runnable {
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("AT States Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -30,11 +30,13 @@ public class BlockArchiver implements Runnable {
|
||||
return;
|
||||
}
|
||||
|
||||
int startHeight;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Don't even start building until initial rush has ended
|
||||
Thread.sleep(INITIAL_SLEEP_PERIOD);
|
||||
|
||||
int startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
|
||||
startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
|
||||
|
||||
// Don't attempt to archive if we have no ATStatesHeightIndex, as it will be too slow
|
||||
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
|
||||
@@ -43,10 +45,16 @@ public class BlockArchiver implements Runnable {
|
||||
repository.discardChanges();
|
||||
return;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.info("Starting block archiver from height {}...", startHeight);
|
||||
LOGGER.info("Starting block archiver from height {}...", startHeight);
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try {
|
||||
repository.discardChanges();
|
||||
|
||||
@@ -107,20 +115,17 @@ public class BlockArchiver implements Runnable {
|
||||
LOGGER.info("Caught exception when creating block cache", e);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
if(Controller.isStopping()) {
|
||||
if (Controller.isStopping()) {
|
||||
LOGGER.info("Block Archiving Shutting Down");
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
LOGGER.warn("Block Archiving interrupted. Trying again. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Block Archiving stopped working. Trying again. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch(Exception e){
|
||||
LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -39,8 +39,10 @@ public class BlockPruner implements Runnable {
|
||||
}
|
||||
}
|
||||
|
||||
int pruneStartHeight;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
|
||||
pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
|
||||
|
||||
// Don't attempt to prune if we have no ATStatesHeightIndex, as it will be too slow
|
||||
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
|
||||
@@ -48,8 +50,15 @@ public class BlockPruner implements Runnable {
|
||||
LOGGER.info("Unable to start block pruner due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
|
||||
return;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
return;
|
||||
}
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try {
|
||||
repository.discardChanges();
|
||||
|
||||
@@ -122,10 +131,9 @@ public class BlockPruner implements Runnable {
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Block Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch(Exception e){
|
||||
LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -28,13 +28,21 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
|
||||
return;
|
||||
}
|
||||
|
||||
int trimStartHeight;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Don't even start trimming until initial rush has ended
|
||||
Thread.sleep(INITIAL_SLEEP_PERIOD);
|
||||
|
||||
int trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
|
||||
trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
return;
|
||||
}
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try {
|
||||
repository.discardChanges();
|
||||
|
||||
@@ -88,10 +96,9 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Online Accounts Signatures Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -8,6 +8,7 @@ import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.api.model.crosschain.TradeBotCreateRequest;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.Synchronizer;
|
||||
import org.qortal.controller.arbitrary.PeerMessage;
|
||||
import org.qortal.controller.tradebot.AcctTradeBot.ResponseResult;
|
||||
import org.qortal.crosschain.*;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@@ -37,7 +38,12 @@ import org.qortal.utils.NTP;
|
||||
import java.awt.TrayIcon.MessageType;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Performing cross-chain trading steps on behalf of user.
|
||||
@@ -118,6 +124,9 @@ public class TradeBot implements Listener {
|
||||
private Map<String, Long> validTrades = new HashMap<>();
|
||||
|
||||
private TradeBot() {
|
||||
|
||||
tradePresenceMessageScheduler.scheduleAtFixedRate( this::processTradePresencesMessages, 60, 1, TimeUnit.SECONDS);
|
||||
|
||||
EventBus.INSTANCE.addListener(event -> TradeBot.getInstance().listen(event));
|
||||
}
|
||||
|
||||
@@ -551,77 +560,139 @@ public class TradeBot implements Listener {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> tradePresenceMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object tradePresenceMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService tradePresenceMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onTradePresencesMessage(Peer peer, Message message) {
|
||||
TradePresencesMessage tradePresencesMessage = (TradePresencesMessage) message;
|
||||
|
||||
List<TradePresenceData> peersTradePresences = tradePresencesMessage.getTradePresences();
|
||||
synchronized (tradePresenceMessageLock) {
|
||||
tradePresenceMessageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
long now = NTP.getTime();
|
||||
// Timestamps before this are too far into the past
|
||||
long pastThreshold = now;
|
||||
// Timestamps after this are too far into the future
|
||||
long futureThreshold = now + PRESENCE_LIFETIME;
|
||||
public void processTradePresencesMessages() {
|
||||
|
||||
Map<ByteArray, Supplier<ACCT>> acctSuppliersByCodeHash = SupportedBlockchain.getAcctMap();
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (tradePresenceMessageLock) {
|
||||
messagesToProcess = new ArrayList<>(tradePresenceMessageList);
|
||||
tradePresenceMessageList.clear();
|
||||
}
|
||||
|
||||
int newCount = 0;
|
||||
if( messagesToProcess.isEmpty() ) return;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
for (TradePresenceData peersTradePresence : peersTradePresences) {
|
||||
long timestamp = peersTradePresence.getTimestamp();
|
||||
Map<Peer, List<TradePresenceData>> tradePresencesByPeer = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
// Ignore if timestamp is out of bounds
|
||||
if (timestamp < pastThreshold || timestamp > futureThreshold) {
|
||||
if (timestamp < pastThreshold)
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too old vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too new vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
// map all trade presences from the messages to their peer
|
||||
for( PeerMessage peerMessage : messagesToProcess ) {
|
||||
TradePresencesMessage tradePresencesMessage = (TradePresencesMessage) peerMessage.getMessage();
|
||||
|
||||
List<TradePresenceData> peersTradePresences = tradePresencesMessage.getTradePresences();
|
||||
|
||||
tradePresencesByPeer.put(peerMessage.getPeer(), peersTradePresences);
|
||||
}
|
||||
|
||||
long now = NTP.getTime();
|
||||
// Timestamps before this are too far into the past
|
||||
long pastThreshold = now;
|
||||
// Timestamps after this are too far into the future
|
||||
long futureThreshold = now + PRESENCE_LIFETIME;
|
||||
|
||||
Map<ByteArray, Supplier<ACCT>> acctSuppliersByCodeHash = SupportedBlockchain.getAcctMap();
|
||||
|
||||
int newCount = 0;
|
||||
|
||||
Map<String, List<Peer>> peersByAtAddress = new HashMap<>(tradePresencesByPeer.size());
|
||||
Map<String, TradePresenceData> tradePresenceByAtAddress = new HashMap<>(tradePresencesByPeer.size());
|
||||
|
||||
// for each batch of trade presence data from a peer, validate and populate the maps declared above
|
||||
for ( Map.Entry<Peer, List<TradePresenceData>> entry: tradePresencesByPeer.entrySet()) {
|
||||
|
||||
Peer peer = entry.getKey();
|
||||
|
||||
for( TradePresenceData peersTradePresence : entry.getValue() ) {
|
||||
// TradePresenceData peersTradePresence
|
||||
long timestamp = peersTradePresence.getTimestamp();
|
||||
|
||||
// Ignore if timestamp is out of bounds
|
||||
if (timestamp < pastThreshold || timestamp > futureThreshold) {
|
||||
if (timestamp < pastThreshold)
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too old vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too new vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
|
||||
|
||||
// Ignore if we've previously verified this timestamp+publickey combo or sent timestamp is older
|
||||
TradePresenceData existingTradeData = this.safeAllTradePresencesByPubkey.get(pubkeyByteArray);
|
||||
if (existingTradeData != null && timestamp <= existingTradeData.getTimestamp()) {
|
||||
if (timestamp == existingTradeData.getTimestamp())
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as we have verified timestamp {} before",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is older than latest {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, existingTradeData.getTimestamp()
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check timestamp signature
|
||||
byte[] timestampSignature = peersTradePresence.getSignature();
|
||||
byte[] timestampBytes = Longs.toByteArray(timestamp);
|
||||
byte[] publicKey = peersTradePresence.getPublicKey();
|
||||
if (!Crypto.verify(publicKey, timestampSignature, timestampBytes)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signature failed to verify",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
continue;
|
||||
}
|
||||
|
||||
peersByAtAddress.computeIfAbsent(peersTradePresence.getAtAddress(), address -> new ArrayList<>()).add(peer);
|
||||
tradePresenceByAtAddress.put(peersTradePresence.getAtAddress(), peersTradePresence);
|
||||
}
|
||||
}
|
||||
|
||||
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
|
||||
if( tradePresenceByAtAddress.isEmpty() ) return;
|
||||
|
||||
// Ignore if we've previously verified this timestamp+publickey combo or sent timestamp is older
|
||||
TradePresenceData existingTradeData = this.safeAllTradePresencesByPubkey.get(pubkeyByteArray);
|
||||
if (existingTradeData != null && timestamp <= existingTradeData.getTimestamp()) {
|
||||
if (timestamp == existingTradeData.getTimestamp())
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as we have verified timestamp {} before",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is older than latest {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, existingTradeData.getTimestamp()
|
||||
);
|
||||
List<ATData> atDataList;
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
atDataList = repository.getATRepository().fromATAddresses( new ArrayList<>(tradePresenceByAtAddress.keySet()) );
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
|
||||
return;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
Map<String, Supplier<ACCT>> supplierByAtAddress = new HashMap<>(atDataList.size());
|
||||
|
||||
// Check timestamp signature
|
||||
byte[] timestampSignature = peersTradePresence.getSignature();
|
||||
byte[] timestampBytes = Longs.toByteArray(timestamp);
|
||||
byte[] publicKey = peersTradePresence.getPublicKey();
|
||||
if (!Crypto.verify(publicKey, timestampSignature, timestampBytes)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signature failed to verify",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
List<ATData> validatedAtDataList = new ArrayList<>(atDataList.size());
|
||||
|
||||
continue;
|
||||
}
|
||||
// for each trade
|
||||
for( ATData atData : atDataList ) {
|
||||
|
||||
ATData atData = repository.getATRepository().fromATAddress(peersTradePresence.getAtAddress());
|
||||
TradePresenceData peersTradePresence = tradePresenceByAtAddress.get(atData.getATAddress());
|
||||
if (atData == null || atData.getIsFrozen() || atData.getIsFinished()) {
|
||||
if (atData == null)
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as AT doesn't exist",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
LOGGER.trace("Ignoring trade presence {} from peer as AT doesn't exist",
|
||||
peersTradePresence.getAtAddress()
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as AT is frozen or finished",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
LOGGER.trace("Ignoring trade presence {} from peer as AT is frozen or finished",
|
||||
peersTradePresence.getAtAddress()
|
||||
);
|
||||
|
||||
continue;
|
||||
@@ -630,51 +701,87 @@ public class TradeBot implements Listener {
|
||||
ByteArray atCodeHash = ByteArray.wrap(atData.getCodeHash());
|
||||
Supplier<ACCT> acctSupplier = acctSuppliersByCodeHash.get(atCodeHash);
|
||||
if (acctSupplier == null) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as AT isn't a known ACCT?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
LOGGER.trace("Ignoring trade presence {} from peer as AT isn't a known ACCT?",
|
||||
peersTradePresence.getAtAddress()
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
CrossChainTradeData tradeData = acctSupplier.get().populateTradeData(repository, atData);
|
||||
if (tradeData == null) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as trade data not found?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
// Convert signer's public key to address form
|
||||
String signerAddress = peersTradePresence.getTradeAddress();
|
||||
|
||||
// Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form)
|
||||
if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signer isn't Alice or Bob?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
// This is new to us
|
||||
this.allTradePresencesByPubkey.put(pubkeyByteArray, peersTradePresence);
|
||||
++newCount;
|
||||
|
||||
LOGGER.trace("Added trade presence {} from peer {} with timestamp {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp
|
||||
);
|
||||
|
||||
EventBus.INSTANCE.notify(new TradePresenceEvent(peersTradePresence));
|
||||
validatedAtDataList.add(atData);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
|
||||
}
|
||||
|
||||
if (newCount > 0) {
|
||||
LOGGER.debug("New trade presences: {}, all trade presences: {}", newCount, allTradePresencesByPubkey.size());
|
||||
rebuildSafeAllTradePresences();
|
||||
// populated data for each trade
|
||||
List<CrossChainTradeData> crossChainTradeDataList;
|
||||
|
||||
// validated trade data grouped by code (cross chain coin)
|
||||
Map<ByteArray, List<ATData>> atDataByCodeHash
|
||||
= validatedAtDataList.stream().collect(
|
||||
Collectors.groupingBy(data -> ByteArray.wrap(data.getCodeHash())));
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
crossChainTradeDataList = new ArrayList<>();
|
||||
|
||||
// for each code (cross chain coin), get each trade, then populate trade data
|
||||
for( Map.Entry<ByteArray, List<ATData>> entry : atDataByCodeHash.entrySet() ) {
|
||||
|
||||
Supplier<ACCT> acctSupplier = acctSuppliersByCodeHash.get(entry.getKey());
|
||||
|
||||
crossChainTradeDataList.addAll(
|
||||
acctSupplier.get().populateTradeDataList(
|
||||
repository,
|
||||
entry.getValue()
|
||||
)
|
||||
.stream().filter( data -> data != null )
|
||||
.collect(Collectors.toList())
|
||||
);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
|
||||
return;
|
||||
}
|
||||
|
||||
// for each populated trade data, validate and fire event
|
||||
for( CrossChainTradeData tradeData : crossChainTradeDataList ) {
|
||||
|
||||
List<Peer> peers = peersByAtAddress.get(tradeData.qortalAtAddress);
|
||||
|
||||
for( Peer peer : peers ) {
|
||||
|
||||
TradePresenceData peersTradePresence = tradePresenceByAtAddress.get(tradeData.qortalAtAddress);
|
||||
|
||||
// Convert signer's public key to address form
|
||||
String signerAddress = peersTradePresence.getTradeAddress();
|
||||
|
||||
// Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form)
|
||||
if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signer isn't Alice or Bob?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
|
||||
|
||||
// This is new to us
|
||||
this.allTradePresencesByPubkey.put(pubkeyByteArray, peersTradePresence);
|
||||
++newCount;
|
||||
|
||||
LOGGER.trace("Added trade presence {} from peer {} with timestamp {}",
|
||||
peersTradePresence.getAtAddress(), peer, tradeData.creationTimestamp
|
||||
);
|
||||
|
||||
EventBus.INSTANCE.notify(new TradePresenceEvent(peersTradePresence));
|
||||
}
|
||||
}
|
||||
|
||||
if (newCount > 0) {
|
||||
LOGGER.info("New trade presences: {}, all trade presences: {}", newCount, allTradePresencesByPubkey.size());
|
||||
rebuildSafeAllTradePresences();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -6,6 +6,9 @@ import org.qortal.data.crosschain.CrossChainTradeData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
public interface ACCT {
|
||||
|
||||
public byte[] getCodeBytesHash();
|
||||
@@ -16,8 +19,12 @@ public interface ACCT {
|
||||
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException;
|
||||
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository respository, List<ATData> atDataList) throws DataException;
|
||||
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException;
|
||||
|
||||
CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException;
|
||||
|
||||
public byte[] buildCancelMessage(String creatorQortalAddress);
|
||||
|
||||
public byte[] findSecretA(Repository repository, CrossChainTradeData crossChainTradeData) throws DataException;
|
||||
|
@@ -1,5 +1,6 @@
|
||||
package org.qortal.crosschain;
|
||||
|
||||
import org.bitcoinj.core.Coin;
|
||||
import org.bitcoinj.core.Context;
|
||||
import org.bitcoinj.core.NetworkParameters;
|
||||
import org.bitcoinj.core.Transaction;
|
||||
@@ -14,15 +15,21 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Bitcoin extends Bitcoiny {
|
||||
|
||||
public static final String CURRENCY_CODE = "BTC";
|
||||
|
||||
private static final long MINIMUM_ORDER_AMOUNT = 100000; // 0.001 BTC minimum order, due to high fees
|
||||
// Locking fee to lock in a QORT for BTC. This is the default value that the user should reset to
|
||||
// a value inline with the BTC fee market. This is 5 sats per kB.
|
||||
private static final Coin DEFAULT_FEE_PER_KB = Coin.valueOf(5_000); // 0.00005 BTC per 1000 bytes
|
||||
|
||||
// Temporary values until a dynamic fee system is written.
|
||||
private static final long NEW_FEE_AMOUNT = 6_000L;
|
||||
private static final long MINIMUM_ORDER_AMOUNT = 100_000; // 0.001 BTC minimum order, due to high fees
|
||||
|
||||
// Default value until user resets fee to compete with the current market. This is a total value for a
|
||||
// p2sh transaction, size 300 kB, 5 sats per kB
|
||||
private static final long NEW_FEE_AMOUNT = 1_500L;
|
||||
|
||||
private static final long NON_MAINNET_FEE = 1000L; // enough for TESTNET3 and should be OK for REGTEST
|
||||
|
||||
@@ -111,7 +118,7 @@ public class Bitcoin extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@@ -173,14 +180,14 @@ public class Bitcoin extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = NEW_FEE_AMOUNT;
|
||||
private AtomicLong feeRequired = new AtomicLong(NEW_FEE_AMOUNT);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@@ -196,7 +203,7 @@ public class Bitcoin extends Bitcoiny {
|
||||
// Constructors and instance
|
||||
|
||||
private Bitcoin(BitcoinNet bitcoinNet, BitcoinyBlockchainProvider blockchain, Context bitcoinjContext, String currencyCode) {
|
||||
super(blockchain, bitcoinjContext, currencyCode, bitcoinjContext.getFeePerKb());
|
||||
super(blockchain, bitcoinjContext, currencyCode, DEFAULT_FEE_PER_KB);
|
||||
this.bitcoinNet = bitcoinNet;
|
||||
|
||||
LOGGER.info(() -> String.format("Starting Bitcoin support using %s", this.bitcoinNet.name()));
|
||||
@@ -242,14 +249,14 @@ public class Bitcoin extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.bitcoinNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.bitcoinNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.bitcoinNet.setFeeCeiling( fee );
|
||||
this.bitcoinNet.setFeeRequired( fee );
|
||||
}
|
||||
/**
|
||||
* Returns bitcoinj transaction sending <tt>amount</tt> to <tt>recipient</tt> using 20 sat/byte fee.
|
||||
|
@@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@@ -608,7 +610,14 @@ public class BitcoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -617,13 +626,14 @@ public class BitcoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@@ -636,8 +646,13 @@ public class BitcoinACCTv1 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@@ -569,7 +571,14 @@ public class BitcoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -578,13 +587,14 @@ public class BitcoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@@ -597,8 +607,13 @@ public class BitcoinACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@@ -8,6 +8,8 @@ import org.bitcoinj.core.*;
|
||||
import org.bitcoinj.crypto.ChildNumber;
|
||||
import org.bitcoinj.crypto.DeterministicHierarchy;
|
||||
import org.bitcoinj.crypto.DeterministicKey;
|
||||
import org.bitcoinj.crypto.HDPath;
|
||||
import org.bitcoinj.params.AbstractBitcoinNetParams;
|
||||
import org.bitcoinj.script.Script.ScriptType;
|
||||
import org.bitcoinj.script.ScriptBuilder;
|
||||
import org.bitcoinj.wallet.DeterministicKeyChain;
|
||||
@@ -25,7 +27,7 @@ import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/** Bitcoin-like (Bitcoin, Litecoin, etc.) support */
|
||||
public abstract class Bitcoiny implements ForeignBlockchain {
|
||||
public abstract class Bitcoiny extends AbstractBitcoinNetParams implements ForeignBlockchain {
|
||||
|
||||
protected static final Logger LOGGER = LogManager.getLogger(Bitcoiny.class);
|
||||
|
||||
@@ -65,6 +67,7 @@ public abstract class Bitcoiny implements ForeignBlockchain {
|
||||
// Constructors and instance
|
||||
|
||||
protected Bitcoiny(BitcoinyBlockchainProvider blockchainProvider, Context bitcoinjContext, String currencyCode, Coin feePerKb) {
|
||||
this.genesisBlock = this.getGenesisBlock();
|
||||
this.blockchainProvider = blockchainProvider;
|
||||
this.bitcoinjContext = bitcoinjContext;
|
||||
this.currencyCode = currencyCode;
|
||||
@@ -74,6 +77,15 @@ public abstract class Bitcoiny implements ForeignBlockchain {
|
||||
}
|
||||
|
||||
// Getters & setters
|
||||
@Override
|
||||
public String getPaymentProtocolId() {
|
||||
return this.id;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Block getGenesisBlock() {
|
||||
return this.genesisBlock;
|
||||
}
|
||||
|
||||
public BitcoinyBlockchainProvider getBlockchainProvider() {
|
||||
return this.blockchainProvider;
|
||||
@@ -83,6 +95,7 @@ public abstract class Bitcoiny implements ForeignBlockchain {
|
||||
return this.bitcoinjContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCurrencyCode() {
|
||||
return this.currencyCode;
|
||||
}
|
||||
@@ -589,15 +602,27 @@ public abstract class Bitcoiny implements ForeignBlockchain {
|
||||
|
||||
return new AddressInfo(
|
||||
address.toString(),
|
||||
toIntegerList( key.getPath()),
|
||||
toIntegerList( key.getPath() ),
|
||||
summingUnspentOutputs(address.toString()),
|
||||
key.getPathAsString(),
|
||||
transactionCount,
|
||||
candidates.contains(address.toString()));
|
||||
}
|
||||
|
||||
private static List<Integer> toIntegerList(ImmutableList<ChildNumber> path) {
|
||||
/**
|
||||
* <p>Convert BitcoinJ native type to List of Integers, BitcoinJ v16 compatible
|
||||
* </p>
|
||||
*
|
||||
* @param path path to deterministic key
|
||||
* @return Array of Ints representing the keys position in the tree
|
||||
* @since v4.7.2
|
||||
*/
|
||||
private static List<Integer> toIntegerList(HDPath path) {
|
||||
return path.stream().map(ChildNumber::num).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
// BitcoinJ v15 compatible
|
||||
private static List<Integer> toIntegerList(ImmutableList<ChildNumber> path) {
|
||||
return path.stream().map(ChildNumber::num).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@@ -839,9 +864,9 @@ public abstract class Bitcoiny implements ForeignBlockchain {
|
||||
} while (true);
|
||||
}
|
||||
|
||||
public abstract long getFeeCeiling();
|
||||
public abstract long getFeeRequired();
|
||||
|
||||
public abstract void setFeeCeiling(long fee);
|
||||
public abstract void setFeeRequired(long fee);
|
||||
|
||||
// UTXOProvider support
|
||||
|
||||
|
@@ -1,5 +1,6 @@
|
||||
package org.qortal.crosschain;
|
||||
|
||||
import org.bitcoinj.core.Block;
|
||||
import org.bitcoinj.core.Coin;
|
||||
import org.bitcoinj.core.Context;
|
||||
import org.bitcoinj.core.NetworkParameters;
|
||||
@@ -89,7 +90,7 @@ public class BitcoinyTBD extends Bitcoiny {
|
||||
NetTBD netTBD
|
||||
= new NetTBD(
|
||||
bitcoinyTBDRequest.getNetworkName(),
|
||||
bitcoinyTBDRequest.getFeeCeiling(),
|
||||
bitcoinyTBDRequest.getFeeRequired(),
|
||||
networkParams,
|
||||
Collections.emptyList(),
|
||||
bitcoinyTBDRequest.getExpectedGenesisHash()
|
||||
@@ -134,18 +135,30 @@ public class BitcoinyTBD extends Bitcoiny {
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) throws ForeignBlockchainException {
|
||||
|
||||
return this.netTBD.getFeeCeiling();
|
||||
return this.netTBD.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
public long getFeeRequired() {
|
||||
|
||||
return this.netTBD.getFeeCeiling();
|
||||
return this.netTBD.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.netTBD.setFeeCeiling( fee );
|
||||
this.netTBD.setFeeRequired( fee );
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getPaymentProtocolId() {
|
||||
return params.getId();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Block getGenesisBlock() {
|
||||
if(genesisBlock == null)
|
||||
genesisBlock = params.getGenesisBlock();
|
||||
return this.genesisBlock;
|
||||
}
|
||||
}
|
@@ -98,9 +98,10 @@ public class DeterminedNetworkParams extends NetworkParameters implements Altcoi
|
||||
|
||||
LOGGER.info( "Creating Genesis Block ...");
|
||||
|
||||
// BitcoinJ v16 has a new native method for this
|
||||
//this.genesisBlock = CoinParamsUtil.createGenesisBlockFromRequest(this, request);
|
||||
|
||||
LOGGER.info("Created Genesis Block: genesisBlock = " + genesisBlock );
|
||||
// LOGGER.info("Created Genesis Block: genesisBlock = " + genesisBlock );
|
||||
|
||||
// this is 100 for each coin from what I can tell
|
||||
this.spendableCoinbaseDepth = 100;
|
||||
@@ -113,8 +114,9 @@ public class DeterminedNetworkParams extends NetworkParameters implements Altcoi
|
||||
//
|
||||
// LOGGER.info("request = " + request);
|
||||
//
|
||||
// checkState(genesisHash.equals(request.getExpectedGenesisHash()));
|
||||
this.alertSigningKey = Hex.decode(request.getPubKey());
|
||||
// checkState(genesisHash.equals(request.getExpectedGenesisHash()))
|
||||
// alertSigningKey is removed in v16
|
||||
// this.alertSigningKey = Hex.decode(request.getPubKey());
|
||||
|
||||
this.majorityEnforceBlockUpgrade = request.getMajorityEnforceBlockUpgrade();
|
||||
this.majorityRejectBlockOutdated = request.getMajorityRejectBlockOutdated();
|
||||
@@ -221,6 +223,12 @@ public class DeterminedNetworkParams extends NetworkParameters implements Altcoi
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Block getGenesisBlock() {
|
||||
//ToDo: Finish
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the difficulty target expected for the next block. This includes all
|
||||
* the weird cases for Litecoin such as testnet blocks which can be maximum
|
||||
|
@@ -14,6 +14,7 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Digibyte extends Bitcoiny {
|
||||
|
||||
@@ -59,7 +60,7 @@ public class Digibyte extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@@ -109,14 +110,14 @@ public class Digibyte extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = MAINNET_FEE;
|
||||
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@@ -178,13 +179,13 @@ public class Digibyte extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.digibyteNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.digibyteNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.digibyteNet.setFeeCeiling( fee );
|
||||
this.digibyteNet.setFeeRequired( fee );
|
||||
}
|
||||
}
|
||||
|
@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@@ -569,7 +571,14 @@ public class DigibyteACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -578,13 +587,14 @@ public class DigibyteACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@@ -597,8 +607,13 @@ public class DigibyteACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@@ -13,6 +13,7 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Dogecoin extends Bitcoiny {
|
||||
|
||||
@@ -60,7 +61,7 @@ public class Dogecoin extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@@ -110,14 +111,14 @@ public class Dogecoin extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = MAINNET_FEE;
|
||||
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@@ -179,13 +180,13 @@ public class Dogecoin extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.dogecoinNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.dogecoinNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.dogecoinNet.setFeeCeiling( fee );
|
||||
this.dogecoinNet.setFeeRequired( fee );
|
||||
}
|
||||
}
|
||||
|
@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@@ -566,7 +568,14 @@ public class DogecoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -575,13 +584,14 @@ public class DogecoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@@ -594,8 +604,13 @@ public class DogecoinACCTv1 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user