Compare commits

...

124 Commits

Author SHA1 Message Date
Ice
6f628be053 Update pom.xml - Deps
Corrections for NTP slippage at startup
2025-07-20 03:18:52 -04:00
Ice
eb07c45955 Merge pull request #255 from IceBurst/master
* Abstraction of AltCoinJ 
* Abstraction of CIYAM
* Update to BouncyCastle
2025-07-13 14:08:15 -04:00
Ice
8bea11bc52 Merge branch 'master' into master 2025-07-13 14:06:11 -04:00
Qortal-Auto-Update
415f594b25 Bump version to 5.0.2 2025-07-12 15:46:54 -07:00
crowetic
1e593cdf13 Merge pull request #263 from crowetic/master
updated minPeerVersion to 5.0.0 and removed duplicate entry in pom
2025-07-12 15:43:45 -07:00
71d2fbe0b6 updated minPeerVersion to 5.0.0 and removed duplicate entry in pom 2025-07-12 15:42:26 -07:00
crowetic
5a760db37d Merge pull request #262 from kennycud/master
Full Send 

Tested and ready
2025-07-12 15:30:11 -07:00
kennycud
05d629e717 removed logging spam 2025-07-12 14:03:35 -07:00
kennycud
cea63e7ec7 PeerSendManagement support for sending all messages through a queue 2025-07-12 14:02:19 -07:00
Qortal-Auto-Update
5fabc7792c Bump version to 5.0.1 2025-07-10 13:56:06 -07:00
crowetic
09d0af9b78 Merge pull request #260 from kennycud/master
Promising QDN Improvements
2025-07-10 13:51:35 -07:00
crowetic
698e616bc9 Merge pull request #261 from crowetic/master
added new auto-update scripts
2025-07-10 13:50:21 -07:00
6c0a9b3539 added new auto-update scripts 2025-07-10 13:47:10 -07:00
kennycud
60811f9f65 log spam reduction 2025-07-10 13:38:02 -07:00
kennycud
d91a777ffd Delete qortal.log 2025-07-10 13:32:30 -07:00
kennycud
c19cad020e Merge pull request #14 from Philreact/master-11
PeerSendManager
2025-07-10 13:18:14 -07:00
52519e3662 PeerSendManagement loose-ends 2025-07-10 23:16:42 +03:00
fd62e6156c increase request timeout 2025-07-10 17:38:32 +03:00
e5890b3b6f added cooling period in case of re-connections 2025-07-10 17:38:25 +03:00
256baeb1f4 reduce interval cleanup 2025-07-10 17:37:46 +03:00
05b83ade47 remove unused code 2025-07-10 17:37:39 +03:00
f7cb4ce264 PeerSendManger added 2025-07-10 17:37:25 +03:00
086ed6574f Merge remote-tracking branch 'kenny/master' into master-10 2025-07-09 22:38:00 +03:00
kennycud
4b56690118 qdn relay optimizations 2025-07-09 12:34:47 -07:00
kennycud
44d26b513a waiting and retrying clogged write channels 2025-07-08 13:42:49 -07:00
kennycud
dbd900f74a peer fetcher executor shutdown for inactivity, thanks to philreact research, peer fetcher thread naming added 2025-07-08 05:43:30 -07:00
kennycud
38463f6b1a follower compile error fix 2025-07-07 14:51:24 -07:00
kennycud
16e48aba04 follower initial implementation 2025-07-07 14:34:55 -07:00
kennycud
56d97457a1 Merge remote-tracking branch 'origin/master' 2025-07-07 14:32:25 -07:00
kennycud
2167d2f8fe reduced logging spam 2025-07-07 14:30:45 -07:00
kennycud
8425d62673 Merge pull request #13 from Philreact/bugfix/data-renderer-name-spaces
replace name spaces with encoded space
2025-07-05 05:06:15 -07:00
4995bee3e3 replace name spaces with encoded space 2025-07-05 07:03:55 +03:00
Qortal-Auto-Update
87897d7db8 Bump version to 5.0.0 2025-07-03 16:17:36 -07:00
crowetic
49e9a53c6a Merge pull request #257 from kennycud/master
Additional Settings and Timeout threshold update
2025-07-03 15:52:11 -07:00
kennycud
b5c4599005 Merge branch 'Qortal:master' into master 2025-07-03 15:44:22 -07:00
kennycud
3aabedda92 increased additional thresholds for auto update release 2025-07-03 15:37:01 -07:00
crowetic
dd88decc40 Added FeatureTrigger block heights
Feature trigger for multipleNamesPerAccountHeight and mintedBlocksAdjustmentRemovalHeight SET. Estimated Activation time: Friday, July 4th, 2025
2025-07-03 15:26:00 -07:00
crowetic
4f3b4e4a58 Merge pull request #256 from kennycud/master
Foreign Fees Manager, Multiple Names, QDN Optimizations - Tested for a minimum of 1 week, most longer.
2025-07-03 15:07:39 -07:00
kennycud
b2c72c3927 null pointer solution by using an empty list instead of a null value 2025-06-29 11:08:42 -07:00
kennycud
65c014b215 removed redundant data collecting, reintroduced relay timeout threshold 2025-06-27 14:13:14 -07:00
kennycud
b2579a457c reverting the GET_ARBITRARY_DATA_FILE thread limit, because the previously lower limit put too much pressure on the peers; planning to raise it to a higher number right before the next release, when all nodes are ready for it 2025-06-27 14:09:08 -07:00
kennycud
170668ef78 reduced logging levels on numerous messages 2025-06-27 14:04:39 -07:00
kennycud
b48b6b9d42 added test cases for single file websites 2025-06-27 14:01:51 -07:00
kennycud
22dc3e55df Merge pull request #12 from Philreact/bugfix/allow-blob-connect
Bugfix/allow blob connect
2025-06-23 12:50:32 -07:00
kennycud
66bfed93ee Merge pull request #11 from IceBurst/patch-1
Logging for Failed Repository Connections on Optional Runs
2025-06-23 12:49:40 -07:00
b8e1712881 add blob: to connect-src directive 2025-06-23 13:48:35 +03:00
6a5013d378 Merge remote-tracking branch 'kenny/master' into master-kenny3 2025-06-20 02:18:23 +03:00
kennycud
3687455c62 increasing arbitrary data message thread limits, because the algorithms can handle it 2025-06-18 17:57:18 -07:00
kennycud
60b3bacd15 reduced arbitrary data storage addition and deletion thresholds from 98% and 90% to 90% and 80% 2025-06-18 17:55:30 -07:00
kennycud
7a7f0e53ac reduced index caching errors to warnings, because it is only an error if it continually happens 2025-06-17 15:56:04 -07:00
kennycud
940c641759 removed stack trace from streaming error warnings 2025-06-17 15:10:37 -07:00
kennycud
a3bb6638bf added support for single file websites 2025-06-17 15:09:11 -07:00
kennycud
5b402e0bca validate name buyer's balance relative to the amount of the name purchase in addition to the fee 2025-06-17 15:08:20 -07:00
kennycud
89236d6504 no longer repackaging missing data exceptions as io exceptions when loading json data for indices 2025-06-14 13:11:19 -07:00
kennycud
47e313067f fixed a flaw in the blocks minted adjustment removal feature: instead of increasing or decreasing the level, we need to reset the level when it is incorrect 2025-06-13 12:13:52 -07:00
Ice
92077f2912 Logging for Failed Repository Connections on Optional Runs 2025-06-11 15:45:08 -04:00
Ice
95e12395ae Merge pull request #1 from IceBurst/Abstract-and-Update-Deps
Abstract and update deps
2025-06-11 03:15:34 -04:00
Ice
47e5c473b3 Merge branch 'master' into Abstract-and-Update-Deps 2025-06-11 03:15:22 -04:00
kennycud
15f793ccb4 Merge remote-tracking branch 'origin/master' 2025-06-09 18:26:01 -07:00
kennycud
ccb59559d6 the bootstrapper was resetting the database configuration that the db cache was dependent on, so that dependency was changed 2025-06-09 18:25:43 -07:00
MergeMerc
30c5136c44 Add Logging for failing to get a Repository Connection for Non-Required/Non-Blocking Tasks 2025-06-09 13:34:05 -04:00
kennycud
91a58c50e1 Merge pull request #10 from Philreact/master-kenny3
add cleanup of leftover chunks at startup
2025-06-06 19:54:36 -07:00
f8daf50ccb Merge remote-tracking branch 'kenny/master' into master-kenny3 2025-06-07 05:43:47 +03:00
kennycud
8e0e455d41 blocks minted adjustments removal is a new feature trigger
primary names are now used throughout the chat repository

numerous message handlers have been optimized. Many handlers now simply add incoming messages to a list and schedule them for later processing; when that processing runs, the database is queried significantly less, because requests and responses are batched together for database access instead of being queried one by one. The thread limits for these message types have been increased significantly, because each incoming thread now does very little: it only adds the message to a list to be scheduled later. (A minimal sketch of this batching pattern follows the commit list below.)
2025-06-06 19:01:09 -07:00
6145db5357 add cleanup of chunks at startup 2025-06-03 03:33:35 +03:00
kennycud
7ccd06e5c3 Merge pull request #9 from Philreact/master-kenny3
fix in digest, was putting whole file in memory.
2025-06-01 10:43:35 -07:00
517f7b92d5 in memory to stream 2025-06-01 20:31:36 +03:00
kennycud
fa8b9f2cee Merge pull request #8 from Philreact/fix/load-data
fix issue of not breaking when file is complete
2025-05-28 17:21:55 -07:00
d66616f375 fix issue of not breaking when file is complete 2025-05-28 16:29:52 +03:00
kennycud
02e10e9de9 invalidated name buys and sales that violate primary names 2025-05-27 08:15:50 -07:00
kennycud
61c010754e Merge branch 'Qortal:master' into master 2025-05-25 12:20:58 -07:00
kennycud
5013c68b61 Merge pull request #7 from Philreact/feature/allow-for-unlimited-size-publishes
Feature/allow for unlimited size publishes
2025-05-25 11:45:27 -07:00
140d86e209 added comments 2025-05-24 22:29:33 +03:00
9e4925c8dd added back comments 2025-05-24 19:15:36 +03:00
kennycud
88fe3b0af6 primary names implementation 2025-05-23 17:49:26 -07:00
Ice
e6f032a2a9 Merge pull request #253 from IceBurst/IceBurst-Unit-Tests-Updates
Unit Test Updates
2025-05-19 15:34:27 -04:00
ca88cb1f88 allow downloads 2025-05-19 16:55:12 +03:00
58ab02c4f0 fix to temp dir 2025-05-18 23:21:49 +03:00
e1ea8d65f8 fix blank filename issue 2025-05-16 23:39:32 +03:00
1c52c18d32 added endpoints 2025-05-16 15:49:47 +03:00
2cd5f9e4cd change limit 2025-05-16 01:18:02 +03:00
f2b5802d9c change to streaming 2025-05-16 01:17:01 +03:00
bc4e0716db fix streaming for base64 2025-05-15 16:56:53 +03:00
994761a87e added missing requires 2025-05-15 01:20:40 +03:00
5780a6de7d remove zip best speed 2025-05-14 20:21:13 +03:00
8c811ef1ef initial 2025-05-14 20:00:04 +03:00
kennycud
f5a4a0a16c Merge remote-tracking branch 'origin/master' 2025-05-13 11:14:08 -07:00
kennycud
93dab1a3e3 detailed test case for the invite orphan vulnerability patch that was committed on 2/1/25 2025-05-13 11:13:55 -07:00
Ice
7d14d381bc Merge pull request #235 from infinitydaemon/patch-2
Update SellNameTransaction.java
2025-05-12 16:32:09 -04:00
kennycud
6511086d18 Merge pull request #6 from Philreact/master-kenny2
pass ui language to qapps
2025-05-10 12:36:57 -07:00
70ae122f5c pass ui lang to qapps 2025-05-10 22:21:13 +03:00
Ice
33475ace00 Merge pull request #236 from infinitydaemon/patch-3
Update CancelSellNameTransaction.java
2025-05-10 04:23:09 -04:00
kennycud
88d009c979 multiple registered names for single accounts API call now returns ordered by time of registration, earliest to latest 2025-05-06 15:26:24 -07:00
kennycud
26a345a909 introducing feature trigger that enables multiple registered names for single accounts 2025-05-04 11:52:09 -07:00
Ice
618945620d Abstract CIYAM.AT out of Repo 2025-04-29 07:13:34 -04:00
Ice
b6d3e407c8 Updates to Dependencies - Test Improvements 2025-04-28 07:25:58 -04:00
kennycud
4b74bb37dc unsigned fee event handling now provides address 2025-04-27 15:02:28 -07:00
kennycud
17b2bf3848 added logging and added positive boolean to the fee waiting and unsigned fee events 2025-04-26 17:53:41 -07:00
kennycud
1f6ee72fc5 the message types were corrected 2025-04-26 09:58:13 -07:00
kennycud
83bc84909a Merge branch 'master' of https://github.com/kennycud/qortal 2025-04-25 17:55:02 -07:00
kennycud
144d6cc5c7 foreign fees manager implementation, feeCeiling -> feeRequired name change, thread-safety measures for fee values, fee backup file implementation, unsigned fees socket implementation 2025-04-25 17:51:01 -07:00
crowetic
eff2e6d150 Merge pull request #249 from IceBurst/hsqldb-2.7.4-build-update
Hsqldb 2.7.4 build update
2025-04-24 15:17:40 -07:00
crowetic
c1041d2ad3 Merge pull request #192 from karl-dv/master
Some small corrections for "NL" translations
2025-04-24 14:22:04 -07:00
crowetic
699d8815c4 Merge branch 'master' into master 2025-04-24 14:21:54 -07:00
Ice
2a97fba108 Merge remote-tracking branch 'origin/IceBurst-Unit-Tests-Updates' into Abstract-and-Update-Deps 2025-04-24 03:45:38 -04:00
Ice
f1a0472c57 Corrections for Unit Tests - Lots of Corrections 2025-04-24 03:27:28 -04:00
Ice
c4d8a17355 Merge branch 'hsqldb-2.7.4-build-update' into IceBurst-Unit-Tests-Updates 2025-04-17 06:34:26 -04:00
Ice
9c1cb9da77 Update test-chain-v2-reward-levels.json
Add Missing Feature
2025-04-17 06:23:47 -04:00
Ice
7dae60d35f Update test-settings-v2-block-archive.json
Performance Improvement of 00% for block archive tests
2025-04-16 16:10:23 -04:00
Ice
8421336016 Update pr-testing.yml
-- Process 'Install' to load Deps Before testing
2025-04-16 15:11:04 -04:00
Ice
2e7cd93716 Delete .github/workflows/pr-testomg 2025-04-16 15:07:52 -04:00
Ice
2cf0aeac22 Update pr-testing.yml 2025-04-16 14:30:10 -04:00
Ice
cc4056047e Create pr-testomg 2025-04-15 15:45:00 -04:00
Ice
421e241729 Update test-chain-v2-founder-rewards.json
Correction for Test - testFounderrewards
2025-04-15 14:51:25 -04:00
Ice
c977660c47 Update Service.java
Add qortal as valid extension for QCHAT_ATTACHMENT, needed when fetching a previous TX
2025-04-15 10:40:10 -04:00
Ice
867d0e29e0 Merge branch 'Qortal:master' into IceBurst-Unit-Tests-Updates 2025-04-15 08:16:40 -04:00
Ice
57d12b4afe block-archive test performance improvement
Added parameter: "archivingPause": 5
Default Value is: 3000
2025-04-15 08:06:39 -04:00
Ice
0fae20a3c3 Update README.md
Added IntelliJ Information
2025-03-02 10:14:35 -05:00
Ice
a90f217212 Update pom.xml
Changes for hsqldb to use local 2.7.4 version with modified manifest
2025-02-24 14:03:39 -05:00
Ice
e40a77542b New hsqldb-2.7.4 with modified manifest 2025-02-24 13:58:32 -05:00
Ice
80b24b185f Create Notes.txt 2025-02-24 13:55:10 -05:00
cwd.systems | 0KN
15105306d1 Update CancelSellNameTransaction.java 2024-11-30 19:20:27 +06:00
cwd.systems | 0KN
3ddef1e13f Update SellNameTransaction.java 2024-11-30 19:19:25 +06:00
karl-dv
991636ccad Some small corrections for "NL" translations 2024-05-15 07:34:44 +02:00
172 changed files with 7998 additions and 1658 deletions
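
The batched message handling described in commit 8e0e455d41 above can be sketched as follows. This is a minimal, hypothetical illustration (none of these class or method names come from the Qortal codebase): network threads only enqueue incoming messages, and a single scheduled task drains the queue and serves the whole batch with one database round-trip.

// Minimal sketch of the batching pattern described in commit 8e0e455d41.
// All names are hypothetical; only the idea (enqueue cheaply, process in
// batches, hit the database once per batch) mirrors the commit message.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class BatchedMessageHandler {
    private final ConcurrentLinkedQueue<String> pending = new ConcurrentLinkedQueue<>();
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    public BatchedMessageHandler() {
        // Drain and process the queue periodically instead of per-message
        scheduler.scheduleAtFixedRate(this::processBatch, 1, 1, TimeUnit.SECONDS);
    }

    /** Called by many network threads; does almost no work per call. */
    public void onMessage(String request) {
        pending.add(request);
    }

    private void processBatch() {
        List<String> batch = new ArrayList<>();
        String request;
        while ((request = pending.poll()) != null)
            batch.add(request);

        if (batch.isEmpty())
            return;

        // One database round-trip for the whole batch, rather than one per message
        queryDatabase(batch).forEach(this::sendResponse);
    }

    private List<String> queryDatabase(List<String> requests) {
        return requests; // placeholder for a single batched query
    }

    private void sendResponse(String response) {
        // placeholder for replying to the requesting peer
    }
}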

View File

@@ -1,7 +1,7 @@
name: PR testing
on:
pull_request:
push:
branches: [ master ]
jobs:
@@ -22,6 +22,10 @@ jobs:
java-version: '11'
distribution: 'adopt'
- name: Load custom deps
run: |
mvn install -DskipTests=true --file pom.xml
- name: Run all tests
run: |
mvn -B clean test -DskipTests=false --file pom.xml

View File

@@ -15,20 +15,31 @@ Building the future one block at a time. Welcome to Qortal.
# Building the Qortal Core from Source
## Build / run
## Build / Run
- Requires Java 11. OpenJDK 11 recommended over Java SE.
- Install Maven
- Use Maven to fetch dependencies and build: `mvn clean package`
- Update Maven dependencies: `mvn install`
- Built JAR should be something like `target/qortal-1.0.jar`
- Create basic *settings.json* file: `echo '{}' > settings.json`
- Run JAR in same working directory as *settings.json*: `java -jar target/qortal-1.0.jar`
- Wrap in shell script, add JVM flags, redirection, backgrounding, etc. as necessary.
- Or use supplied example shell script: *start.sh*
## IntelliJ IDEA Configuration
- Run -> Edit Configurations
- Add New Application
- Name: qortal
- SDK: java 11
- Main Class: org.qortal.controller.Controller
- Program arguments: settings.json -Dlog4j.configurationFile=log4j2.properties -ea
- Environment variables: Djava.net.preferIPv4Stack=false
# Using a pre-built Qortal 'jar' binary
If you would prefer to utilize a released version of Qortal, you may do so by downloading one of the available releases from the releases page, that are also linked on https://qortal.org and https://qortal.dev.
If you prefer to utilize a released version of Qortal, you may do so by downloading one of the available releases from the releases page, that are also linked on https://qortal.org and https://qortal.dev.
# Learning Q-App Development

Binary file not shown.

View File

@@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<version>1.3.7</version>
<description>POM was created from install:install-file</description>
</project>

Binary file not shown.

View File

@@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<version>1.3.8</version>
<description>POM was created from install:install-file</description>
</project>

Binary file not shown.

View File

@@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<version>1.4.0</version>
<description>POM was created from install:install-file</description>
</project>

Binary file not shown.

View File

@@ -1,123 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<version>1.4.1</version>
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<skipTests>false</skipTests>
<bouncycastle.version>1.69</bouncycastle.version>
<junit.version>4.13.2</junit.version>
<maven-compiler-plugin.version>3.11.0</maven-compiler-plugin.version>
<maven-jar-plugin.version>3.3.0</maven-jar-plugin.version>
<maven-javadoc-plugin.version>3.6.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>3.3.0</maven-source-plugin.version>
<maven-surefire-plugin.version>3.2.2</maven-surefire-plugin.version>
</properties>
<build>
<sourceDirectory>src/main/java</sourceDirectory>
<testSourceDirectory>src/test/java</testSourceDirectory>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>11</source>
<target>11</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<skipTests>${skipTests}</skipTests>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadoc</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>${maven-jar-plugin.version}</version>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>${maven-jar-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
<version>${bouncycastle.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>

Binary file not shown.

View File

@@ -1,123 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<version>1.4.2</version>
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<skipTests>false</skipTests>
<bouncycastle.version>1.70</bouncycastle.version>
<junit.version>4.13.2</junit.version>
<maven-compiler-plugin.version>3.13.0</maven-compiler-plugin.version>
<maven-source-plugin.version>3.3.0</maven-source-plugin.version>
<maven-javadoc-plugin.version>3.6.3</maven-javadoc-plugin.version>
<maven-surefire-plugin.version>3.2.5</maven-surefire-plugin.version>
<maven-jar-plugin.version>3.4.1</maven-jar-plugin.version>
</properties>
<build>
<sourceDirectory>src/main/java</sourceDirectory>
<testSourceDirectory>src/test/java</testSourceDirectory>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>11</source>
<target>11</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<skipTests>${skipTests}</skipTests>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadoc</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>${maven-jar-plugin.version}</version>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>${maven-jar-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
<version>${bouncycastle.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@@ -1,16 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<metadata>
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<versioning>
<release>1.4.2</release>
<versions>
<version>1.3.7</version>
<version>1.3.8</version>
<version>1.4.0</version>
<version>1.4.1</version>
<version>1.4.2</version>
</versions>
<lastUpdated>20240426084210</lastUpdated>
</versioning>
</metadata>

View File

@@ -0,0 +1,5 @@
This is the production hsqldb-2.7.4 with the manifest file updated
Sealed: false
Allows the addition of the custom Qortal HSQLDBPool and Monitoring Classes
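
As a rough illustration of why the unsealed manifest matters (the class below is hypothetical, not the actual Qortal pool class): with Sealed: true the JVM refuses to load classes for a sealed package from any jar other than the one that sealed it, so adding a custom pool or monitoring class to an HSQLDB package would fail with a sealing violation; with Sealed: false an arrangement like the following is permitted.

// Hypothetical example: a Qortal-supplied class placed in a package that ships
// inside hsqldb.jar. The JVM only allows this "split package" arrangement when
// the jar's manifest does not seal the package ("Sealed: false"); a sealed
// package would cause a SecurityException ("sealing violation") at load time.
package org.hsqldb.jdbc;

public class QortalPoolMonitor {
    // custom connection-pool / monitoring hooks would go here
}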

Binary file not shown.

94
pom.xml
View File

@@ -3,18 +3,19 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.qortal</groupId>
<artifactId>qortal</artifactId>
<version>4.7.1</version>
<version>5.1.0</version> <!-- Version must be <X.Y.Z> -->
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<skipTests>true</skipTests>
<altcoinj.version>7dc8c6f</altcoinj.version>
<bitcoinj.version>0.15.10</bitcoinj.version>
<bouncycastle.version>1.70</bouncycastle.version>
<skipJUnitTests>true</skipJUnitTests>
<altcoinj.version>d7cf6ac</altcoinj.version> <!-- BC v16 / Updated Abstract Classes / alertSigningKey -->
<bitcoinj.version>0.16.3</bitcoinj.version>
<bouncycastle.version>1.73</bouncycastle.version>
<build.timestamp>${maven.build.timestamp}</build.timestamp>
<ciyam-at.version>1.4.2</ciyam-at.version>
<ciyam-at.version>1b731d1</ciyam-at.version> <!-- This is the hash for v1.4.3 -->
<commons-net.version>3.8.0</commons-net.version>
<!-- <commons-net.version>3.9.0</commons-net.version> v5.2.0 coming soon -->
<commons-text.version>1.12.0</commons-text.version>
<commons-io.version>2.18.0</commons-io.version>
<commons-compress.version>1.27.1</commons-compress.version>
@@ -23,6 +24,7 @@
<extendedset.version>0.12.3</extendedset.version>
<git-commit-id-plugin.version>4.9.10</git-commit-id-plugin.version>
<grpc.version>1.68.1</grpc.version>
<!-- <grpc.version>1.68.3</grpc.version> v5.2.0 coming soon -->
<guava.version>33.3.1-jre</guava.version>
<hamcrest-library.version>2.2</hamcrest-library.version>
<homoglyph.version>1.2.1</homoglyph.version>
@@ -33,6 +35,7 @@
<jaxb-runtime.version>2.3.9</jaxb-runtime.version>
<jersey.version>2.42</jersey.version>
<jetty.version>9.4.56.v20240826</jetty.version>
<!-- <jetty.version>9.4.57.v20241219</jetty.version> v5.2.0 Coming Soon -->
<json-simple.version>1.1.1</json-simple.version>
<json.version>20240303</json.version>
<jsoup.version>1.18.1</jsoup.version>
@@ -49,11 +52,16 @@
<maven-reproducible-build-plugin.version>0.17</maven-reproducible-build-plugin.version>
<maven-resources-plugin.version>3.3.1</maven-resources-plugin.version>
<maven-shade-plugin.version>3.6.0</maven-shade-plugin.version>
<maven-install-plugin.version>3.1.3</maven-install-plugin.version>
<maven-surefire-plugin.version>3.5.2</maven-surefire-plugin.version>
<!-- <maven-surefire-plugin.version>3.5.3</maven-surefire-plugin.version> v5.2.0 Coming Soon -->
<protobuf.version>3.25.3</protobuf.version>
<!-- <protobuf.version>3.25.7</protobuf.version> v 5.1 -->
<replacer.version>1.5.3</replacer.version>
<simplemagic.version>1.17</simplemagic.version>
<slf4j.version>1.7.36</slf4j.version>
<!-- <swagger-api.version>2.2.30</swagger-api.version> need code upgrade Future Release -->
<!-- <swagger-api.version>2.1.13</swagger-api.version> need code upgrade Future Release -->
<swagger-api.version>2.0.10</swagger-api.version>
<swagger-ui.version>5.18.2</swagger-ui.version>
<upnp.version>1.2</upnp.version>
@@ -289,20 +297,48 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>${maven-jar-plugin.version}</version>
<configuration>
<archive>
<manifest>
<addDefaultEntries>false</addDefaultEntries>
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
</manifest>
<manifestEntries>
<Last-Commit-Id>${git.commit.id.full}</Last-Commit-Id>
<Last-Commit-Time>${git.commit.time}</Last-Commit-Time>
<Reproducible-Build>true</Reproducible-Build>
</manifestEntries>
</archive>
</configuration>
<executions>
<execution>
<configuration>
<archive>
<manifest>
<addDefaultEntries>false</addDefaultEntries>
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
</manifest>
<manifestEntries>
<Last-Commit-Id>${git.commit.id.full}</Last-Commit-Id>
<Last-Commit-Time>${git.commit.time}</Last-Commit-Time>
<Reproducible-Build>true</Reproducible-Build>
</manifestEntries>
</archive>
</configuration>
</execution>
</executions>
</plugin>
<!-- Copy modified hsqldb.jar to install / modified MANIFEST.MF-->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-install-plugin</artifactId>
<version>${maven-install-plugin.version}</version>
<configuration>
<groupId>org.hsqldb</groupId>
<artifactId>hsqldb</artifactId>
<version>${hsqldb.version}</version>
<packaging>jar</packaging>
</configuration>
<executions>
<execution>
<phase>install</phase>
<goals>
<goal>install-file</goal>
</goals>
<configuration>
<file>${project.basedir}/lib/org/hsqldb/hsqldb/${hsqldb.version}/hsqldb.jar</file>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
@@ -352,6 +388,7 @@
</execution>
</executions>
</plugin>
<!-- Removed, now use Maven reproducible by default v4.0, IntelliJ v2025.1 and later -->
<plugin>
<groupId>io.github.zlika</groupId>
<artifactId>reproducible-build-maven-plugin</artifactId>
@@ -374,7 +411,7 @@
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<skipTests>${skipTests}</skipTests>
<skipTests>${skipJUnitTests}</skipTests>
</configuration>
</plugin>
</plugins>
@@ -450,7 +487,7 @@
<scope>provided</scope>
<!-- needed for build, not for runtime -->
</dependency>
<!-- HSQLDB for repository -->
<!-- HSQLDB for repository should use local version with Sealed: false -->
<dependency>
<groupId>org.hsqldb</groupId>
<artifactId>hsqldb</artifactId>
@@ -458,7 +495,7 @@
</dependency>
<!-- CIYAM AT (automated transactions) -->
<dependency>
<groupId>org.ciyam</groupId>
<groupId>com.github.iceburst</groupId>
<artifactId>AT</artifactId>
<version>${ciyam-at.version}</version>
</dependency>
@@ -476,7 +513,7 @@
</dependency>
<!-- For Litecoin, etc. support, requires bitcoinj -->
<dependency>
<groupId>com.github.qortal</groupId>
<groupId>com.github.iceburst</groupId>
<artifactId>altcoinj</artifactId>
<version>${altcoinj.version}</version>
</dependency>
@@ -721,12 +758,12 @@
<!-- BouncyCastle for crypto, including TLS secure networking -->
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
<artifactId>bcprov-jdk15to18</artifactId>
<version>${bouncycastle.version}</version>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bctls-jdk15on</artifactId>
<artifactId>bctls-jdk15to18</artifactId>
<version>${bouncycastle.version}</version>
</dependency>
<dependency>
@@ -770,5 +807,10 @@
<artifactId>jaxb-runtime</artifactId>
<version>${jaxb-runtime.version}</version>
</dependency>
<dependency>
<groupId>org.apache.tika</groupId>
<artifactId>tika-core</artifactId>
<version>3.1.0</version>
</dependency>
</dependencies>
</project>

View File

@@ -2,12 +2,14 @@ package org.qortal.account;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.block.BlockChain;
import org.qortal.controller.LiteNode;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.GroupRepository;
import org.qortal.repository.NameRepository;
@@ -19,7 +21,11 @@ import org.qortal.utils.Groups;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static org.qortal.utils.Amounts.prettyAmount;
@@ -361,6 +367,142 @@ public class Account {
return accountData.getLevel();
}
/**
* Get Primary Name
*
* @return the primary name for this address if present, otherwise empty
*
* @throws DataException
*/
public Optional<String> getPrimaryName() throws DataException {
return this.repository.getNameRepository().getPrimaryName(this.address);
}
/**
* Remove Primary Name
*
* @throws DataException
*/
public void removePrimaryName() throws DataException {
this.repository.getNameRepository().removePrimaryName(this.address);
}
/**
* Reset Primary Name
*
* Set primary name based on the names (and their history) this account owns.
*
* @param confirmationStatus the status of the transactions used to determine the primary name
*
* @return the primary name, empty if there isn't one
*
* @throws DataException
*/
public Optional<String> resetPrimaryName(TransactionsResource.ConfirmationStatus confirmationStatus) throws DataException {
Optional<String> primaryName = determinePrimaryName(confirmationStatus);
if(primaryName.isPresent()) {
return setPrimaryName(primaryName.get());
}
else {
return primaryName;
}
}
/**
* Determine Primary Name
*
* Determine primary name based on a list of registered names.
*
* @param confirmationStatus the status of the transactions for this determination
*
* @return the primary name, empty if there is no primary name
*
* @throws DataException
*/
public Optional<String> determinePrimaryName(TransactionsResource.ConfirmationStatus confirmationStatus) throws DataException {
// all registered names for the owner
List<NameData> names = this.repository.getNameRepository().getNamesByOwner(this.address);
Optional<String> primaryName;
// if no registered names, then no primary name is possible
if (names.isEmpty()) {
primaryName = Optional.empty();
}
// if names
else {
// if one name, then that is the primary name
if (names.size() == 1) {
primaryName = Optional.of( names.get(0).getName() );
}
// if more than one name, then seek the earliest name acquisition that was never released
else {
Map<String, TransactionData> txByName = new HashMap<>(names.size());
// for each name, get the latest transaction
for (NameData nameData : names) {
// since the name is currently registered to the owner,
// we assume the latest transaction involving this name was the transaction that acquired the
// name through registration, purchase or update
Optional<TransactionData> latestTransaction
= this.repository
.getTransactionRepository()
.getTransactionsInvolvingName(
nameData.getName(),
confirmationStatus
)
.stream()
.sorted(Comparator.comparing(
TransactionData::getTimestamp).reversed()
)
.findFirst(); // first is the last, since it was reversed
// if there is a latest transaction, expected for all registered names
if (latestTransaction.isPresent()) {
txByName.put(nameData.getName(), latestTransaction.get());
}
// if there is no latest transaction, then
else {
LOGGER.warn("No matching transaction for name: " + nameData.getName());
}
}
// get the first name acquisition for this address
Optional<Map.Entry<String, TransactionData>> firstNameEntry
= txByName.entrySet().stream().sorted(Comparator.comparing(entry -> entry.getValue().getTimestamp())).findFirst();
// if there is a name acquisition, then the first one is the primary name
if (firstNameEntry.isPresent()) {
primaryName = Optional.of( firstNameEntry.get().getKey() );
}
// if there is no name acquisition, then there is no primary name
else {
primaryName = Optional.empty();
}
}
}
return primaryName;
}
/**
* Set Primary Name
*
* @param primaryName the primary name to set for this address
*
* @return the primary name if successful, empty if unsuccessful
*
* @throws DataException
*/
public Optional<String> setPrimaryName( String primaryName ) throws DataException {
int changed = this.repository.getNameRepository().setPrimaryName(this.address, primaryName);
return changed > 0 ? Optional.of(primaryName) : Optional.empty();
}
/**
* Returns reward-share minting address, or unknown if reward-share does not exist.
*

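A brief, hypothetical usage sketch of the primary-name helpers added above (the address, confirmation status, and repository-access pattern are assumptions for illustration, not taken from this diff):

// Illustrative only; assumes the usual RepositoryManager access pattern
try (final Repository repository = RepositoryManager.getRepository()) {
    Account account = new Account(repository, "QexampleAddress123"); // hypothetical address

    // Use the stored primary name if present, otherwise recompute and store it
    Optional<String> primary = account.getPrimaryName();
    if (primary.isEmpty())
        primary = account.resetPrimaryName(TransactionsResource.ConfirmationStatus.CONFIRMED);

    primary.ifPresent(name -> System.out.println("Primary name: " + name));
}
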
View File

@@ -1,17 +1,41 @@
package org.qortal.account;
import org.bouncycastle.crypto.generators.Ed25519KeyPairGenerator;
import org.bouncycastle.crypto.params.Ed25519KeyGenerationParameters;
import org.bouncycastle.crypto.params.Ed25519PublicKeyParameters;
import org.qortal.crypto.Crypto;
import org.qortal.data.account.AccountData;
import org.qortal.repository.Repository;
import java.security.SecureRandom;
public class PublicKeyAccount extends Account {
protected final byte[] publicKey;
protected final Ed25519PublicKeyParameters edPublicKeyParams;
/** <p>Constructor for generating a PublicKeyAccount</p>
*
* @param repository Block Chain
* @param publicKey 32 byte Public Key
* @since v4.7.3
*/
public PublicKeyAccount(Repository repository, byte[] publicKey) {
this(repository, new Ed25519PublicKeyParameters(publicKey, 0));
super(repository, Crypto.toAddress(publicKey));
Ed25519PublicKeyParameters t = null;
try {
t = new Ed25519PublicKeyParameters(publicKey, 0);
} catch (Exception e) {
var gen = new Ed25519KeyPairGenerator();
gen.init(new Ed25519KeyGenerationParameters(new SecureRandom()));
var keyPair = gen.generateKeyPair();
t = (Ed25519PublicKeyParameters) keyPair.getPublic();
} finally {
this.edPublicKeyParams = t;
}
this.publicKey = publicKey;
}
protected PublicKeyAccount(Repository repository, Ed25519PublicKeyParameters edPublicKeyParams) {

View File

@@ -46,6 +46,7 @@ public class ApiService {
private ApiService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.resource", "org.qortal.api.restricted.resource");
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
@@ -197,6 +198,7 @@ public class ApiService {
context.addServlet(DataMonitorSocket.class, "/websockets/datamonitor");
context.addServlet(ActiveChatsWebSocket.class, "/websockets/chat/active/*");
context.addServlet(ChatMessagesWebSocket.class, "/websockets/chat/messages");
context.addServlet(UnsignedFeesSocket.class, "/websockets/crosschain/unsignedfees");
context.addServlet(TradeOffersWebSocket.class, "/websockets/crosschain/tradeoffers");
context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot");
context.addServlet(TradePresenceWebSocket.class, "/websockets/crosschain/tradepresence");

View File

@@ -40,6 +40,7 @@ public class DevProxyService {
private DevProxyService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.proxy.resource", "org.qortal.api.resource");
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);

View File

@@ -39,6 +39,7 @@ public class DomainMapService {
private DomainMapService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.resource", "org.qortal.api.domainmap.resource");
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);

View File

@@ -39,6 +39,7 @@ public class GatewayService {
private GatewayService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.resource", "org.qortal.api.gateway.resource");
this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);

View File

@@ -1,14 +1,13 @@
package org.qortal.api;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.qortal.arbitrary.misc.Service;
import java.util.Objects;
public class HTMLParser {
private static final Logger LOGGER = LogManager.getLogger(HTMLParser.class);
@@ -22,10 +21,11 @@ public class HTMLParser {
private String identifier;
private String path;
private String theme;
private String lang;
private boolean usingCustomRouting;
public HTMLParser(String resourceId, String inPath, String prefix, boolean includeResourceIdInPrefix, byte[] data,
String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting) {
String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting, String lang) {
String inPathWithoutFilename = inPath.contains("/") ? inPath.substring(0, inPath.lastIndexOf('/')) : String.format("/%s",inPath);
this.qdnBase = includeResourceIdInPrefix ? String.format("%s/%s", prefix, resourceId) : prefix;
this.qdnBaseWithPath = includeResourceIdInPrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : String.format("%s%s", prefix, inPathWithoutFilename);
@@ -36,6 +36,7 @@ public class HTMLParser {
this.identifier = identifier;
this.path = inPath;
this.theme = theme;
this.lang = lang;
this.usingCustomRouting = usingCustomRouting;
}
@@ -61,9 +62,13 @@ public class HTMLParser {
String identifier = this.identifier != null ? this.identifier.replace("\\", "").replace("\"","\\\"") : "";
String path = this.path != null ? this.path.replace("\\", "").replace("\"","\\\"") : "";
String theme = this.theme != null ? this.theme.replace("\\", "").replace("\"","\\\"") : "";
String lang = this.lang != null ? this.lang.replace("\\", "").replace("\"", "\\\"") : "";
String qdnBase = this.qdnBase != null ? this.qdnBase.replace("\\", "").replace("\"","\\\"") : "";
String qdnBaseWithPath = this.qdnBaseWithPath != null ? this.qdnBaseWithPath.replace("\\", "").replace("\"","\\\"") : "";
String qdnContextVar = String.format("<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>", qdnContext, theme, service, name, identifier, path, qdnBase, qdnBaseWithPath);
String qdnContextVar = String.format(
"<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnLang=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>",
qdnContext, theme, lang, service, name, identifier, path, qdnBase, qdnBaseWithPath
);
head.get(0).prepend(qdnContextVar);
// Add base href tag

View File

@@ -304,11 +304,11 @@ public class BitcoinyTBDRequest {
private String networkName;
/**
* Fee Ceiling
* Fee Required
*
* web search, LTC fee ceiling = 1000L
* web search, LTC fee required = 1000L
*/
private long feeCeiling;
private long feeRequired;
/**
* Extended Public Key
@@ -570,8 +570,8 @@ public class BitcoinyTBDRequest {
return this.networkName;
}
public long getFeeCeiling() {
return this.feeCeiling;
public long getFeeRequired() {
return this.feeRequired;
}
public String getExtendedPublicKey() {
@@ -671,7 +671,7 @@ public class BitcoinyTBDRequest {
", minimumOrderAmount=" + minimumOrderAmount +
", feePerKb=" + feePerKb +
", networkName='" + networkName + '\'' +
", feeCeiling=" + feeCeiling +
", feeRequired=" + feeRequired +
", extendedPublicKey='" + extendedPublicKey + '\'' +
", sendAmount=" + sendAmount +
", sendingFeePerByte=" + sendingFeePerByte +

View File

@@ -142,10 +142,20 @@ public class DevProxyServerResource {
}
}
String lang = request.getParameter("lang");
if (lang == null || lang.isBlank()) {
lang = "en"; // fallback
}
String theme = request.getParameter("theme");
if (theme == null || theme.isBlank()) {
theme = "light";
}
// Parse and modify output if needed
if (HTMLParser.isHtmlFile(filename)) {
// HTML file - needs to be parsed
HTMLParser htmlParser = new HTMLParser("", inPath, "", false, data, "proxy", Service.APP, null, "light", true);
HTMLParser htmlParser = new HTMLParser("", inPath, "", false, data, "proxy", Service.APP, null, theme , true, lang);
htmlParser.addAdditionalHeaderTags();
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' ws:; font-src 'self' data:;");
response.setContentType(con.getContentType());

View File

@@ -3,6 +3,7 @@ package org.qortal.api.resource;
import com.google.common.primitives.Bytes;
import com.j256.simplemagic.ContentInfo;
import com.j256.simplemagic.ContentInfoUtil;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.media.ArraySchema;
@@ -12,6 +13,7 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.LogManager;
@@ -63,14 +65,19 @@ import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.FileNameMap;
import java.net.URLConnection;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
@@ -78,6 +85,16 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.zip.GZIPOutputStream;
import org.apache.tika.Tika;
import org.apache.tika.mime.MimeTypeException;
import org.apache.tika.mime.MimeTypes;
import javax.ws.rs.core.Response;
import org.glassfish.jersey.media.multipart.FormDataParam;
import static org.qortal.api.ApiError.REPOSITORY_ISSUE;
@Path("/arbitrary")
@Tag(name = "Arbitrary")
@@ -686,20 +703,20 @@ public class ArbitraryResource {
)
}
)
public HttpServletResponse get(@PathParam("service") Service service,
public void get(@PathParam("service") Service service,
@PathParam("name") String name,
@QueryParam("filepath") String filepath,
@QueryParam("encoding") String encoding,
@QueryParam("rebuild") boolean rebuild,
@QueryParam("async") boolean async,
@QueryParam("attempts") Integer attempts) {
@QueryParam("attempts") Integer attempts, @QueryParam("attachment") boolean attachment, @QueryParam("attachmentFilename") String attachmentFilename) {
// Authentication can be bypassed in the settings, for those running public QDN nodes
if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
Security.checkApiCallAllowed(request);
}
return this.download(service, name, null, filepath, encoding, rebuild, async, attempts);
this.download(service, name, null, filepath, encoding, rebuild, async, attempts, attachment, attachmentFilename);
}
@GET
@@ -719,21 +736,21 @@ public class ArbitraryResource {
)
}
)
public HttpServletResponse get(@PathParam("service") Service service,
public void get(@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@QueryParam("filepath") String filepath,
@QueryParam("encoding") String encoding,
@QueryParam("rebuild") boolean rebuild,
@QueryParam("async") boolean async,
@QueryParam("attempts") Integer attempts) {
@QueryParam("attempts") Integer attempts, @QueryParam("attachment") boolean attachment, @QueryParam("attachmentFilename") String attachmentFilename) {
// Authentication can be bypassed in the settings, for those running public QDN nodes
if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
Security.checkApiCallAllowed(request, null);
}
return this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts);
this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts, attachment, attachmentFilename);
}
@@ -878,6 +895,464 @@ public class ArbitraryResource {
}
@GET
@Path("/check/tmp")
@Produces(MediaType.TEXT_PLAIN)
@Operation(
summary = "Check if the disk has enough disk space for an upcoming upload",
responses = {
@ApiResponse(description = "OK if sufficient space", responseCode = "200"),
@ApiResponse(description = "Insufficient space", responseCode = "507") // 507 = Insufficient Storage
}
)
@SecurityRequirement(name = "apiKey")
public Response checkUploadSpace(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@QueryParam("totalSize") Long totalSize) {
Security.checkApiCallAllowed(request);
if (totalSize == null || totalSize <= 0) {
return Response.status(Response.Status.BAD_REQUEST)
.entity("Missing or invalid totalSize parameter").build();
}
File uploadDir = new File("uploads-temp");
if (!uploadDir.exists()) {
uploadDir.mkdirs(); // ensure the folder exists
}
long usableSpace = uploadDir.getUsableSpace();
long requiredSpace = (long)(((double)totalSize) * 2.2); // estimate for chunks + merge
if (usableSpace < requiredSpace) {
return Response.status(507).entity("Insufficient disk space").build();
}
return Response.ok("Sufficient disk space").build();
}
@POST
@Path("/{service}/{name}/chunk")
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Operation(
summary = "Upload a single file chunk to be later assembled into a complete arbitrary resource (no identifier)",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.MULTIPART_FORM_DATA,
schema = @Schema(
implementation = Object.class
)
)
),
responses = {
@ApiResponse(
description = "Chunk uploaded successfully",
responseCode = "200"
),
@ApiResponse(
description = "Error writing chunk",
responseCode = "500"
)
}
)
@SecurityRequirement(name = "apiKey")
public Response uploadChunkNoIdentifier(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") String serviceString,
@PathParam("name") String name,
@FormDataParam("chunk") InputStream chunkStream,
@FormDataParam("index") int index) {
Security.checkApiCallAllowed(request);
try {
String safeService = Paths.get(serviceString).getFileName().toString();
String safeName = Paths.get(name).getFileName().toString();
java.nio.file.Path tempDir = Paths.get("uploads-temp", safeService, safeName);
Files.createDirectories(tempDir);
java.nio.file.Path chunkFile = tempDir.resolve("chunk_" + index);
Files.copy(chunkStream, chunkFile, StandardCopyOption.REPLACE_EXISTING);
return Response.ok("Chunk " + index + " received").build();
} catch (IOException e) {
LOGGER.error("Failed to write chunk {} for service '{}' and name '{}'", index, serviceString, name, e);
return Response.serverError().entity("Failed to write chunk: " + e.getMessage()).build();
}
}
@POST
@Path("/{service}/{name}/finalize")
@Produces(MediaType.TEXT_PLAIN)
@Operation(
summary = "Finalize a chunked upload (no identifier) and build a raw, unsigned, ARBITRARY transaction",
responses = {
@ApiResponse(
description = "raw, unsigned, ARBITRARY transaction encoded in Base58",
content = @Content(mediaType = MediaType.TEXT_PLAIN)
)
}
)
@SecurityRequirement(name = "apiKey")
public String finalizeUploadNoIdentifier(
@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") String serviceString,
@PathParam("name") String name,
@QueryParam("title") String title,
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("filename") String filename,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
@QueryParam("isZip") Boolean isZip
) {
Security.checkApiCallAllowed(request);
java.nio.file.Path tempFile = null;
java.nio.file.Path tempDir = null;
java.nio.file.Path chunkDir = null;
String safeService = Paths.get(serviceString).getFileName().toString();
String safeName = Paths.get(name).getFileName().toString();
try {
chunkDir = Paths.get("uploads-temp", safeService, safeName);
if (!Files.exists(chunkDir) || !Files.isDirectory(chunkDir)) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No chunks found for upload");
}
String safeFilename = (filename == null || filename.isBlank()) ? "qortal-" + NTP.getTime() : filename;
tempDir = Files.createTempDirectory("qortal-");
String sanitizedFilename = Paths.get(safeFilename).getFileName().toString();
tempFile = tempDir.resolve(sanitizedFilename);
try (OutputStream out = Files.newOutputStream(tempFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
byte[] buffer = new byte[65536];
for (java.nio.file.Path chunk : Files.list(chunkDir)
.filter(path -> path.getFileName().toString().startsWith("chunk_"))
.sorted(Comparator.comparingInt(path -> {
String name2 = path.getFileName().toString();
String numberPart = name2.substring("chunk_".length());
return Integer.parseInt(numberPart);
})).collect(Collectors.toList())) {
try (InputStream in = Files.newInputStream(chunk)) {
int bytesRead;
while ((bytesRead = in.read(buffer)) != -1) {
out.write(buffer, 0, bytesRead);
}
}
}
}
String detectedExtension = "";
String uploadFilename = null;
boolean extensionIsValid = false;
if (filename != null && !filename.isBlank()) {
int lastDot = filename.lastIndexOf('.');
if (lastDot > 0 && lastDot < filename.length() - 1) {
extensionIsValid = true;
uploadFilename = filename;
}
}
if (!extensionIsValid) {
Tika tika = new Tika();
String mimeType = tika.detect(tempFile.toFile());
try {
MimeTypes allTypes = MimeTypes.getDefaultMimeTypes();
org.apache.tika.mime.MimeType mime = allTypes.forName(mimeType);
detectedExtension = mime.getExtension();
} catch (MimeTypeException e) {
LOGGER.warn("Could not determine file extension for MIME type: {}", mimeType, e);
}
if (filename != null && !filename.isBlank()) {
int lastDot = filename.lastIndexOf('.');
String baseName = (lastDot > 0) ? filename.substring(0, lastDot) : filename;
uploadFilename = baseName + (detectedExtension != null ? detectedExtension : "");
} else {
uploadFilename = "qortal-" + NTP.getTime() + (detectedExtension != null ? detectedExtension : "");
}
}
Boolean isZipBoolean = false;
if (isZip != null && isZip) {
isZipBoolean = true;
}
// ✅ Call upload with `null` as identifier
return this.upload(
Service.valueOf(serviceString),
name,
null, // no identifier
tempFile.toString(),
null,
null,
isZipBoolean,
fee,
uploadFilename,
title,
description,
tags,
category,
preview
);
} catch (IOException e) {
LOGGER.error("Failed to merge chunks for service='{}', name='{}'", serviceString, name, e);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, "Failed to merge chunks: " + e.getMessage());
} finally {
if (tempDir != null) {
try {
Files.walk(tempDir)
.sorted(Comparator.reverseOrder())
.map(java.nio.file.Path::toFile)
.forEach(File::delete);
} catch (IOException e) {
LOGGER.warn("Failed to delete temp directory: {}", tempDir, e);
}
}
try {
Files.walk(chunkDir)
.sorted(Comparator.reverseOrder())
.map(java.nio.file.Path::toFile)
.forEach(File::delete);
} catch (IOException e) {
LOGGER.warn("Failed to delete chunk directory: {}", chunkDir, e);
}
}
}
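/*
 * ------------------------------------------------------------------------
 * Illustrative client for the chunk + finalize endpoints above. This is a
 * hypothetical sketch, not part of this diff: the node URL, API key header,
 * registered name and chunk size are assumptions; the endpoint paths and the
 * "chunk"/"index" form fields match the resource methods shown above.
 * ------------------------------------------------------------------------
 */
import java.io.ByteArrayOutputStream;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

public class ChunkedUploadExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:12391/arbitrary/WEBSITE/MyName"; // assumed node + name
        String apiKey = "node-api-key"; // assumed
        byte[] data = Files.readAllBytes(Path.of("site.zip"));
        int chunkSize = 5 * 1024 * 1024;

        for (int index = 0, offset = 0; offset < data.length; index++, offset += chunkSize) {
            byte[] chunk = Arrays.copyOfRange(data, offset, Math.min(offset + chunkSize, data.length));

            // Build a minimal multipart/form-data body with "index" and "chunk" parts
            String boundary = "qortal-" + System.nanoTime();
            ByteArrayOutputStream body = new ByteArrayOutputStream();
            body.writeBytes(("--" + boundary + "\r\n"
                    + "Content-Disposition: form-data; name=\"index\"\r\n\r\n" + index + "\r\n"
                    + "--" + boundary + "\r\n"
                    + "Content-Disposition: form-data; name=\"chunk\"; filename=\"chunk_" + index + "\"\r\n"
                    + "Content-Type: application/octet-stream\r\n\r\n").getBytes(StandardCharsets.UTF_8));
            body.writeBytes(chunk);
            body.writeBytes(("\r\n--" + boundary + "--\r\n").getBytes(StandardCharsets.UTF_8));

            client.send(HttpRequest.newBuilder()
                    .uri(URI.create(base + "/chunk"))
                    .header("X-API-KEY", apiKey) // header name assumed
                    .header("Content-Type", "multipart/form-data; boundary=" + boundary)
                    .POST(HttpRequest.BodyPublishers.ofByteArray(body.toByteArray()))
                    .build(), HttpResponse.BodyHandlers.ofString());
        }

        // Once every chunk is uploaded, finalize to receive the raw unsigned ARBITRARY transaction (Base58)
        HttpResponse<String> response = client.send(HttpRequest.newBuilder()
                .uri(URI.create(base + "/finalize?isZip=true"))
                .header("X-API-KEY", apiKey)
                .POST(HttpRequest.BodyPublishers.noBody())
                .build(), HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}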
@POST
@Path("/{service}/{name}/{identifier}/chunk")
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Operation(
summary = "Upload a single file chunk to be later assembled into a complete arbitrary resource",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.MULTIPART_FORM_DATA,
schema = @Schema(
implementation = Object.class
)
)
),
responses = {
@ApiResponse(
description = "Chunk uploaded successfully",
responseCode = "200"
),
@ApiResponse(
description = "Error writing chunk",
responseCode = "500"
)
}
)
@SecurityRequirement(name = "apiKey")
public Response uploadChunk(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") String serviceString,
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@FormDataParam("chunk") InputStream chunkStream,
@FormDataParam("index") int index) {
Security.checkApiCallAllowed(request);
try {
String safeService = Paths.get(serviceString).getFileName().toString();
String safeName = Paths.get(name).getFileName().toString();
String safeIdentifier = Paths.get(identifier).getFileName().toString();
java.nio.file.Path tempDir = Paths.get("uploads-temp", safeService, safeName, safeIdentifier);
Files.createDirectories(tempDir);
java.nio.file.Path chunkFile = tempDir.resolve("chunk_" + index);
Files.copy(chunkStream, chunkFile, StandardCopyOption.REPLACE_EXISTING);
return Response.ok("Chunk " + index + " received").build();
} catch (IOException e) {
LOGGER.error("Failed to write chunk {} for service='{}', name='{}', identifier='{}'", index, serviceString, name, identifier, e);
return Response.serverError().entity("Failed to write chunk: " + e.getMessage()).build();
}
}
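
A minimal client-side sketch of posting one chunk to this endpoint with java.net.http.HttpClient, building the multipart body by hand. The /arbitrary base path, the default API port 12391, the X-API-KEY header name, and the service/name/identifier values are assumptions for illustration, not taken from this diff.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class ChunkUploadSketch {
    public static void main(String[] args) throws Exception {
        Path chunk = Path.of("video.part0"); // hypothetical chunk file
        int index = 0;
        String boundary = "----qortal-chunk-" + System.nanoTime();

        // Two-part multipart body: the "chunk" file part and the "index" form field
        byte[] fileBytes = Files.readAllBytes(chunk);
        String head = "--" + boundary + "\r\n"
                + "Content-Disposition: form-data; name=\"chunk\"; filename=\"" + chunk.getFileName() + "\"\r\n"
                + "Content-Type: application/octet-stream\r\n\r\n";
        String middleAndTail = "\r\n--" + boundary + "\r\n"
                + "Content-Disposition: form-data; name=\"index\"\r\n\r\n" + index + "\r\n"
                + "--" + boundary + "--\r\n";
        byte[] body = concat(head.getBytes(StandardCharsets.UTF_8), fileBytes,
                middleAndTail.getBytes(StandardCharsets.UTF_8));

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/arbitrary/VIDEO/MyName/myvideo/chunk"))
                .header("Content-Type", "multipart/form-data; boundary=" + boundary)
                .header("X-API-KEY", "my-api-key") // assumed header name
                .POST(HttpRequest.BodyPublishers.ofByteArray(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + ": " + response.body());
    }

    private static byte[] concat(byte[]... parts) {
        int total = 0;
        for (byte[] p : parts) total += p.length;
        byte[] out = new byte[total];
        int pos = 0;
        for (byte[] p : parts) { System.arraycopy(p, 0, out, pos, p.length); pos += p.length; }
        return out;
    }
}
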
@POST
@Path("/{service}/{name}/{identifier}/finalize")
@Produces(MediaType.TEXT_PLAIN)
@Operation(
summary = "Finalize a chunked upload and build a raw, unsigned, ARBITRARY transaction",
responses = {
@ApiResponse(
description = "raw, unsigned, ARBITRARY transaction encoded in Base58",
content = @Content(mediaType = MediaType.TEXT_PLAIN)
)
}
)
@SecurityRequirement(name = "apiKey")
public String finalizeUpload(
@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") String serviceString,
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@QueryParam("title") String title,
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("filename") String filename,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
@QueryParam("isZip") Boolean isZip
) {
Security.checkApiCallAllowed(request);
java.nio.file.Path tempFile = null;
java.nio.file.Path tempDir = null;
java.nio.file.Path chunkDir = null;
try {
String safeService = Paths.get(serviceString).getFileName().toString();
String safeName = Paths.get(name).getFileName().toString();
String safeIdentifier = Paths.get(identifier).getFileName().toString();
java.nio.file.Path baseUploadsDir = Paths.get("uploads-temp"); // relative to Qortal working dir
chunkDir = baseUploadsDir.resolve(safeService).resolve(safeName).resolve(safeIdentifier);
if (!Files.exists(chunkDir) || !Files.isDirectory(chunkDir)) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No chunks found for upload");
}
// Step 1: Determine a safe filename for disk temp file (regardless of extension correctness)
String safeFilename = filename;
if (filename == null || filename.isBlank()) {
safeFilename = "qortal-" + NTP.getTime();
}
tempDir = Files.createTempDirectory("qortal-");
String sanitizedFilename = Paths.get(safeFilename).getFileName().toString();
tempFile = tempDir.resolve(sanitizedFilename);
// Step 2: Merge chunks
try (OutputStream out = Files.newOutputStream(tempFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
byte[] buffer = new byte[65536];
for (java.nio.file.Path chunk : Files.list(chunkDir)
.filter(path -> path.getFileName().toString().startsWith("chunk_"))
.sorted(Comparator.comparingInt(path -> {
String name2 = path.getFileName().toString();
String numberPart = name2.substring("chunk_".length());
return Integer.parseInt(numberPart);
})).collect(Collectors.toList())) {
try (InputStream in = Files.newInputStream(chunk)) {
int bytesRead;
while ((bytesRead = in.read(buffer)) != -1) {
out.write(buffer, 0, bytesRead);
}
}
}
}
// Step 3: Determine correct extension
String detectedExtension = "";
String uploadFilename = null;
boolean extensionIsValid = false;
if (filename != null && !filename.isBlank()) {
int lastDot = filename.lastIndexOf('.');
if (lastDot > 0 && lastDot < filename.length() - 1) {
extensionIsValid = true;
uploadFilename = filename;
}
}
if (!extensionIsValid) {
Tika tika = new Tika();
String mimeType = tika.detect(tempFile.toFile());
try {
MimeTypes allTypes = MimeTypes.getDefaultMimeTypes();
org.apache.tika.mime.MimeType mime = allTypes.forName(mimeType);
detectedExtension = mime.getExtension();
} catch (MimeTypeException e) {
LOGGER.warn("Could not determine file extension for MIME type: {}", mimeType, e);
}
if (filename != null && !filename.isBlank()) {
int lastDot = filename.lastIndexOf('.');
String baseName = (lastDot > 0) ? filename.substring(0, lastDot) : filename;
uploadFilename = baseName + (detectedExtension != null ? detectedExtension : "");
} else {
uploadFilename = "qortal-" + NTP.getTime() + (detectedExtension != null ? detectedExtension : "");
}
}
Boolean isZipBoolean = false;
if (isZip != null && isZip) {
isZipBoolean = true;
}
return this.upload(
Service.valueOf(serviceString),
name,
identifier,
tempFile.toString(),
null,
null,
isZipBoolean,
fee,
uploadFilename,
title,
description,
tags,
category,
preview
);
} catch (IOException e) {
LOGGER.error("Unexpected error in finalizeUpload for service='{}', name='{}', name='{}'", serviceString, name, identifier, e);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, "Failed to merge chunks: " + e.getMessage());
} finally {
if (tempDir != null) {
try {
Files.walk(tempDir)
.sorted(Comparator.reverseOrder())
.map(java.nio.file.Path::toFile)
.forEach(File::delete);
} catch (IOException e) {
LOGGER.warn("Failed to delete temp directory: {}", tempDir, e);
}
}
try {
Files.walk(chunkDir)
.sorted(Comparator.reverseOrder())
.map(java.nio.file.Path::toFile)
.forEach(File::delete);
} catch (IOException e) {
LOGGER.warn("Failed to delete chunk directory: {}", chunkDir, e);
}
}
}
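
Once all chunks have been posted, a client calls this finalize endpoint to have the node merge them and return a raw, unsigned ARBITRARY transaction encoded in Base58; the transaction still needs to be signed (and its nonce computed) before broadcast. A minimal sketch, under the same host, port, base-path and API-key header assumptions as the chunk-upload sketch above:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FinalizeUploadSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical service/name/identifier; query parameters mirror the endpoint's @QueryParam fields
        String url = "http://localhost:12391/arbitrary/VIDEO/MyName/myvideo/finalize"
                + "?filename=myvideo.mp4&title=My%20video&isZip=false";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(url))
                .header("X-API-KEY", "my-api-key") // assumed header name
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // On success the body is a Base58-encoded, unsigned ARBITRARY transaction
        System.out.println("Unsigned transaction (Base58): " + response.body());
    }
}
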
// Upload base64-encoded data
@@ -1343,7 +1818,7 @@ public class ArbitraryResource {
if (path == null) {
// See if we have a string instead
if (string != null) {
if (filename == null) {
if (filename == null || filename.isBlank()) {
// Use current time as filename
filename = String.format("qortal-%d", NTP.getTime());
}
@@ -1358,7 +1833,7 @@ public class ArbitraryResource {
}
// ... or base64 encoded raw data
else if (base64 != null) {
if (filename == null) {
if (filename == null || filename.isBlank()) {
// Use current time as filename
filename = String.format("qortal-%d", NTP.getTime());
}
@@ -1409,6 +1884,7 @@ public class ArbitraryResource {
);
transactionBuilder.build();
// Don't compute nonce - this is done by the client (or via POST /arbitrary/compute)
ArbitraryTransactionData transactionData = transactionBuilder.getArbitraryTransactionData();
return Base58.encode(ArbitraryTransactionTransformer.toBytes(transactionData));
@@ -1424,22 +1900,20 @@ public class ArbitraryResource {
}
}
private HttpServletResponse download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts) {
private void download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts, boolean attachment, String attachmentFilename) {
try {
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
int attempts = 0;
if (maxAttempts == null) {
maxAttempts = 5;
}
// Loop until we have data
if (async) {
// Asynchronous
arbitraryDataReader.loadAsynchronously(false, 1);
}
else {
} else {
// Synchronous
while (!Controller.isStopping()) {
attempts++;
@@ -1449,88 +1923,189 @@ public class ArbitraryResource {
break;
} catch (MissingDataException e) {
if (attempts > maxAttempts) {
// Give up after maxAttempts attempts
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data unavailable. Please try again later.");
}
}
}
Thread.sleep(3000L);
}
}
java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
if (outputPath == null) {
// Assume the resource doesn't exist
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, "File not found");
}
if (filepath == null || filepath.isEmpty()) {
// No file path supplied - so check if this is a single file resource
String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal");
if (files != null && files.length == 1) {
// This is a single file resource
filepath = files[0];
}
else {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA,
"filepath is required for resources containing more than one file");
} else {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "filepath is required for resources containing more than one file");
}
}
java.nio.file.Path path = Paths.get(outputPath.toString(), filepath);
if (!Files.exists(path)) {
String message = String.format("No file exists at filepath: %s", filepath);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, message);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No file exists at filepath: " + filepath);
}
byte[] data;
int fileSize = (int)path.toFile().length();
int length = fileSize;
// Parse "Range" header
Integer rangeStart = null;
Integer rangeEnd = null;
if (attachment) {
String rawFilename;
if (attachmentFilename != null && !attachmentFilename.isEmpty()) {
// 1. Sanitize first
String safeAttachmentFilename = attachmentFilename.replaceAll("[\\\\/:*?\"<>|]", "_");
// 2. Check for a valid extension (2-5 alphanumeric chars)
if (!safeAttachmentFilename.matches(".*\\.[a-zA-Z0-9]{2,5}$")) {
safeAttachmentFilename += ".bin";
}
rawFilename = safeAttachmentFilename;
} else {
// Fallback if no filename is provided
String baseFilename = (identifier != null && !identifier.isEmpty())
? name + "-" + identifier
: name;
rawFilename = baseFilename.replaceAll("[\\\\/:*?\"<>|]", "_") + ".bin";
}
// Optional: trim length
rawFilename = rawFilename.length() > 100 ? rawFilename.substring(0, 100) : rawFilename;
// 3. Set Content-Disposition header
response.setHeader("Content-Disposition", "attachment; filename=\"" + rawFilename + "\"");
}
// Determine the total size of the requested file
long fileSize = Files.size(path);
String mimeType = context.getMimeType(path.toString());
// Attempt to read the "Range" header from the request to support partial content delivery (e.g., for video streaming or resumable downloads)
String range = request.getHeader("Range");
if (range != null) {
range = range.replace("bytes=", "");
String[] parts = range.split("-");
rangeStart = (parts != null && parts.length > 0) ? Integer.parseInt(parts[0]) : null;
rangeEnd = (parts != null && parts.length > 1) ? Integer.parseInt(parts[1]) : fileSize;
long rangeStart = 0;
long rangeEnd = fileSize - 1;
boolean isPartial = false;
// If a Range header is present and no base64 encoding is requested, parse the range values
if (range != null && encoding == null) {
range = range.replace("bytes=", ""); // Remove the "bytes=" prefix
String[] parts = range.split("-"); // Split the range into start and end
// Parse range start
if (parts.length > 0 && !parts[0].isEmpty()) {
rangeStart = Long.parseLong(parts[0]);
}
// Parse range end, if present
if (parts.length > 1 && !parts[1].isEmpty()) {
rangeEnd = Long.parseLong(parts[1]);
}
isPartial = true; // Indicate that this is a partial content request
}
if (rangeStart != null && rangeEnd != null) {
// We have a range, so update the requested length
length = rangeEnd - rangeStart;
// Calculate how many bytes should be sent in the response
long contentLength = rangeEnd - rangeStart + 1;
// Inform the client that byte ranges are supported
response.setHeader("Accept-Ranges", "bytes");
if (isPartial) {
// If partial content was requested, return 206 Partial Content with appropriate headers
response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
response.setHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd, fileSize));
} else {
// Otherwise, return the entire file with status 200 OK
response.setStatus(HttpServletResponse.SC_OK);
}
if (length < fileSize && encoding == null) {
// Partial content requested, and not encoding the data
response.setStatus(206);
response.addHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd-1, fileSize));
data = FilesystemUtils.readFromFile(path.toString(), rangeStart, length);
// Initialize output streams for writing the file to the response
OutputStream rawOut = null;
OutputStream base64Out = null;
OutputStream gzipOut = null;
try {
rawOut = response.getOutputStream();
if (encoding != null && "base64".equalsIgnoreCase(encoding)) {
// If base64 encoding is requested, override content type
response.setContentType("text/plain");
// Check if the client accepts gzip encoding
String acceptEncoding = request.getHeader("Accept-Encoding");
boolean wantsGzip = acceptEncoding != null && acceptEncoding.contains("gzip");
if (wantsGzip) {
// Wrap output in GZIP and Base64 streams if gzip is accepted
response.setHeader("Content-Encoding", "gzip");
gzipOut = new GZIPOutputStream(rawOut);
base64Out = java.util.Base64.getEncoder().wrap(gzipOut);
} else {
// Wrap output in Base64 only
base64Out = java.util.Base64.getEncoder().wrap(rawOut);
}
rawOut = base64Out; // Use the wrapped stream for writing
} else {
// For raw binary output, set the content type and length
response.setContentType(mimeType != null ? mimeType : "application/octet-stream");
response.setContentLength((int) contentLength);
}
// Stream file content
try (InputStream inputStream = Files.newInputStream(path)) {
if (rangeStart > 0) {
inputStream.skip(rangeStart);
}
byte[] buffer = new byte[65536];
long bytesRemaining = contentLength;
int bytesRead;
while (bytesRemaining > 0 && (bytesRead = inputStream.read(buffer, 0, (int) Math.min(buffer.length, bytesRemaining))) != -1) {
rawOut.write(buffer, 0, bytesRead);
bytesRemaining -= bytesRead;
}
}
// Stream finished
if (base64Out != null) {
base64Out.close(); // Also flushes and closes the wrapped gzipOut
} else if (gzipOut != null) {
gzipOut.close(); // Only close gzipOut if it wasn't wrapped by base64Out
} else {
rawOut.flush(); // Flush only the base output stream if nothing was wrapped
}
if (!response.isCommitted()) {
response.setStatus(HttpServletResponse.SC_OK);
response.getWriter().write(" ");
}
} catch (IOException e) {
// Streaming errors should not rethrow — just log
LOGGER.warn(String.format("Streaming error for %s %s: %s", service, name, e.getMessage()));
}
else {
// Full content requested (or encoded data)
response.setStatus(200);
data = Files.readAllBytes(path); // TODO: limit file size that can be read into memory
} catch (IOException | ApiException | DataException e) {
LOGGER.warn(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
if (!response.isCommitted()) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
}
// Encode the data if requested
if (encoding != null && Objects.equals(encoding.toLowerCase(), "base64")) {
data = Base64.encode(data);
} catch (NumberFormatException e) {
LOGGER.warn(String.format("Invalid range for %s %s: %s", service, name, e.getMessage()));
if (!response.isCommitted()) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_DATA, e.getMessage());
}
response.addHeader("Accept-Ranges", "bytes");
response.setContentType(context.getMimeType(path.toString()));
response.setContentLength(data.length);
response.getOutputStream().write(data);
return response;
} catch (Exception e) {
LOGGER.debug(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
}
}
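
The partial-content branch above turns a "Range: bytes=start-end" request header into a 206 response with a Content-Range header and a byte count of end - start + 1. A small standalone sketch of that arithmetic (it mirrors the parsing above but is not the class's own code):

public class RangeHeaderSketch {
    public static void main(String[] args) {
        long fileSize = 10_000;
        String rangeHeader = "bytes=2000-2999"; // example request header

        long rangeStart = 0;
        long rangeEnd = fileSize - 1;

        String[] parts = rangeHeader.replace("bytes=", "").split("-");
        if (parts.length > 0 && !parts[0].isEmpty()) rangeStart = Long.parseLong(parts[0]);
        if (parts.length > 1 && !parts[1].isEmpty()) rangeEnd = Long.parseLong(parts[1]);

        long contentLength = rangeEnd - rangeStart + 1; // bytes actually sent

        System.out.println("Content-Length: " + contentLength); // 1000
        System.out.println("Content-Range: bytes " + rangeStart + "-" + rangeEnd + "/" + fileSize); // bytes 2000-2999/10000
    }
}
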
private FileProperties getFileProperties(Service service, String name, String identifier) {
try {


@@ -502,10 +502,10 @@ public class CrossChainBitcoinResource {
}
@GET
@Path("/feeceiling")
@Path("/feerequired")
@Operation(
summary = "Returns Bitcoin fee per Kb.",
description = "Returns Bitcoin fee per Kb.",
summary = "The total fee required for unlocking BTC to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
responses = {
@ApiResponse(
content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainBitcoinResource {
)
}
)
public String getBitcoinFeeCeiling() {
public String getBitcoinFeeRequired() {
Bitcoin bitcoin = Bitcoin.getInstance();
return String.valueOf(bitcoin.getFeeCeiling());
return String.valueOf(bitcoin.getFeeRequired());
}
@POST
@Path("/updatefeeceiling")
@Path("/updatefeerequired")
@Operation(
summary = "Sets Bitcoin fee ceiling.",
description = "Sets Bitcoin fee ceiling.",
summary = "The total fee required for unlocking BTC to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
requestBody = @RequestBody(
required = true,
content = @Content(
@@ -545,13 +545,13 @@ public class CrossChainBitcoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
public String setBitcoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
public String setBitcoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
Security.checkApiCallAllowed(request);
Bitcoin bitcoin = Bitcoin.getInstance();
try {
return CrossChainUtils.setFeeCeiling(bitcoin, fee);
return CrossChainUtils.setFeeRequired(bitcoin, fee);
}
catch (IllegalArgumentException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
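
A minimal sketch of driving the renamed endpoints: reading the current required fee and updating it. The /crosschain/btc base path, port 12391 and the X-API-KEY header name are assumptions; the POST body is simply the fee value in sats as plain text, matching the String fee parameter above. The same rename applies to the other foreign chains (DGB, DOGE, LTC, ARRR, RVN) in the files that follow.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FeeRequiredSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // Read the current required fee (sats) for unlocking BTC to the trade offer creator
        HttpRequest get = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/crosschain/btc/feerequired"))
                .GET()
                .build();
        System.out.println("Current: " + client.send(get, HttpResponse.BodyHandlers.ofString()).body());

        // Update it; the request body is just the new value as a plain string
        HttpRequest post = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/crosschain/btc/updatefeerequired"))
                .header("X-API-KEY", "my-api-key") // assumed header name
                .POST(HttpRequest.BodyPublishers.ofString("12000"))
                .build();
        System.out.println("Updated: " + client.send(post, HttpResponse.BodyHandlers.ofString()).body());
    }
}
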


@@ -502,10 +502,10 @@ public class CrossChainDigibyteResource {
}
@GET
@Path("/feeceiling")
@Path("/feerequired")
@Operation(
summary = "Returns Digibyte fee per Kb.",
description = "Returns Digibyte fee per Kb.",
summary = "The total fee required for unlocking DGB to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
responses = {
@ApiResponse(
content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainDigibyteResource {
)
}
)
public String getDigibyteFeeCeiling() {
public String getDigibyteFeeRequired() {
Digibyte digibyte = Digibyte.getInstance();
return String.valueOf(digibyte.getFeeCeiling());
return String.valueOf(digibyte.getFeeRequired());
}
@POST
@Path("/updatefeeceiling")
@Path("/updatefeerequired")
@Operation(
summary = "Sets Digibyte fee ceiling.",
description = "Sets Digibyte fee ceiling.",
summary = "The total fee required for unlocking DGB to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
requestBody = @RequestBody(
required = true,
content = @Content(
@@ -545,13 +545,13 @@ public class CrossChainDigibyteResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
public String setDigibyteFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
public String setDigibyteFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
Security.checkApiCallAllowed(request);
Digibyte digibyte = Digibyte.getInstance();
try {
return CrossChainUtils.setFeeCeiling(digibyte, fee);
return CrossChainUtils.setFeeRequired(digibyte, fee);
}
catch (IllegalArgumentException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);


@@ -502,10 +502,10 @@ public class CrossChainDogecoinResource {
}
@GET
@Path("/feeceiling")
@Path("/feerequired")
@Operation(
summary = "Returns Dogecoin fee per Kb.",
description = "Returns Dogecoin fee per Kb.",
summary = "The total fee required for unlocking DOGE to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
responses = {
@ApiResponse(
content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainDogecoinResource {
)
}
)
public String getDogecoinFeeCeiling() {
public String getDogecoinFeeRequired() {
Dogecoin dogecoin = Dogecoin.getInstance();
return String.valueOf(dogecoin.getFeeCeiling());
return String.valueOf(dogecoin.getFeeRequired());
}
@POST
@Path("/updatefeeceiling")
@Path("/updatefeerequired")
@Operation(
summary = "Sets Dogecoin fee ceiling.",
description = "Sets Dogecoin fee ceiling.",
summary = "The total fee required for unlocking DOGE to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
requestBody = @RequestBody(
required = true,
content = @Content(
@@ -545,13 +545,13 @@ public class CrossChainDogecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
public String setDogecoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
public String setDogecoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
Security.checkApiCallAllowed(request);
Dogecoin dogecoin = Dogecoin.getInstance();
try {
return CrossChainUtils.setFeeCeiling(dogecoin, fee);
return CrossChainUtils.setFeeRequired(dogecoin, fee);
}
catch (IllegalArgumentException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);


@@ -540,10 +540,10 @@ public class CrossChainLitecoinResource {
}
@GET
@Path("/feeceiling")
@Path("/feerequired")
@Operation(
summary = "Returns Litecoin fee per Kb.",
description = "Returns Litecoin fee per Kb.",
summary = "The total fee required for unlocking LTC to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
responses = {
@ApiResponse(
content = @Content(
@@ -554,17 +554,17 @@ public class CrossChainLitecoinResource {
)
}
)
public String getLitecoinFeeCeiling() {
public String getLitecoinFeeRequired() {
Litecoin litecoin = Litecoin.getInstance();
return String.valueOf(litecoin.getFeeCeiling());
return String.valueOf(litecoin.getFeeRequired());
}
@POST
@Path("/updatefeeceiling")
@Path("/updatefeerequired")
@Operation(
summary = "Sets Litecoin fee ceiling.",
description = "Sets Litecoin fee ceiling.",
summary = "The total fee required for unlocking LTC to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
requestBody = @RequestBody(
required = true,
content = @Content(
@@ -583,13 +583,13 @@ public class CrossChainLitecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
public String setLitecoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
public String setLitecoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
Security.checkApiCallAllowed(request);
Litecoin litecoin = Litecoin.getInstance();
try {
return CrossChainUtils.setFeeCeiling(litecoin, fee);
return CrossChainUtils.setFeeRequired(litecoin, fee);
}
catch (IllegalArgumentException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);


@@ -587,10 +587,10 @@ public class CrossChainPirateChainResource {
}
@GET
@Path("/feeceiling")
@Path("/feerequired")
@Operation(
summary = "Returns PirateChain fee per Kb.",
description = "Returns PirateChain fee per Kb.",
summary = "The total fee required for unlocking ARRR to the trade offer creator.",
description = "The total fee required for unlocking ARRR to the trade offer creator.",
responses = {
@ApiResponse(
content = @Content(
@@ -601,17 +601,17 @@ public class CrossChainPirateChainResource {
)
}
)
public String getPirateChainFeeCeiling() {
public String getPirateChainFeeRequired() {
PirateChain pirateChain = PirateChain.getInstance();
return String.valueOf(pirateChain.getFeeCeiling());
return String.valueOf(pirateChain.getFeeRequired());
}
@POST
@Path("/updatefeeceiling")
@Path("/updatefeerequired")
@Operation(
summary = "Sets PirateChain fee ceiling.",
description = "Sets PirateChain fee ceiling.",
summary = "The total fee required for unlocking ARRR to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
requestBody = @RequestBody(
required = true,
content = @Content(
@@ -630,13 +630,13 @@ public class CrossChainPirateChainResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
public String setPirateChainFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
public String setPirateChainFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
Security.checkApiCallAllowed(request);
PirateChain pirateChain = PirateChain.getInstance();
try {
return CrossChainUtils.setFeeCeiling(pirateChain, fee);
return CrossChainUtils.setFeeRequired(pirateChain, fee);
}
catch (IllegalArgumentException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);


@@ -502,10 +502,10 @@ public class CrossChainRavencoinResource {
}
@GET
@Path("/feeceiling")
@Path("/feerequired")
@Operation(
summary = "Returns Ravencoin fee per Kb.",
description = "Returns Ravencoin fee per Kb.",
summary = "The total fee required for unlocking RVN to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
responses = {
@ApiResponse(
content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainRavencoinResource {
)
}
)
public String getRavencoinFeeCeiling() {
public String getRavencoinFeeRequired() {
Ravencoin ravencoin = Ravencoin.getInstance();
return String.valueOf(ravencoin.getFeeCeiling());
return String.valueOf(ravencoin.getFeeRequired());
}
@POST
@Path("/updatefeeceiling")
@Path("/updatefeerequired")
@Operation(
summary = "Sets Ravencoin fee ceiling.",
description = "Sets Ravencoin fee ceiling.",
summary = "The total fee required for unlocking RVN to the trade offer creator.",
description = "This is in sats for a transaction that is approximately 300 kB in size.",
requestBody = @RequestBody(
required = true,
content = @Content(
@@ -545,13 +545,13 @@ public class CrossChainRavencoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
public String setRavencoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
public String setRavencoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
Security.checkApiCallAllowed(request);
Ravencoin ravencoin = Ravencoin.getInstance();
try {
return CrossChainUtils.setFeeCeiling(ravencoin, fee);
return CrossChainUtils.setFeeRequired(ravencoin, fee);
}
catch (IllegalArgumentException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);


@@ -10,6 +10,8 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.glassfish.jersey.media.multipart.ContentDisposition;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
@@ -18,6 +20,7 @@ import org.qortal.api.Security;
import org.qortal.api.model.CrossChainCancelRequest;
import org.qortal.api.model.CrossChainTradeLedgerEntry;
import org.qortal.api.model.CrossChainTradeSummary;
import org.qortal.controller.ForeignFeesManager;
import org.qortal.controller.tradebot.TradeBot;
import org.qortal.crosschain.ACCT;
import org.qortal.crosschain.AcctMode;
@@ -29,6 +32,8 @@ import org.qortal.data.at.ATData;
import org.qortal.data.at.ATStateData;
import org.qortal.data.crosschain.CrossChainTradeData;
import org.qortal.data.crosschain.TransactionSummary;
import org.qortal.data.crosschain.ForeignFeeDecodedData;
import org.qortal.data.crosschain.ForeignFeeEncodedData;
import org.qortal.data.transaction.BaseTransactionData;
import org.qortal.data.transaction.MessageTransactionData;
import org.qortal.data.transaction.TransactionData;
@@ -64,6 +69,8 @@ import java.util.stream.Collectors;
@Tag(name = "Cross-Chain")
public class CrossChainResource {
private static final Logger LOGGER = LogManager.getLogger(CrossChainResource.class);
@Context
HttpServletRequest request;
@@ -360,6 +367,101 @@ public class CrossChainResource {
}
}
@POST
@Path("/signedfees")
@Operation(
summary = "",
description = "",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
array = @ArraySchema(
schema = @Schema(
implementation = ForeignFeeEncodedData.class
)
)
)
),
responses = {
@ApiResponse(
description = "true on success",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "boolean"
)
)
)
}
)
public String postSignedForeignFees(List<ForeignFeeEncodedData> signedFees) {
LOGGER.info("signedFees = " + signedFees);
try {
ForeignFeesManager.getInstance().addSignedFees(signedFees);
return "true";
}
catch( Exception e ) {
LOGGER.error(e.getMessage(), e);
return "false";
}
}
@GET
@Path("/unsignedfees/{address}")
@Operation(
summary = "",
description = "",
responses = {
@ApiResponse(
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = ForeignFeeEncodedData.class
)
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
public List<ForeignFeeEncodedData> getUnsignedFees(@PathParam("address") String address) {
List<ForeignFeeEncodedData> unsignedFeesForAddress = ForeignFeesManager.getInstance().getUnsignedFeesForAddress(address);
LOGGER.info("address = " + address);
LOGGER.info("returning unsigned = " + unsignedFeesForAddress);
return unsignedFeesForAddress;
}
@GET
@Path("/signedfees")
@Operation(
summary = "",
description = "",
responses = {
@ApiResponse(
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = ForeignFeeDecodedData.class
)
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
public List<ForeignFeeDecodedData> getSignedFees() {
return ForeignFeesManager.getInstance().getSignedFees();
}
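
Taken together, these two endpoints form a polling loop for a client that holds the trade keys: fetch the unsigned fee data for an address, sign it client-side, and post the signed entries back. A minimal sketch; the /crosschain base path and the port are assumptions, and the JSON structure of ForeignFeeEncodedData is treated as an opaque payload here rather than guessed at.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ForeignFeeSigningSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String address = "QexampleAddress123"; // hypothetical Qortal address

        // 1. Fetch fee entries that still need a signature for this address
        HttpRequest get = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/crosschain/unsignedfees/" + address))
                .GET()
                .build();
        String unsignedJson = client.send(get, HttpResponse.BodyHandlers.ofString()).body();
        System.out.println("Unsigned fees: " + unsignedJson);

        // 2. Sign the entries client-side (out of scope here), producing a JSON array of
        //    ForeignFeeEncodedData objects, then post them back
        String signedJson = "[]"; // placeholder for the signed array produced in step 2
        HttpRequest post = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/crosschain/signedfees"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(signedJson))
                .build();
        System.out.println("Accepted: " + client.send(post, HttpResponse.BodyHandlers.ofString()).body()); // "true" on success
    }
}
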
/**
* Decode Public Key
*


@@ -12,10 +12,15 @@ import org.bouncycastle.util.Strings;
import org.json.simple.JSONObject;
import org.qortal.api.model.CrossChainTradeLedgerEntry;
import org.qortal.api.model.crosschain.BitcoinyTBDRequest;
import org.qortal.asset.Asset;
import org.qortal.crosschain.*;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.at.ATData;
import org.qortal.data.at.ATStateData;
import org.qortal.data.crosschain.*;
import org.qortal.event.EventBus;
import org.qortal.event.LockingFeeUpdateEvent;
import org.qortal.event.RequiredFeeUpdateEvent;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.utils.Amounts;
@@ -23,15 +28,11 @@ import org.qortal.utils.BitTwiddling;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -103,11 +104,13 @@ public class CrossChainUtils {
bitcoiny.setFeePerKb(Coin.valueOf(satoshis) );
EventBus.INSTANCE.notify(new LockingFeeUpdateEvent());
return String.valueOf(bitcoiny.getFeePerKb().value);
}
/**
* Set Fee Ceiling
* Set Fee Required
*
* @param bitcoiny the blockchain support
* @param fee the fee in satoshis
@@ -116,14 +119,16 @@ public class CrossChainUtils {
*
* @throws IllegalArgumentException if invalid
*/
public static String setFeeCeiling(Bitcoiny bitcoiny, String fee) throws IllegalArgumentException{
public static String setFeeRequired(Bitcoiny bitcoiny, String fee) throws IllegalArgumentException{
long satoshis = Long.parseLong(fee);
if( satoshis < 0 ) throw new IllegalArgumentException("can't set fee to negative number");
bitcoiny.setFeeCeiling( Long.parseLong(fee));
bitcoiny.setFeeRequired( Long.parseLong(fee));
return String.valueOf(bitcoiny.getFeeCeiling());
EventBus.INSTANCE.notify(new RequiredFeeUpdateEvent(bitcoiny));
return String.valueOf(bitcoiny.getFeeRequired());
}
/**
@@ -232,6 +237,9 @@ public class CrossChainUtils {
return bitcoiny.getBlockchainProvider().removeServer(server);
}
public static ChainableServer getCurrentServer( Bitcoiny bitcoiny ) {
return bitcoiny.getBlockchainProvider().getCurrentServer();
}
/**
* Set Current Server
*
@@ -771,4 +779,46 @@ public class CrossChainUtils {
entries.add(ledgerEntry);
}
}
public static List<CrossChainTradeData> populateTradeDataList(Repository repository, ACCT acct, List<ATData> atDataList) throws DataException {
if(atDataList.isEmpty()) return new ArrayList<>(0);
List<ATStateData> latestATStates
= repository.getATRepository()
.getLatestATStates(
atDataList.stream()
.map(ATData::getATAddress)
.collect(Collectors.toList())
);
Map<String, ATStateData> atStateDataByAtAddress
= latestATStates.stream().collect(Collectors.toMap(ATStateData::getATAddress, Function.identity()));
Map<String, ATData> atDataByAtAddress
= atDataList.stream().collect(Collectors.toMap(ATData::getATAddress, Function.identity()));
Map<String, Long> balanceByAtAddress
= repository
.getAccountRepository()
.getBalances(new ArrayList<>(atDataByAtAddress.keySet()), Asset.QORT)
.stream().collect(Collectors.toMap(AccountBalanceData::getAddress, AccountBalanceData::getBalance));
List<CrossChainTradeData> crossChainTradeDataList = new ArrayList<>(latestATStates.size());
for( ATStateData atStateData : latestATStates ) {
ATData atData = atDataByAtAddress.get(atStateData.getATAddress());
crossChainTradeDataList.add(
acct.populateTradeData(
repository,
atData.getCreatorPublicKey(),
atData.getCreation(),
atStateData,
OptionalLong.of(balanceByAtAddress.get(atStateData.getATAddress()))
)
);
}
return crossChainTradeDataList;
}
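
populateTradeDataList avoids one repository round-trip per AT by fetching the latest AT states and the QORT balances in bulk, indexing each result set by AT address, and then joining them in memory. A generic sketch of that index-and-join pattern with simple stand-in types (not the real repository API):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class BatchJoinSketch {
    record AtState(String atAddress, int height) {}
    record Balance(String address, long units) {}

    public static void main(String[] args) {
        List<AtState> states = Arrays.asList(new AtState("AT1", 100), new AtState("AT2", 101));
        List<Balance> balances = Arrays.asList(new Balance("AT1", 500L), new Balance("AT2", 700L));

        // Index each bulk result by its key, then join without further lookups
        Map<String, AtState> stateByAddress =
                states.stream().collect(Collectors.toMap(AtState::atAddress, Function.identity()));
        Map<String, Long> balanceByAddress =
                balances.stream().collect(Collectors.toMap(Balance::address, Balance::units));

        for (String address : stateByAddress.keySet()) {
            System.out.println(address + " at height " + stateByAddress.get(address).height()
                    + " holds balance " + balanceByAddress.get(address));
        }
    }
}
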
}


@@ -33,6 +33,7 @@ import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
@Path("/names")
@@ -104,6 +105,45 @@ public class NamesResource {
}
}
@GET
@Path("/primary/{address}")
@Operation(
summary = "primary name owned by address",
responses = {
@ApiResponse(
description = "registered primary name info",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(implementation = NameSummary.class)
)
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE, ApiError.UNAUTHORIZED})
public NameSummary getPrimaryNameByAddress(@PathParam("address") String address) {
if (!Crypto.isValidAddress(address))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
try (final Repository repository = RepositoryManager.getRepository()) {
if (Settings.getInstance().isLite()) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED);
}
else {
Optional<String> primaryName = repository.getNameRepository().getPrimaryName(address);
if(primaryName.isPresent()) {
return new NameSummary(new NameData(primaryName.get(), address));
}
else {
return new NameSummary((new NameData(null, address)));
}
}
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
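
A minimal sketch of querying the new primary-name endpoint. The /names base path is shown in this diff; the host and port are assumptions. When the address owns no primary name, the returned NameSummary carries a null name rather than an error.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PrimaryNameSketch {
    public static void main(String[] args) throws Exception {
        String address = "QexampleAddress123"; // hypothetical address
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/names/primary/" + address))
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // NameSummary JSON; "name" is null if no primary name is set
    }
}
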
@GET
@Path("/{name}")
@Operation(


@@ -1092,25 +1092,4 @@ public class AdminResource {
return info;
}
@GET
@Path("/dbstates")
@Operation(
summary = "Get DB States",
description = "Get DB States",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = DbConnectionInfo.class)))
)
}
)
public List<DbConnectionInfo> getDbConnectionsStates() {
try {
return Controller.REPOSITORY_FACTORY.getDbConnectionsStates();
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
return new ArrayList<>(0);
}
}
}


@@ -71,33 +71,33 @@ public class RenderResource {
@Path("/signature/{signature}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature,
@QueryParam("theme") String theme) {
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme);
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme, lang);
}
@GET
@Path("/signature/{signature}/{path:.*}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath,
@QueryParam("theme") String theme) {
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme);
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme, lang);
}
@GET
@Path("/hash/{hash}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58,
@QueryParam("theme") String theme) {
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme, lang);
}
@GET
@@ -105,11 +105,11 @@ public class RenderResource {
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathByHash(@PathParam("hash") String hash58, @PathParam("path") String inPath,
@QueryParam("secret") String secret58,
@QueryParam("theme") String theme) {
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme, lang);
}
@GET
@@ -119,12 +119,12 @@ public class RenderResource {
@PathParam("name") String name,
@PathParam("path") String inPath,
@QueryParam("identifier") String identifier,
@QueryParam("theme") String theme) {
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, name, service, null);
String prefix = String.format("/render/%s", service);
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme);
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme, lang);
}
@GET
@@ -133,18 +133,18 @@ public class RenderResource {
public HttpServletResponse getIndexByName(@PathParam("service") Service service,
@PathParam("name") String name,
@QueryParam("identifier") String identifier,
@QueryParam("theme") String theme) {
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, name, service, null);
String prefix = String.format("/render/%s", service);
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme);
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme, lang);
}
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String theme) {
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String theme, String lang) {
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, identifier, inPath,
secret58, prefix, includeResourceIdInPrefix, async, "render", request, response, context);
@@ -152,6 +152,9 @@ public class RenderResource {
if (theme != null) {
renderer.setTheme(theme);
}
if (lang != null) {
renderer.setLang(lang);
}
return renderer.render();
}


@@ -0,0 +1,83 @@
package org.qortal.api.websocket;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.WebSocketException;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
import org.qortal.data.crosschain.UnsignedFeeEvent;
import org.qortal.event.Event;
import org.qortal.event.EventBus;
import org.qortal.event.FeeWaitingEvent;
import org.qortal.event.Listener;
import java.io.IOException;
import java.io.StringWriter;
@WebSocket
@SuppressWarnings("serial")
public class UnsignedFeesSocket extends ApiWebSocket implements Listener {
private static final Logger LOGGER = LogManager.getLogger(UnsignedFeesSocket.class);
@Override
public void configure(WebSocketServletFactory factory) {
LOGGER.info("configure");
factory.register(UnsignedFeesSocket.class);
EventBus.INSTANCE.addListener(this);
}
@Override
public void listen(Event event) {
if (!(event instanceof FeeWaitingEvent))
return;
for (Session session : getSessions()) {
FeeWaitingEvent feeWaitingEvent = (FeeWaitingEvent) event;
sendUnsignedFeeEvent(session, new UnsignedFeeEvent(feeWaitingEvent.isPositive(), feeWaitingEvent.getAddress()));
}
}
@OnWebSocketConnect
@Override
public void onWebSocketConnect(Session session) {
super.onWebSocketConnect(session);
}
@OnWebSocketClose
@Override
public void onWebSocketClose(Session session, int statusCode, String reason) {
super.onWebSocketClose(session, statusCode, reason);
}
@OnWebSocketError
public void onWebSocketError(Session session, Throwable throwable) {
/* We ignore errors for now, but method here to silence log spam */
}
@OnWebSocketMessage
public void onWebSocketMessage(Session session, String message) {
LOGGER.info("onWebSocketMessage: message = " + message);
}
private void sendUnsignedFeeEvent(Session session, UnsignedFeeEvent unsignedFeeEvent) {
StringWriter stringWriter = new StringWriter();
try {
marshall(stringWriter, unsignedFeeEvent);
session.getRemote().sendStringByFuture(stringWriter.toString());
} catch (IOException | WebSocketException e) {
// No output this time
}
}
}
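
A minimal sketch of listening for unsigned-fee events from this socket with the JDK's java.net.http.WebSocket client. The websocket URL below is an assumption; the mount point for UnsignedFeesSocket is configured elsewhere and is not shown in this diff.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.WebSocket;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;

public class UnsignedFeesSocketClientSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch latch = new CountDownLatch(1);

        WebSocket.Listener listener = new WebSocket.Listener() {
            @Override
            public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
                // Each message is a marshalled UnsignedFeeEvent (positive flag + address)
                System.out.println("UnsignedFeeEvent: " + data);
                webSocket.request(1); // ask for the next message
                return null;
            }
        };

        HttpClient.newHttpClient()
                .newWebSocketBuilder()
                .buildAsync(URI.create("ws://localhost:12391/websockets/crosschain/unsignedfees"), listener) // assumed URL
                .join();

        latch.await(); // keep the JVM alive while events arrive
    }
}
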


@@ -4,9 +4,12 @@ import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
@@ -23,37 +26,53 @@ public class ArbitraryDataDigest {
}
public void compute() throws IOException, DataException {
List<Path> allPaths = Files.walk(path).filter(Files::isRegularFile).sorted().collect(Collectors.toList());
List<Path> allPaths = Files.walk(path)
.filter(Files::isRegularFile)
.sorted()
.collect(Collectors.toList());
Path basePathAbsolute = this.path.toAbsolutePath();
MessageDigest sha256;
try {
sha256 = MessageDigest.getInstance("SHA-256");
} catch (NoSuchAlgorithmException e) {
throw new DataException("SHA-256 hashing algorithm unavailable");
}
for (Path path : allPaths) {
// We need to work with paths relative to the base path, to ensure the same hash
// is generated on different systems
Path relativePath = basePathAbsolute.relativize(path.toAbsolutePath());
// Exclude Qortal folder since it can be different each time
// We only care about hashing the actual user data
if (relativePath.startsWith(".qortal/")) {
continue;
}
// Account for path separator differences: '\' on Windows vs '/' on Linux
String pathString = relativePath.toString();
if (relativePath.getFileSystem().toString().contains("Windows")) {
pathString = pathString.replace("\\", "/");
}
// Hash path
byte[] filePathBytes = relativePath.toString().getBytes(StandardCharsets.UTF_8);
byte[] filePathBytes = pathString.getBytes(StandardCharsets.UTF_8);
sha256.update(filePathBytes);
// Hash contents
byte[] fileContent = Files.readAllBytes(path);
sha256.update(fileContent);
try (InputStream in = Files.newInputStream(path)) {
byte[] buffer = new byte[65536]; // 64 KB
int bytesRead;
while ((bytesRead = in.read(buffer)) != -1) {
sha256.update(buffer, 0, bytesRead);
}
}
}
this.hash = sha256.digest();
}
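
The separator replacement above exists because the same relative path renders as "dir\file.txt" on Windows and "dir/file.txt" on Linux; without normalizing to '/', the two systems would feed different bytes into SHA-256 and compute different digests for identical data. A tiny sketch of the normalization, independent of the class above:

import java.nio.file.Path;
import java.nio.file.Paths;

public class PathNormalizationSketch {
    public static void main(String[] args) {
        Path relative = Paths.get("data", "images", "logo.png");

        // On Windows this prints data\images\logo.png, on Linux data/images/logo.png
        String raw = relative.toString();

        // Normalizing to forward slashes keeps the hashed bytes identical on both systems
        String normalized = raw.replace("\\", "/");

        System.out.println(raw + " -> " + normalized);
    }
}
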
public boolean isHashValid(byte[] hash) {
return Arrays.equals(hash, this.hash);


@@ -52,7 +52,7 @@ public class ArbitraryDataFile {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFile.class);
public static final long MAX_FILE_SIZE = 500 * 1024 * 1024; // 500MiB
public static final long MAX_FILE_SIZE = 2L * 1024 * 1024 * 1024; // 2 GiB
protected static final int MAX_CHUNK_SIZE = 1 * 1024 * 1024; // 1MiB
public static final int CHUNK_SIZE = 512 * 1024; // 0.5MiB
public static int SHORT_DIGEST_LENGTH = 8;


@@ -1,6 +1,7 @@
package org.qortal.arbitrary;
import com.google.common.io.Resources;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.LogManager;
@@ -15,11 +16,13 @@ import org.qortal.settings.Settings;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
@@ -37,6 +40,7 @@ public class ArbitraryDataRenderer {
private final Service service;
private final String identifier;
private String theme = "light";
private String lang = "en";
private String inPath;
private final String secret58;
private final String prefix;
@@ -166,9 +170,16 @@ public class ArbitraryDataRenderer {
if (HTMLParser.isHtmlFile(filename)) {
// HTML file - needs to be parsed
byte[] data = Files.readAllBytes(filePath); // TODO: limit file size that can be read into memory
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting);
String encodedResourceId;
if (resourceIdType == ResourceIdType.NAME) {
encodedResourceId = resourceId.replace(" ", "%20");
} else {
encodedResourceId = resourceId;
}
HTMLParser htmlParser = new HTMLParser(encodedResourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting, lang);
htmlParser.addAdditionalHeaderTags();
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss:;");
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss: blob:;");
response.setContentType(context.getMimeType(filename));
response.setContentLength(htmlParser.getData().length);
response.getOutputStream().write(htmlParser.getData());
@@ -256,5 +267,8 @@ public class ArbitraryDataRenderer {
public void setTheme(String theme) {
this.theme = theme;
}
public void setLang(String lang) {
this.lang = lang;
}
}


@@ -29,6 +29,7 @@ import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
@@ -197,7 +198,7 @@ public class ArbitraryDataTransactionBuilder {
// We can't use PATCH for on-chain data because this requires the .qortal directory, which can't be put on chain
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(this.path, false);
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(Files.size(path)) <= ArbitraryTransaction.MAX_DATA_SIZE);
if (shouldUseOnChainData) {
LOGGER.info("Data size is small enough to go on chain - using PUT");
return Method.PUT;
@@ -245,7 +246,7 @@ public class ArbitraryDataTransactionBuilder {
// Single file resources are handled differently, especially for very small data payloads, as these go on chain
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(path, false);
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(Files.size(path)) <= ArbitraryTransaction.MAX_DATA_SIZE);
// Use zip compression if data isn't going on chain
Compression compression = shouldUseOnChainData ? Compression.NONE : Compression.ZIP;


@@ -37,7 +37,7 @@ public enum Service {
if (files != null && files[0] != null) {
final String extension = FilenameUtils.getExtension(files[0].getName()).toLowerCase();
// We must allow blank file extensions because these are used by data published from a plaintext or base64-encoded string
final List<String> allowedExtensions = Arrays.asList("zip", "pdf", "txt", "odt", "ods", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "");
final List<String> allowedExtensions = Arrays.asList("qortal", "zip", "pdf", "txt", "odt", "ods", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "");
if (extension == null || !allowedExtensions.contains(extension)) {
return ValidationResult.INVALID_FILE_EXTENSION;
}
@@ -62,7 +62,17 @@ public enum Service {
// Custom validation function to require an index HTML file in the root directory
List<String> fileNames = ArbitraryDataRenderer.indexFiles();
String[] files = path.toFile().list();
List<String> files;
// Single files are packaged differently
if( path.toFile().isFile() ) {
files = new ArrayList<>(1);
files.add(path.getFileName().toString());
}
else {
files = new ArrayList<>(Arrays.asList(path.toFile().list()));
}
if (files != null) {
for (String file : files) {
Path fileName = Paths.get(file).getFileName();


@@ -1640,6 +1640,8 @@ public class Block {
SelfSponsorshipAlgoV2Block.processAccountPenalties(this);
} else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
SelfSponsorshipAlgoV3Block.processAccountPenalties(this);
} else if (this.blockData.getHeight() == BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
PrimaryNamesBlock.processNames(this.repository);
}
}
}
@@ -1721,11 +1723,19 @@ public class Block {
accountData.setBlocksMinted(accountData.getBlocksMinted() + 1);
LOGGER.trace(() -> String.format("Block minter %s up to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
int blocksMintedAdjustment =
        (this.blockData.getHeight() > BlockChain.getInstance().getMintedBlocksAdjustmentRemovalHeight())
                ? 0
                : accountData.getBlocksMintedAdjustment();
final int effectiveBlocksMinted = accountData.getBlocksMinted() + blocksMintedAdjustment + accountData.getBlocksMintedPenalty();
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
if (newLevel > accountData.getLevel()) {
if (newLevel != accountData.getLevel()) {
// Account has increased in level!
accountData.setLevel(newLevel);
bumpedAccounts.put(accountData.getAddress(), newLevel);
@@ -1952,6 +1962,8 @@ public class Block {
SelfSponsorshipAlgoV2Block.orphanAccountPenalties(this);
} else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
SelfSponsorshipAlgoV3Block.orphanAccountPenalties(this);
} else if (this.blockData.getHeight() == BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
PrimaryNamesBlock.orphanNames( this.repository );
}
}
@@ -2127,11 +2139,19 @@ public class Block {
accountData.setBlocksMinted(accountData.getBlocksMinted() - 1);
LOGGER.trace(() -> String.format("Block minter %s down to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
int blocksMintedAdjustment =
        (this.blockData.getHeight() - 1 > BlockChain.getInstance().getMintedBlocksAdjustmentRemovalHeight())
                ? 0
                : accountData.getBlocksMintedAdjustment();
final int effectiveBlocksMinted = accountData.getBlocksMinted() + blocksMintedAdjustment + accountData.getBlocksMintedPenalty();
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
if (newLevel < accountData.getLevel()) {
if (newLevel != accountData.getLevel()) {
// Account has decreased in level!
accountData.setLevel(newLevel);
repository.getAccountRepository().setLevel(accountData);
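
A small worked example of the feature-trigger change above: up to and including the mintedBlocksAdjustmentRemovalHeight the legacy adjustment still counts towards the effective total, while above that height it is treated as zero. The sample numbers are purely illustrative, not real chain values.

public class MintedBlocksAdjustmentSketch {
    static int effectiveBlocksMinted(int height, int removalHeight,
                                     int blocksMinted, int adjustment, int penalty) {
        int appliedAdjustment = (height > removalHeight) ? 0 : adjustment;
        return blocksMinted + appliedAdjustment + penalty;
    }

    public static void main(String[] args) {
        int removalHeight = 2_000_000; // illustrative feature-trigger height, not the real value
        int blocksMinted = 7_000, adjustment = 300, penalty = 0;

        // At or below the trigger height the adjustment still counts...
        System.out.println(effectiveBlocksMinted(1_999_999, removalHeight, blocksMinted, adjustment, penalty)); // 7300
        // ...above it the adjustment is dropped
        System.out.println(effectiveBlocksMinted(2_000_001, removalHeight, blocksMinted, adjustment, penalty)); // 7000
    }
}
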


@@ -92,7 +92,9 @@ public class BlockChain {
adminsReplaceFoundersHeight,
nullGroupMembershipHeight,
ignoreLevelForRewardShareHeight,
adminQueryFixHeight
adminQueryFixHeight,
multipleNamesPerAccountHeight,
mintedBlocksAdjustmentRemovalHeight
}
// Custom transaction fees
@@ -112,7 +114,8 @@ public class BlockChain {
/** Whether to use legacy, broken RIPEMD160 implementation when converting public keys to addresses. */
private boolean useBrokenMD160ForAddresses = false;
/** Whether only one registered name is allowed per account. */
/** This flag is effectively ignored and overridden by oneNamePerAccount(int blockchainHeight),
 * because the behaviour is now determined by block height rather than by a genesis-block setting. */
private boolean oneNamePerAccount = false;
/** Checkpoints */
@@ -474,8 +477,9 @@ public class BlockChain {
return this.useBrokenMD160ForAddresses;
}
public boolean oneNamePerAccount() {
return this.oneNamePerAccount;
public boolean oneNamePerAccount(int blockchainHeight) {
// this is not set on a simple blockchain setting, it is based on a feature trigger height
return blockchainHeight < this.getMultipleNamesPerAccountHeight();
}
public List<Checkpoint> getCheckpoints() {
@@ -688,6 +692,14 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.adminQueryFixHeight.name()).intValue();
}
public int getMultipleNamesPerAccountHeight() {
return this.featureTriggers.get(FeatureTrigger.multipleNamesPerAccountHeight.name()).intValue();
}
public int getMintedBlocksAdjustmentRemovalHeight() {
return this.featureTriggers.get(FeatureTrigger.mintedBlocksAdjustmentRemovalHeight.name()).intValue();
}
// More complex getters for aspects that change by height or timestamp
public long getRewardAtHeight(int ourHeight) {


@@ -0,0 +1,47 @@
package org.qortal.block;
import org.qortal.account.Account;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.data.naming.NameData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Class PrimaryNamesBlock
*/
public class PrimaryNamesBlock {
/**
* Process Primary Names
*
* @param repository
* @throws DataException
*/
public static void processNames(Repository repository) throws DataException {
Set<String> addressesWithNames
= repository.getNameRepository().getAllNames().stream()
.map(NameData::getOwner).collect(Collectors.toSet());
// for each address with a name, set primary name to the address
for( String address : addressesWithNames ) {
Account account = new Account(repository, address);
account.resetPrimaryName(TransactionsResource.ConfirmationStatus.CONFIRMED);
}
}
/**
* Orphan the Primary Names Block
*
* @param repository
* @throws DataException
*/
public static void orphanNames(Repository repository) throws DataException {
repository.getNameRepository().clearPrimaryNames();
}
}
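The core of processNames() above is a stream that deduplicates name owners before each account's primary name is reset. A minimal sketch of that step follows, using a stand-in NameData class rather than org.qortal.data.naming.NameData.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Sketch of the owner-deduplication step: many names can share one owner,
// but each owning address should be processed exactly once.
public final class PrimaryNameOwnersSketch {

    static final class NameData {
        final String name;
        final String owner;
        NameData(String name, String owner) { this.name = name; this.owner = owner; }
        String getOwner() { return owner; }
    }

    static Set<String> addressesWithNames(List<NameData> allNames) {
        return allNames.stream().map(NameData::getOwner).collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        List<NameData> names = List.of(
                new NameData("alice", "Qaddress1"),
                new NameData("alice-two", "Qaddress1"),  // same owner, second name
                new NameData("bob", "Qaddress2"));
        System.out.println(addressesWithNames(names).size()); // 2 distinct owning addresses
    }
}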

View File

@@ -46,6 +46,7 @@ import org.qortal.utils.*;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import java.awt.TrayIcon.MessageType;
import java.io.File;
import java.io.FileNotFoundException;
@@ -53,6 +54,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.SecureRandom;
@@ -70,11 +72,10 @@ import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class Controller extends Thread {
public static HSQLDBRepositoryFactory REPOSITORY_FACTORY;
static {
// This must go before any calls to LogManager/Logger
System.setProperty("log4j2.formatMsgNoLookups", "true");
@@ -396,6 +397,9 @@ public class Controller extends Thread {
Controller.newInstance(args);
cleanChunkUploadTempDir(); // cleanup leftover chunks from streaming to disk
LOGGER.info("Starting NTP");
Long ntpOffset = Settings.getInstance().getTestNtpOffset();
if (ntpOffset != null)
@@ -405,8 +409,8 @@ public class Controller extends Thread {
LOGGER.info("Starting repository");
try {
REPOSITORY_FACTORY = new HSQLDBRepositoryFactory(getRepositoryUrl());
RepositoryManager.setRepositoryFactory(REPOSITORY_FACTORY);
HSQLDBRepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
RepositoryManager.setRepositoryFactory(repositoryFactory);
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
try (final Repository repository = RepositoryManager.getRepository()) {
@@ -560,6 +564,12 @@ public class Controller extends Thread {
LOGGER.info("Starting online accounts manager");
OnlineAccountsManager.getInstance().start();
LOGGER.info("Starting foreign fees manager");
ForeignFeesManager.getInstance().start();
LOGGER.info("Starting follower");
Follower.getInstance().start();
LOGGER.info("Starting transaction importer");
TransactionImporter.getInstance().start();
@@ -1130,6 +1140,9 @@ public class Controller extends Thread {
LOGGER.info("Shutting down online accounts manager");
OnlineAccountsManager.getInstance().shutdown();
LOGGER.info("Shutting down foreign fees manager");
ForeignFeesManager.getInstance().shutdown();
LOGGER.info("Shutting down transaction importer");
TransactionImporter.getInstance().shutdown();
@@ -1474,6 +1487,14 @@ public class Controller extends Thread {
OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV3Message(peer, message);
break;
case GET_FOREIGN_FEES:
ForeignFeesManager.getInstance().onNetworkGetForeignFeesMessage(peer, message);
break;
case FOREIGN_FEES:
ForeignFeesManager.getInstance().onNetworkForeignFeesMessage(peer, message);
break;
case GET_ARBITRARY_DATA:
// Not currently supported
break;
@@ -2160,6 +2181,24 @@ public class Controller extends Thread {
return now - offset;
}
private static void cleanChunkUploadTempDir() {
Path uploadsTemp = Paths.get("uploads-temp");
if (!Files.exists(uploadsTemp)) {
return;
}
try (Stream<Path> paths = Files.walk(uploadsTemp)) {
paths.sorted(Comparator.reverseOrder())
.map(Path::toFile)
.forEach(File::delete);
LOGGER.info("Cleaned up all temporary uploads in {}", uploadsTemp);
} catch (IOException e) {
LOGGER.warn("Failed to clean up uploads-temp directory", e);
}
}
public StatsSnapshot getStatsSnapshot() {
return this.stats;
}
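One detail worth calling out in cleanChunkUploadTempDir() above: Files.walk() yields parents before children, so the stream is sorted in reverse order to delete files before the directories containing them. Below is a self-contained sketch of the same pattern run against a throwaway directory the sketch creates itself; it is an illustration of the technique, not code from the patch.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

// Sketch of the recursive-delete pattern: walk the tree, sort deepest-first,
// and delete files before their parent directories.
public final class TempDirCleanupSketch {

    static void deleteRecursively(Path root) throws IOException {
        if (!Files.exists(root))
            return;

        try (Stream<Path> paths = Files.walk(root)) {
            paths.sorted(Comparator.reverseOrder())  // children come before parents
                 .map(Path::toFile)
                 .forEach(File::delete);
        }
    }

    public static void main(String[] args) throws IOException {
        Path root = Files.createTempDirectory("uploads-temp-sketch");
        Files.createDirectories(root.resolve("chunks"));
        Files.createFile(root.resolve("chunks").resolve("part-0"));

        deleteRecursively(root);
        System.out.println(Files.exists(root)); // false
    }
}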

File diff suppressed because it is too large

View File

@@ -2,6 +2,7 @@ package org.qortal.controller;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.arbitrary.PeerMessage;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
@@ -20,7 +21,11 @@ import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.stream.Collectors;
public class TransactionImporter extends Thread {
@@ -50,6 +55,10 @@ public class TransactionImporter extends Thread {
/** Cached list of unconfirmed transactions, used when counting per creator. This is replaced regularly */
public static List<TransactionData> unconfirmedTransactionsCache = null;
public TransactionImporter() {
signatureMessageScheduler.scheduleAtFixedRate(this::processNetworkTransactionSignaturesMessage, 60, 1, TimeUnit.SECONDS);
getTransactionMessageScheduler.scheduleAtFixedRate(this::processNetworkGetTransactionMessages, 60, 1, TimeUnit.SECONDS);
}
public static synchronized TransactionImporter getInstance() {
if (instance == null) {
@@ -371,36 +380,104 @@ public class TransactionImporter extends Thread {
}
}
// List to collect messages
private final List<PeerMessage> getTransactionMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object getTransactionMessageLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService getTransactionMessageScheduler = Executors.newScheduledThreadPool(1);
public void onNetworkGetTransactionMessage(Peer peer, Message message) {
GetTransactionMessage getTransactionMessage = (GetTransactionMessage) message;
byte[] signature = getTransactionMessage.getSignature();
try (final Repository repository = RepositoryManager.getRepository()) {
synchronized (getTransactionMessageLock) {
getTransactionMessageList.add(new PeerMessage(peer, message));
}
}
private void processNetworkGetTransactionMessages() {
try {
List<PeerMessage> messagesToProcess;
synchronized (getTransactionMessageLock) {
messagesToProcess = new ArrayList<>(getTransactionMessageList);
getTransactionMessageList.clear();
}
if( messagesToProcess.isEmpty() ) return;
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
for( PeerMessage peerMessage : messagesToProcess ) {
GetTransactionMessage getTransactionMessage = (GetTransactionMessage) peerMessage.getMessage();
byte[] signature = getTransactionMessage.getSignature();
peerMessageBySignature58.put(Base58.encode(signature), peerMessage);
}
// Firstly check the sig-valid transactions that are currently queued for import
TransactionData transactionData = this.getCachedSigValidTransactions().stream()
.filter(t -> Arrays.equals(signature, t.getSignature()))
.findFirst().orElse(null);
Map<String, TransactionData> transactionsCachedBySignature58
= this.getCachedSigValidTransactions().stream()
.collect(Collectors.toMap(t -> Base58.encode(t.getSignature()), Function.identity()));
if (transactionData == null) {
Map<Boolean, List<Map.Entry<String, PeerMessage>>> transactionsCachedBySignature58Partition
= peerMessageBySignature58.entrySet().stream()
.collect(Collectors.partitioningBy(entry -> transactionsCachedBySignature58.containsKey(entry.getKey())));
List<byte[]> signaturesNeeded
= transactionsCachedBySignature58Partition.get(false).stream()
.map(Map.Entry::getValue)
.map(PeerMessage::getMessage)
.map(message -> (GetTransactionMessage) message)
.map(GetTransactionMessage::getSignature)
.collect(Collectors.toList());
// transaction found in the import queue
Map<String, TransactionData> transactionsToSendBySignature58 = new HashMap<>(messagesToProcess.size());
for( Map.Entry<String, PeerMessage> entry : transactionsCachedBySignature58Partition.get(true)) {
transactionsToSendBySignature58.put(entry.getKey(), transactionsCachedBySignature58.get(entry.getKey()));
}
if( !signaturesNeeded.isEmpty() ) {
// Not found in import queue, so try the database
transactionData = repository.getTransactionRepository().fromSignature(signature);
try (final Repository repository = RepositoryManager.getRepository()) {
transactionsToSendBySignature58.putAll(
repository.getTransactionRepository().fromSignatures(signaturesNeeded).stream()
.collect(Collectors.toMap(transactionData -> Base58.encode(transactionData.getSignature()), Function.identity()))
);
} catch (DataException e) {
LOGGER.error(e.getMessage(), e);
}
}
if (transactionData == null) {
// Still not found - so we don't have this transaction
LOGGER.debug(() -> String.format("Ignoring GET_TRANSACTION request from peer %s for unknown transaction %s", peer, Base58.encode(signature)));
// Send no response at all???
return;
}
for( final Map.Entry<String, TransactionData> entry : transactionsToSendBySignature58.entrySet() ) {
Message transactionMessage = new TransactionMessage(transactionData);
PeerMessage peerMessage = peerMessageBySignature58.get(entry.getKey());
final Message message = peerMessage.getMessage();
final Peer peer = peerMessage.getPeer();
Runnable sendTransactionMessageRunner = () -> sendTransactionMessage(entry.getKey(), entry.getValue(), message, peer);
Thread sendTransactionMessageThread = new Thread(sendTransactionMessageRunner);
sendTransactionMessageThread.start();
}
} catch (Exception e) {
LOGGER.error(e.getMessage(),e);
}
}
private static void sendTransactionMessage(String signature58, TransactionData data, Message message, Peer peer) {
try {
Message transactionMessage = new TransactionMessage(data);
transactionMessage.setId(message.getId());
if (!peer.sendMessage(transactionMessage))
peer.disconnect("failed to send transaction");
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
} catch (TransformationException e) {
LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
}
catch (TransformationException e) {
LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", signature58, peer), e);
}
catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
@@ -421,44 +498,86 @@ public class TransactionImporter extends Thread {
}
}
// List to collect messages
private final List<PeerMessage> signatureMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object signatureMessageLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService signatureMessageScheduler = Executors.newScheduledThreadPool(1);
public void onNetworkTransactionSignaturesMessage(Peer peer, Message message) {
TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) message;
List<byte[]> signatures = transactionSignaturesMessage.getSignatures();
synchronized (signatureMessageLock) {
signatureMessageList.add(new PeerMessage(peer, message));
}
}
try (final Repository repository = RepositoryManager.getRepository()) {
for (byte[] signature : signatures) {
String signature58 = Base58.encode(signature);
if (invalidUnconfirmedTransactions.containsKey(signature58)) {
// Previously invalid transaction - don't keep requesting it
// It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
continue;
}
public void processNetworkTransactionSignaturesMessage() {
// Ignore if this transaction is in the queue
if (incomingTransactionQueueContains(signature)) {
LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peer));
continue;
}
try {
List<PeerMessage> messagesToProcess;
synchronized (signatureMessageLock) {
messagesToProcess = new ArrayList<>(signatureMessageList);
signatureMessageList.clear();
}
// Do we have it already? (Before requesting transaction data itself)
if (repository.getTransactionRepository().exists(signature)) {
LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(signature), peer));
continue;
}
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size() * 10);
Map<String, Peer> peerBySignature58 = new HashMap<>( messagesToProcess.size() * 10 );
// Check isInterrupted() here and exit fast
if (Thread.currentThread().isInterrupted())
return;
for( PeerMessage peerMessage : messagesToProcess ) {
// Fetch actual transaction data from peer
Message getTransactionMessage = new GetTransactionMessage(signature);
if (!peer.sendMessage(getTransactionMessage)) {
peer.disconnect("failed to request transaction");
return;
TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) peerMessage.getMessage();
List<byte[]> signatures = transactionSignaturesMessage.getSignatures();
for (byte[] signature : signatures) {
String signature58 = Base58.encode(signature);
if (invalidUnconfirmedTransactions.containsKey(signature58)) {
// Previously invalid transaction - don't keep requesting it
// It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
continue;
}
// Ignore if this transaction is in the queue
if (incomingTransactionQueueContains(signature)) {
LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peerMessage.getPeer()));
continue;
}
signatureBySignature58.put(signature58, signature);
peerBySignature58.put(signature58, peerMessage.getPeer());
}
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer %s", peer), e);
if( !signatureBySignature58.isEmpty() ) {
try (final Repository repository = RepositoryManager.getRepository()) {
// Remove signatures whose transactions are already in the database
repository.getTransactionRepository()
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
.map(TransactionData::getSignature)
.map(signature -> Base58.encode(signature))
.forEach(signature58 -> signatureBySignature58.remove(signature58));
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer"), e);
}
}
// Check isInterrupted() here and exit fast
if (Thread.currentThread().isInterrupted())
return;
for (Map.Entry<String, byte[]> entry : signatureBySignature58.entrySet()) {
Peer peer = peerBySignature58.get(entry.getKey());
// Fetch actual transaction data from peer
Message getTransactionMessage = new GetTransactionMessage(entry.getValue());
if (peer != null && !peer.sendMessage(getTransactionMessage)) {
peer.disconnect("failed to request transaction");
}
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
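The pattern running through this file (and the arbitrary-data managers below) is the same: network handlers only append a PeerMessage to a synchronized list, and a single-thread ScheduledExecutorService drains that list once per second and processes the whole batch off the network threads. The sketch below shows that collect-and-drain loop in generic form, with String standing in for PeerMessage; names and timings are illustrative only.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Generic collect-and-drain sketch: producers add cheaply under a lock,
// a scheduled task swaps the batch out and processes it in one pass.
public final class BatchedMessageProcessorSketch {

    private final List<String> messageList = new ArrayList<>();
    private final Object messageLock = new Object();
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

    public BatchedMessageProcessorSketch() {
        // Initial delay, then a fixed one-second cadence, as in the managers above
        scheduler.scheduleAtFixedRate(this::processMessages, 1, 1, TimeUnit.SECONDS);
    }

    public void onMessage(String message) {
        synchronized (messageLock) {
            messageList.add(message);
        }
    }

    private void processMessages() {
        List<String> toProcess;
        synchronized (messageLock) {
            toProcess = new ArrayList<>(messageList);
            messageList.clear();
        }
        if (toProcess.isEmpty())
            return;
        System.out.println("Processing batch of " + toProcess.size() + " messages");
    }

    public static void main(String[] args) throws InterruptedException {
        BatchedMessageProcessorSketch sketch = new BatchedMessageProcessorSketch();
        sketch.onMessage("tx-signature-1");
        sketch.onMessage("tx-signature-2");
        Thread.sleep(1500);          // let one scheduled drain run
        sketch.scheduler.shutdown();
    }
}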

View File

@@ -25,6 +25,10 @@ import org.qortal.utils.NTP;
import org.qortal.utils.Triple;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.qortal.controller.arbitrary.ArbitraryDataFileManager.MAX_FILE_HASH_RESPONSES;
@@ -73,6 +77,8 @@ public class ArbitraryDataFileListManager {
private ArbitraryDataFileListManager() {
getArbitraryDataFileListMessageScheduler.scheduleAtFixedRate(this::processNetworkGetArbitraryDataFileListMessage, 60, 1, TimeUnit.SECONDS);
arbitraryDataFileListMessageScheduler.scheduleAtFixedRate(this::processNetworkArbitraryDataFileListMessage, 60, 1, TimeUnit.SECONDS);
}
public static ArbitraryDataFileListManager getInstance() {
@@ -118,8 +124,8 @@ public class ArbitraryDataFileListManager {
if (timeSinceLastAttempt > 15 * 1000L) {
// We haven't tried for at least 15 seconds
if (networkBroadcastCount < 3) {
// We've made less than 3 total attempts
if (networkBroadcastCount < 12) {
// We've made less than 12 total attempts
return true;
}
}
@@ -128,8 +134,8 @@ public class ArbitraryDataFileListManager {
if (timeSinceLastAttempt > 60 * 1000L) {
// We haven't tried for at least 1 minute
if (networkBroadcastCount < 8) {
// We've made less than 8 total attempts
if (networkBroadcastCount < 40) {
// We've made less than 40 total attempts
return true;
}
}
@@ -396,11 +402,11 @@ public class ArbitraryDataFileListManager {
return true;
}
public void deleteFileListRequestsForSignature(byte[] signature) {
String signature58 = Base58.encode(signature);
public void deleteFileListRequestsForSignature(String signature58) {
for (Iterator<Map.Entry<Integer, Triple<String, Peer, Long>>> it = arbitraryDataFileListRequests.entrySet().iterator(); it.hasNext();) {
Map.Entry<Integer, Triple<String, Peer, Long>> entry = it.next();
if (entry == null || entry.getKey() == null || entry.getValue() != null) {
if (entry == null || entry.getKey() == null || entry.getValue() == null) {
continue;
}
if (Objects.equals(entry.getValue().getA(), signature58)) {
@@ -413,70 +419,116 @@ public class ArbitraryDataFileListManager {
// Network handlers
// List to collect messages
private final List<PeerMessage> arbitraryDataFileListMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object arbitraryDataFileListMessageLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService arbitraryDataFileListMessageScheduler = Executors.newScheduledThreadPool(1);
public void onNetworkArbitraryDataFileListMessage(Peer peer, Message message) {
// Don't process if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
synchronized (arbitraryDataFileListMessageLock) {
arbitraryDataFileListMessageList.add(new PeerMessage(peer, message));
}
}
// Do we have a pending request for this data?
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
if (request == null || request.getA() == null) {
return;
}
boolean isRelayRequest = (request.getB() != null);
private void processNetworkArbitraryDataFileListMessage() {
// Does this message's signature match what we're expecting?
byte[] signature = arbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
if (!request.getA().equals(signature58)) {
return;
}
try {
List<PeerMessage> messagesToProcess;
synchronized (arbitraryDataFileListMessageLock) {
messagesToProcess = new ArrayList<>(arbitraryDataFileListMessageList);
arbitraryDataFileListMessageList.clear();
}
List<byte[]> hashes = arbitraryDataFileListMessage.getHashes();
if (hashes == null || hashes.isEmpty()) {
return;
}
if (messagesToProcess.isEmpty()) return;
ArbitraryTransactionData arbitraryTransactionData = null;
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, Boolean> isRelayRequestBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, List<byte[]>> hashesBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, Triple<String, Peer, Long>> requestBySignature58 = new HashMap<>(messagesToProcess.size());
// Check transaction exists and hashes are correct
try (final Repository repository = RepositoryManager.getRepository()) {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
for (PeerMessage peerMessage : messagesToProcess) {
Peer peer = peerMessage.getPeer();
Message message = peerMessage.getMessage();
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}
// Do we have a pending request for this data?
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
if (request == null || request.getA() == null) {
continue;
}
boolean isRelayRequest = (request.getB() != null);
// Does this message's signature match what we're expecting?
byte[] signature = arbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
if (!request.getA().equals(signature58)) {
continue;
}
List<byte[]> hashes = arbitraryDataFileListMessage.getHashes();
if (hashes == null || hashes.isEmpty()) {
continue;
}
peerMessageBySignature58.put(signature58, peerMessage);
signatureBySignature58.put(signature58, signature);
isRelayRequestBySignature58.put(signature58, isRelayRequest);
hashesBySignature58.put(signature58, hashes);
requestBySignature58.put(signature58, request);
}
if (signatureBySignature58.isEmpty()) return;
List<ArbitraryTransactionData> arbitraryTransactionDataList;
// Check transaction exists and hashes are correct
try (final Repository repository = RepositoryManager.getRepository()) {
arbitraryTransactionDataList
= repository.getTransactionRepository()
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
.filter(data -> data instanceof ArbitraryTransactionData)
.map(data -> (ArbitraryTransactionData) data)
.collect(Collectors.toList());
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list"), e);
return;
}
arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
for (ArbitraryTransactionData arbitraryTransactionData : arbitraryTransactionDataList) {
// // Load data file(s)
// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
//
// // Check all hashes exist
// for (byte[] hash : hashes) {
// //LOGGER.debug("Received hash {}", Base58.encode(hash));
// if (!arbitraryDataFile.containsChunk(hash)) {
// // Check the hash against the complete file
// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
// LOGGER.info("Received non-matching chunk hash {} for signature {}. This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58);
// return;
// }
// }
// }
byte[] signature = arbitraryTransactionData.getSignature();
String signature58 = Base58.encode(signature);
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
Long now = NTP.getTime();
List<byte[]> hashes = hashesBySignature58.get(signature58);
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
Peer peer = peerMessage.getPeer();
Message message = peerMessage.getMessage();
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
Boolean isRelayRequest = isRelayRequestBySignature58.get(signature58);
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
Long now = NTP.getTime();
if (ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.size() < MAX_FILE_HASH_RESPONSES) {
// Keep track of the hashes this peer reports to have access to
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
@@ -487,233 +539,300 @@ public class ArbitraryDataFileListManager {
ArbitraryFileListResponseInfo responseInfo = new ArbitraryFileListResponseInfo(hash58, signature58,
peer, now, arbitraryDataFileListMessage.getRequestTime(), requestHops);
ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.add(responseInfo);
ArbitraryDataFileManager.getInstance().addResponse(responseInfo);
}
// Keep track of the source peer, for direct connections
if (arbitraryDataFileListMessage.getPeerAddress() != null) {
ArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
new ArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
}
}
// Keep track of the source peer, for direct connections
if (arbitraryDataFileListMessage.getPeerAddress() != null) {
ArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
new ArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
}
}
// Forwarding
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list for peer %s", peer), e);
}
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
if (!isBlocked) {
Triple<String, Peer, Long> request = requestBySignature58.get(signature58);
Peer requestingPeer = request.getB();
if (requestingPeer != null) {
Long requestTime = arbitraryDataFileListMessage.getRequestTime();
Integer requestHops = arbitraryDataFileListMessage.getRequestHops();
// Forwarding
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
if (!isBlocked) {
Peer requestingPeer = request.getB();
if (requestingPeer != null) {
Long requestTime = arbitraryDataFileListMessage.getRequestTime();
Integer requestHops = arbitraryDataFileListMessage.getRequestHops();
// Add each hash to our local mapping so we know who to ask later
Long now = NTP.getTime();
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
ArbitraryRelayInfo relayInfo = new ArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
ArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
}
// Add each hash to our local mapping so we know who to ask later
Long now = NTP.getTime();
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
ArbitraryRelayInfo relayInfo = new ArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
ArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
}
// Bump requestHops if it exists
if (requestHops != null) {
requestHops++;
}
// Bump requestHops if it exists
if (requestHops != null) {
requestHops++;
}
ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}
forwardArbitraryDataFileListMessage.setId(message.getId());
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}
forwardArbitraryDataFileListMessage.setId(message.getId());
// Forward to requesting peer
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
if (!requestingPeer.sendMessage(forwardArbitraryDataFileListMessage)) {
requestingPeer.disconnect("failed to forward arbitrary data file list");
// Forward to requesting peer
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
requestingPeer.sendMessage(forwardArbitraryDataFileListMessage);
}
}
}
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
// List to collect messages
private final List<PeerMessage> getArbitraryDataFileListMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object getArbitraryDataFileListMessageLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService getArbitraryDataFileListMessageScheduler = Executors.newScheduledThreadPool(1);
public void onNetworkGetArbitraryDataFileListMessage(Peer peer, Message message) {
// Don't respond if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}
Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
byte[] signature = getArbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
// If we've seen this request recently, then ignore
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
return;
synchronized (getArbitraryDataFileListMessageLock) {
getArbitraryDataFileListMessageList.add(new PeerMessage(peer, message));
}
}
List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();
private void processNetworkGetArbitraryDataFileListMessage() {
if (requestingPeer != null) {
LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
}
else {
LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
}
try {
List<PeerMessage> messagesToProcess;
synchronized (getArbitraryDataFileListMessageLock) {
messagesToProcess = new ArrayList<>(getArbitraryDataFileListMessageList);
getArbitraryDataFileListMessageList.clear();
}
List<byte[]> hashes = new ArrayList<>();
ArbitraryTransactionData transactionData = null;
boolean allChunksExist = false;
boolean hasMetadata = false;
if (messagesToProcess.isEmpty()) return;
try (final Repository repository = RepositoryManager.getRepository()) {
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, List<byte[]>> requestedHashesBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, String> requestingPeerBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, Long> nowBySignature58 = new HashMap<>((messagesToProcess.size()));
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
// Firstly we need to lookup this file on chain to get a list of its hashes
transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
if (transactionData instanceof ArbitraryTransactionData) {
for (PeerMessage messagePeer : messagesToProcess) {
Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();
Message message = messagePeer.message;
Peer peer = messagePeer.peer;
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
byte[] signature = getArbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
// If we've seen this request recently, then ignore
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
continue;
}
List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();
if (requestingPeer != null) {
LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
} else {
LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
}
signatureBySignature58.put(signature58, signature);
requestedHashesBySignature58.put(signature58, requestedHashes);
requestingPeerBySignature58.put(signature58, requestingPeer);
nowBySignature58.put(signature58, now);
peerMessageBySignature58.put(signature58, messagePeer);
}
if (signatureBySignature58.isEmpty()) {
return;
}
List<byte[]> hashes = new ArrayList<>();
boolean allChunksExist = false;
boolean hasMetadata = false;
List<ArbitraryTransactionData> transactionDataList;
try (final Repository repository = RepositoryManager.getRepository()) {
// Firstly we need to lookup this file on chain to get a list of its hashes
transactionDataList
= repository.getTransactionRepository()
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
.filter(data -> data instanceof ArbitraryTransactionData)
.map(data -> (ArbitraryTransactionData) data)
.collect(Collectors.toList());
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer"), e);
return;
}
for (ArbitraryTransactionData transactionData : transactionDataList) {
byte[] signature = transactionData.getSignature();
String signature58 = Base58.encode(signature);
List<byte[]> requestedHashes = requestedHashesBySignature58.get(signature58);
// Check if we're even allowed to serve data for this transaction
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
try {
// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
// If the peer didn't supply a hash list, we need to return all hashes for this transaction
if (requestedHashes == null || requestedHashes.isEmpty()) {
requestedHashes = new ArrayList<>();
// If the peer didn't supply a hash list, we need to return all hashes for this transaction
if (requestedHashes == null || requestedHashes.isEmpty()) {
requestedHashes = new ArrayList<>();
// Add the metadata file
if (arbitraryDataFile.getMetadataHash() != null) {
requestedHashes.add(arbitraryDataFile.getMetadataHash());
hasMetadata = true;
// Add the metadata file
if (arbitraryDataFile.getMetadataHash() != null) {
requestedHashes.add(arbitraryDataFile.getMetadataHash());
hasMetadata = true;
}
// Add the chunk hashes
if (!arbitraryDataFile.getChunkHashes().isEmpty()) {
requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
}
// Add complete file if there are no hashes
else {
requestedHashes.add(arbitraryDataFile.getHash());
}
}
// Add the chunk hashes
if (!arbitraryDataFile.getChunkHashes().isEmpty()) {
requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
}
// Add complete file if there are no hashes
else {
requestedHashes.add(arbitraryDataFile.getHash());
// Assume all chunks exist, unless one can't be found below
allChunksExist = true;
for (byte[] requestedHash : requestedHashes) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
if (chunk.exists()) {
hashes.add(chunk.getHash());
//LOGGER.trace("Added hash {}", chunk.getHash58());
} else {
LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
allChunksExist = false;
}
}
} catch (DataException e) {
LOGGER.error(e.getMessage(), e);
}
}
// If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
// or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
if (hasMetadata && hashes.size() == 1) {
hashes.clear();
}
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
Peer peer = peerMessage.getPeer();
Message message = peerMessage.getMessage();
Long now = nowBySignature58.get(signature58);
// We should only respond if we have at least one hash
String requestingPeer = requestingPeerBySignature58.get(signature58);
if (!hashes.isEmpty()) {
// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);
// We have all the chunks, so update requests map to reflect that we've sent it
// There is no need to keep track of the request, as we can serve all the chunks
if (allChunksExist) {
Triple<String, Peer, Long> newEntry = new Triple<>(null, null, now);
arbitraryDataFileListRequests.put(message.getId(), newEntry);
}
// Assume all chunks exist, unless one can't be found below
allChunksExist = true;
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
ArbitraryDataFileListMessage arbitraryDataFileListMessage;
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
hashes, NTP.getTime(), 0, ourAddress, true);
}
arbitraryDataFileListMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileListMessage)) {
LOGGER.debug("Couldn't send list of hashes");
continue;
}
if (allChunksExist) {
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
LOGGER.debug("No need for any forwarding because file list request is fully served");
continue;
}
}
// We may need to forward this request on
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
long requestTime = getArbitraryDataFileListMessage.getRequestTime();
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
long totalRequestTime = now - requestTime;
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
// Relay request hasn't timed out yet, so can potentially be rebroadcast
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
relayGetArbitraryDataFileListMessage.setId(message.getId());
LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer ->
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
);
for (byte[] requestedHash : requestedHashes) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
if (chunk.exists()) {
hashes.add(chunk.getHash());
//LOGGER.trace("Added hash {}", chunk.getHash58());
} else {
LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
allChunksExist = false;
// This relay request has reached the maximum number of allowed hops
}
} else {
// This relay request has timed out
}
}
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e);
}
// If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
// or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
if (hasMetadata && hashes.size() == 1) {
hashes.clear();
}
// We should only respond if we have at least one hash
if (!hashes.isEmpty()) {
// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);
// We have all the chunks, so update requests map to reflect that we've sent it
// There is no need to keep track of the request, as we can serve all the chunks
if (allChunksExist) {
newEntry = new Triple<>(null, null, now);
arbitraryDataFileListRequests.put(message.getId(), newEntry);
}
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
ArbitraryDataFileListMessage arbitraryDataFileListMessage;
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
hashes, NTP.getTime(), 0, ourAddress, true);
}
arbitraryDataFileListMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileListMessage)) {
LOGGER.debug("Couldn't send list of hashes");
peer.disconnect("failed to send list of hashes");
return;
}
LOGGER.debug("Sent list of hashes (count: {})", hashes.size());
if (allChunksExist) {
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
LOGGER.debug("No need for any forwarding because file list request is fully served");
return;
}
}
// We may need to forward this request on
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it
long requestTime = getArbitraryDataFileListMessage.getRequestTime();
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
long totalRequestTime = now - requestTime;
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
// Relay request hasn't timed out yet, so can potentially be rebroadcast
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
relayGetArbitraryDataFileListMessage.setId(message.getId());
LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer ->
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
);
}
else {
// This relay request has reached the maximum number of allowed hops
}
}
else {
// This relay request has timed out
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
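The relay branch above rebroadcasts a hash-list request only while two limits hold: the request has not exceeded RELAY_REQUEST_MAX_DURATION, and the hop count (bumped by one at each relay) is still under RELAY_REQUEST_MAX_HOPS. A small sketch of that gate follows; the limit values are placeholders, since the real constants are defined elsewhere in this class and are not shown in the diff.

// Placeholder limits; the real RELAY_REQUEST_MAX_DURATION / RELAY_REQUEST_MAX_HOPS
// values live elsewhere in ArbitraryDataFileListManager.
public final class RelayGateSketch {

    static final long RELAY_REQUEST_MAX_DURATION = 5_000L; // ms, assumed
    static final int RELAY_REQUEST_MAX_HOPS = 4;           // assumed

    static boolean shouldRebroadcast(long now, long requestTime, int requestHopsSoFar) {
        int requestHops = requestHopsSoFar + 1;             // bump hop count for this relay
        long totalRequestTime = now - requestTime;
        return totalRequestTime < RELAY_REQUEST_MAX_DURATION && requestHops < RELAY_REQUEST_MAX_HOPS;
    }

    public static void main(String[] args) {
        long now = 100_000L;
        System.out.println(shouldRebroadcast(now, now - 1_000, 1)); // true  - fresh request, few hops
        System.out.println(shouldRebroadcast(now, now - 9_000, 1)); // false - timed out
        System.out.println(shouldRebroadcast(now, now - 1_000, 3)); // false - hop limit reached
    }
}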

View File

@@ -1,6 +1,7 @@
package org.qortal.controller.arbitrary;
import com.google.common.net.InetAddresses;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
@@ -12,6 +13,7 @@ import org.qortal.data.network.PeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.PeerSendManagement;
import org.qortal.network.message.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
@@ -23,12 +25,16 @@ import org.qortal.utils.NTP;
import java.security.SecureRandom;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
public class ArbitraryDataFileManager extends Thread {
public static final int SEND_TIMEOUT_MS = 500;
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileManager.class);
private static ArbitraryDataFileManager instance;
@@ -48,7 +54,7 @@ public class ArbitraryDataFileManager extends Thread {
/**
* List to keep track of any arbitrary data file hash responses
*/
public final List<ArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());
private final List<ArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());
/**
* List to keep track of peers potentially available for direct connections, based on recent requests
@@ -65,8 +71,9 @@ public class ArbitraryDataFileManager extends Thread {
public static int MAX_FILE_HASH_RESPONSES = 1000;
private ArbitraryDataFileManager() {
this.arbitraryDataFileHashResponseScheduler.scheduleAtFixedRate( this::processResponses, 60, 1, TimeUnit.SECONDS);
this.arbitraryDataFileHashResponseScheduler.scheduleAtFixedRate(this::handleFileListRequestProcess, 60, 1, TimeUnit.SECONDS);
}
public static ArbitraryDataFileManager getInstance() {
@@ -76,18 +83,13 @@ public class ArbitraryDataFileManager extends Thread {
return instance;
}
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data File Manager");
try {
// Use a fixed thread pool to execute the arbitrary data file requests
int threadCount = 5;
ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount);
for (int i = 0; i < threadCount; i++) {
arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread());
}
while (!isStopping) {
// Nothing to do yet
Thread.sleep(1000);
@@ -112,7 +114,6 @@ public class ArbitraryDataFileManager extends Thread {
final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT;
arbitraryRelayMap.removeIf(entry -> entry == null || entry.getTimestamp() == null || entry.getTimestamp() < relayMinimumTimestamp);
arbitraryDataFileHashResponses.removeIf(entry -> entry.getTimestamp() < relayMinimumTimestamp);
final long directConnectionInfoMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT;
directConnectionInfo.removeIf(entry -> entry.getTimestamp() < directConnectionInfoMinimumTimestamp);
@@ -125,8 +126,7 @@ public class ArbitraryDataFileManager extends Thread {
// Fetch data files by hash
public boolean fetchArbitraryDataFiles(Repository repository,
Peer peer,
public boolean fetchArbitraryDataFiles(Peer peer,
byte[] signature,
ArbitraryTransactionData arbitraryTransactionData,
List<byte[]> hashes) throws DataException {
@@ -146,21 +146,15 @@ public class ArbitraryDataFileManager extends Thread {
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
LOGGER.debug("Requesting data file {} from peer {}", hash58, peer);
Long startTime = NTP.getTime();
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, null, arbitraryTransactionData, signature, hash, null);
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, arbitraryTransactionData, signature, hash);
Long endTime = NTP.getTime();
if (receivedArbitraryDataFile != null) {
LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFile.getHash58(), peer, (endTime-startTime));
receivedAtLeastOneFile = true;
// Remove this hash from arbitraryDataFileHashResponses now that we have received it
arbitraryDataFileHashResponses.remove(hash58);
}
else {
LOGGER.debug("Peer {} didn't respond with data file {} for signature {}. Time taken: {} ms", peer, Base58.encode(hash), Base58.encode(signature), (endTime-startTime));
// Remove this hash from arbitraryDataFileHashResponses now that we have failed to receive it
arbitraryDataFileHashResponses.remove(hash58);
// Stop asking for files from this peer
break;
}
@@ -169,10 +163,6 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.trace("Already requesting data file {} for signature {} from peer {}", arbitraryDataFile, Base58.encode(signature), peer);
}
}
else {
// Remove this hash from arbitraryDataFileHashResponses because we have a local copy
arbitraryDataFileHashResponses.remove(hash58);
}
}
if (receivedAtLeastOneFile) {
@@ -191,14 +181,103 @@ public class ArbitraryDataFileManager extends Thread {
return receivedAtLeastOneFile;
}
private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, Peer requestingPeer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
boolean fileAlreadyExists = existingFile.exists();
String hash58 = Base58.encode(hash);
// Lock to synchronize access to the list
private final Object arbitraryDataFileHashResponseLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService arbitraryDataFileHashResponseScheduler = Executors.newScheduledThreadPool(1);
public void addResponse( ArbitraryFileListResponseInfo responseInfo ) {
synchronized (arbitraryDataFileHashResponseLock) {
this.arbitraryDataFileHashResponses.add(responseInfo);
}
}
private void processResponses() {
try {
List<ArbitraryFileListResponseInfo> responsesToProcess;
synchronized (arbitraryDataFileHashResponseLock) {
responsesToProcess = new ArrayList<>(arbitraryDataFileHashResponses);
arbitraryDataFileHashResponses.clear();
}
if (responsesToProcess.isEmpty()) return;
Long now = NTP.getTime();
ArbitraryDataFileRequestThread.getInstance().processFileHashes(now, responsesToProcess, this);
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash) throws DataException {
ArbitraryDataFile arbitraryDataFile;
// Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) {
try {
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
boolean fileAlreadyExists = existingFile.exists();
String hash58 = Base58.encode(hash);
// Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) {
LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
Message response = null;
try {
response = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
} catch (InterruptedException e) {
// Will return below due to null response
}
arbitraryDataFileRequests.remove(hash58);
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
if (response == null) {
LOGGER.debug("Received null response from peer {}", peer);
return null;
}
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
return null;
}
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
} else {
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
arbitraryDataFile = existingFile;
}
if (arbitraryDataFile != null) {
arbitraryDataFile.save();
// If this is a metadata file then we need to update the cache
if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) {
if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) {
ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
}
}
// We may need to remove the file list request, if we have all the files for this transaction
this.handleFileListRequests(signature);
}
} catch (DataException e) {
LOGGER.error(e.getMessage(), e);
arbitraryDataFile = null;
}
return arbitraryDataFile;
}
private void fetchFileForRelay(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
try {
String hash58 = Base58.encode(hash);
LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
@@ -212,73 +291,73 @@ public class ArbitraryDataFileManager extends Thread {
arbitraryDataFileRequests.remove(hash58);
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
// We may need to remove the file list request, if we have all the files for this transaction
this.handleFileListRequests(signature);
if (response == null) {
LOGGER.debug("Received null response from peer {}", peer);
return null;
return;
}
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
return null;
}
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
} else {
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
arbitraryDataFile = existingFile;
}
if (arbitraryDataFile == null) {
// We don't have a file, so give up here
return null;
}
// We might want to forward the request to the peer that originally requested it
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
boolean isRelayRequest = (requestingPeer != null);
if (isRelayRequest) {
if (!fileAlreadyExists) {
// File didn't exist locally before the request, and it's a forwarding request, so delete it if it exists.
// It shouldn't exist on the filesystem yet, but leaving this here just in case.
arbitraryDataFile.delete(10);
}
}
else {
arbitraryDataFile.save();
}
// If this is a metadata file then we need to update the cache
if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) {
if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) {
ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
}
}
return arbitraryDataFile;
}
private void handleFileListRequests(byte[] signature) {
try (final Repository repository = RepositoryManager.getRepository()) {
// Fetch the transaction data
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
if (arbitraryTransactionData == null) {
return;
}
boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(arbitraryTransactionData);
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
ArbitraryDataFile arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
if (allChunksExist) {
// Update requests map to reflect that we've received all chunks
ArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature);
if (arbitraryDataFile != null) {
// We might want to forward the request to the peer that originally requested it
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
Map<String, byte[]> signatureBySignature58 = new HashMap<>();
// Lock to synchronize access to the list
private final Object handleFileListRequestsLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService handleFileListRequestsScheduler = Executors.newScheduledThreadPool(1);
private void handleFileListRequests(byte[] signature) {
synchronized (handleFileListRequestsLock) {
signatureBySignature58.put(Base58.encode(signature), signature);
}
}
private void handleFileListRequestProcess() {
Map<String, byte[]> signaturesToProcess;
synchronized (handleFileListRequestsLock) {
signaturesToProcess = new HashMap<>(signatureBySignature58);
signatureBySignature58.clear();
}
if( signaturesToProcess.isEmpty() ) return;
try (final Repository repository = RepositoryManager.getRepository()) {
// Fetch the transaction data
List<ArbitraryTransactionData> arbitraryTransactionDataList
= ArbitraryTransactionUtils.fetchTransactionDataList(repository, new ArrayList<>(signaturesToProcess.values()));
for( ArbitraryTransactionData arbitraryTransactionData : arbitraryTransactionDataList ) {
boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData);
if (completeFileExists) {
String signature58 = Base58.encode(arbitraryTransactionData.getSignature());
LOGGER.debug("All chunks or complete file exist for transaction {}", signature58);
ArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature58);
}
}
} catch (DataException e) {
LOGGER.debug("Unable to handle file list requests: {}", e.getMessage());
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
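
The pair of methods above replaces per-call repository lookups with a collect-then-drain pattern: callers only record a signature under a lock, and a once-per-second task resolves everything that accumulated in a single repository query. The scheduler wiring is not visible in this hunk, so the following is only a minimal, self-contained sketch of the same pattern with illustrative names (SignatureBatcherSketch is not part of the codebase):

// Illustrative class; the real wiring of handleFileListRequestsScheduler is not shown in this hunk.
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class SignatureBatcherSketch {
    private final Map<String, byte[]> pending = new HashMap<>();
    private final Object lock = new Object();
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

    SignatureBatcherSketch() {
        // Drain whatever accumulated, once per second, off the network threads
        scheduler.scheduleAtFixedRate(this::drain, 1, 1, TimeUnit.SECONDS);
    }

    /** Cheap, non-blocking call used by message handlers. */
    void submit(String signature58, byte[] signature) {
        synchronized (lock) {
            pending.put(signature58, signature);
        }
    }

    private void drain() {
        Map<String, byte[]> batch;
        synchronized (lock) {
            batch = new HashMap<>(pending);
            pending.clear();
        }
        if (batch.isEmpty()) return;
        // One repository round-trip for the whole batch would go here
    }
}
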
@@ -295,15 +374,14 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.debug("Received arbitrary data file - forwarding is needed");
// The ID needs to match that of the original request
message.setId(originalMessage.getId());
try {
// The ID needs to match that of the original request
message.setId(originalMessage.getId());
if (!requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
requestingPeer.disconnect("failed to forward arbitrary data file");
}
else {
LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer);
PeerSendManagement.getInstance().getOrCreateSendManager(requestingPeer).queueMessage(message, SEND_TIMEOUT_MS);
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
@@ -577,13 +655,9 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.debug("Sending file {}...", arbitraryDataFile);
ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
arbitraryDataFileMessage.setId(message.getId());
if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
LOGGER.debug("Couldn't send file {}", arbitraryDataFile);
peer.disconnect("failed to send file");
}
else {
LOGGER.debug("Sent file {}", arbitraryDataFile);
}
PeerSendManagement.getInstance().getOrCreateSendManager(peer).queueMessage(arbitraryDataFileMessage, SEND_TIMEOUT_MS);
}
else if (relayInfo != null) {
LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
@@ -595,7 +669,7 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58);
// No need to pass arbitraryTransactionData below because this is only used for metadata caching,
// and metadata isn't retained when relaying.
this.fetchArbitraryDataFile(peerToAsk, peer, null, signature, hash, message);
this.fetchFileForRelay(peerToAsk, peer, signature, hash, message);
}
else {
LOGGER.debug("Peer {} not found in relay info", peer);
@@ -617,7 +691,6 @@ public class ArbitraryDataFileManager extends Thread {
fileUnknownMessage.setId(message.getId());
if (!peer.sendMessage(fileUnknownMessage)) {
LOGGER.debug("Couldn't sent file-unknown response");
peer.disconnect("failed to send file-unknown response");
}
else {
LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile);

View File

@@ -4,127 +4,186 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.event.DataMonitorEvent;
import org.qortal.event.EventBus;
import org.qortal.network.Peer;
import org.qortal.network.message.MessageType;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import org.qortal.utils.NamedThreadFactory;
import java.net.http.HttpResponse;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static java.lang.Thread.NORM_PRIORITY;
public class ArbitraryDataFileRequestThread implements Runnable {
public class ArbitraryDataFileRequestThread {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileRequestThread.class);
public ArbitraryDataFileRequestThread() {
private static final Integer FETCHER_LIMIT_PER_PEER = Settings.getInstance().getMaxThreadsForMessageType(MessageType.GET_ARBITRARY_DATA_FILE);
private static final String FETCHER_THREAD_PREFIX = "Arbitrary Data Fetcher ";
private ConcurrentHashMap<String, ExecutorService> executorByPeer = new ConcurrentHashMap<>();
private ArbitraryDataFileRequestThread() {
cleanupExecutorByPeerScheduler.scheduleAtFixedRate(this::cleanupExecutorsByPeer, 1, 1, TimeUnit.MINUTES);
}
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data File Request Thread");
Thread.currentThread().setPriority(NORM_PRIORITY);
private static ArbitraryDataFileRequestThread instance = null;
public static ArbitraryDataFileRequestThread getInstance() {
if( instance == null ) {
instance = new ArbitraryDataFileRequestThread();
}
return instance;
}
private final ScheduledExecutorService cleanupExecutorByPeerScheduler = Executors.newScheduledThreadPool(1);
private void cleanupExecutorsByPeer() {
try {
while (!Controller.isStopping()) {
Long now = NTP.getTime();
this.processFileHashes(now);
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
this.executorByPeer.forEach((key, value) -> {
if (value instanceof ThreadPoolExecutor) {
ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) value;
if (threadPoolExecutor.getActiveCount() == 0) {
threadPoolExecutor.shutdown();
if (this.executorByPeer.computeIfPresent(key, (k, v) -> null) == null) {
LOGGER.trace("removed executor: peer = " + key);
}
}
} else {
LOGGER.warn("casting issue in cleanup");
}
});
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
private void processFileHashes(Long now) throws InterruptedException {
public void processFileHashes(Long now, List<ArbitraryFileListResponseInfo> responseInfos, ArbitraryDataFileManager arbitraryDataFileManager) {
if (Controller.isStopping()) {
return;
}
ArbitraryDataFileManager arbitraryDataFileManager = ArbitraryDataFileManager.getInstance();
String signature58 = null;
String hash58 = null;
Peer peer = null;
boolean shouldProcess = false;
Map<String, byte[]> signatureBySignature58 = new HashMap<>(responseInfos.size());
Map<String, List<ArbitraryFileListResponseInfo>> responseInfoBySignature58 = new HashMap<>();
synchronized (arbitraryDataFileManager.arbitraryDataFileHashResponses) {
if (!arbitraryDataFileManager.arbitraryDataFileHashResponses.isEmpty()) {
for( ArbitraryFileListResponseInfo responseInfo : responseInfos) {
// Sort by lowest number of node hops first
Comparator<ArbitraryFileListResponseInfo> lowestHopsFirstComparator =
Comparator.comparingInt(ArbitraryFileListResponseInfo::getRequestHops);
arbitraryDataFileManager.arbitraryDataFileHashResponses.sort(lowestHopsFirstComparator);
if( responseInfo == null ) continue;
Iterator iterator = arbitraryDataFileManager.arbitraryDataFileHashResponses.iterator();
while (iterator.hasNext()) {
if (Controller.isStopping()) {
return;
}
ArbitraryFileListResponseInfo responseInfo = (ArbitraryFileListResponseInfo) iterator.next();
if (responseInfo == null) {
iterator.remove();
continue;
}
hash58 = responseInfo.getHash58();
peer = responseInfo.getPeer();
signature58 = responseInfo.getSignature58();
Long timestamp = responseInfo.getTimestamp();
if (now - timestamp >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || signature58 == null || peer == null) {
// Ignore - to be deleted
iterator.remove();
continue;
}
// Skip if already requesting, but don't remove, as we might want to retry later
if (arbitraryDataFileManager.arbitraryDataFileRequests.containsKey(hash58)) {
// Already requesting - leave this attempt for later
continue;
}
// We want to process this file
shouldProcess = true;
iterator.remove();
break;
}
if (Controller.isStopping()) {
return;
}
Peer peer = responseInfo.getPeer();
// if the relay request has timed out, then move on
if (now - responseInfo.getTimestamp() >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || responseInfo.getSignature58() == null || peer == null) {
continue;
}
// Skip if already requesting, but don't remove, as we might want to retry later
if (arbitraryDataFileManager.arbitraryDataFileRequests.containsKey(responseInfo.getHash58())) {
// Already requesting - leave this attempt for later
arbitraryDataFileManager.addResponse(responseInfo); // don't remove -> adding back, because it was removed already above
continue;
}
byte[] hash = Base58.decode(responseInfo.getHash58());
byte[] signature = Base58.decode(responseInfo.getSignature58());
// check for null
if (signature == null || hash == null || peer == null) {
continue;
}
// We want to process this file, store and map data to process later
signatureBySignature58.put(responseInfo.getSignature58(), signature);
responseInfoBySignature58
.computeIfAbsent(responseInfo.getSignature58(), signature58 -> new ArrayList<>())
.add(responseInfo);
}
if (!shouldProcess) {
// Nothing to do
Thread.sleep(1000L);
return;
}
// if there are no signatures, then there is nothing to process and no need to query the database
if( signatureBySignature58.isEmpty() ) return;
byte[] hash = Base58.decode(hash58);
byte[] signature = Base58.decode(signature58);
List<ArbitraryTransactionData> arbitraryTransactionDataList = new ArrayList<>();
// Fetch the transaction data
try (final Repository repository = RepositoryManager.getRepository()) {
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
if (arbitraryTransactionData == null) {
return;
}
if (signature == null || hash == null || peer == null || arbitraryTransactionData == null) {
return;
}
LOGGER.trace("Fetching file {} from peer {} via request thread...", hash58, peer);
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, Arrays.asList(hash));
arbitraryTransactionDataList.addAll(
ArbitraryTransactionUtils.fetchTransactionDataList(repository, new ArrayList<>(signatureBySignature58.values())));
} catch (DataException e) {
LOGGER.debug("Unable to process file hashes: {}", e.getMessage());
LOGGER.warn("Unable to fetch transaction data: {}", e.getMessage());
}
if( !arbitraryTransactionDataList.isEmpty() ) {
long start = System.currentTimeMillis();
for(ArbitraryTransactionData data : arbitraryTransactionDataList ) {
String signature58 = Base58.encode(data.getSignature());
for( ArbitraryFileListResponseInfo responseInfo : responseInfoBySignature58.get(signature58)) {
Runnable fetcher = () -> arbitraryDataFileFetcher(arbitraryDataFileManager, responseInfo, data);
this.executorByPeer
.computeIfAbsent(
responseInfo.getPeer().toString(),
peer -> Executors.newFixedThreadPool(
FETCHER_LIMIT_PER_PEER,
new NamedThreadFactory(FETCHER_THREAD_PREFIX + responseInfo.getPeer().toString(), NORM_PRIORITY)
)
)
.execute(fetcher);
}
}
long timeLapse = System.currentTimeMillis() - start;
}
}
}
private void arbitraryDataFileFetcher(ArbitraryDataFileManager arbitraryDataFileManager, ArbitraryFileListResponseInfo responseInfo, ArbitraryTransactionData arbitraryTransactionData) {
try {
Long now = NTP.getTime();
if (now - responseInfo.getTimestamp() >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT ) {
Peer peer = responseInfo.getPeer();
String hash58 = responseInfo.getHash58();
String signature58 = responseInfo.getSignature58();
LOGGER.debug("Peer {} version {} didn't fetch data file {} for signature {} due to relay timeout.", peer, peer.getPeersVersionString(), hash58, signature58);
return;
}
arbitraryDataFileManager.fetchArbitraryDataFiles(
responseInfo.getPeer(),
arbitraryTransactionData.getSignature(),
arbitraryTransactionData,
Arrays.asList(Base58.decode(responseInfo.getHash58()))
);
} catch (DataException e) {
LOGGER.warn("Unable to process file hashes: {}", e.getMessage());
}
}
}
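
ArbitraryDataFileRequestThread now keeps one bounded thread pool per peer and periodically shuts down pools that have gone idle. Below is a condensed sketch of that pattern with illustrative names; the per-peer sizing and the one-minute cleanup cadence come from the code above, everything else is assumption:

// Illustrative names; sizing and periodic cleanup mirror the class above.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

class PerPeerExecutorsSketch {
    private final ConcurrentHashMap<String, ExecutorService> executorByPeer = new ConcurrentHashMap<>();
    private final int threadsPerPeer;

    PerPeerExecutorsSketch(int threadsPerPeer) {
        this.threadsPerPeer = threadsPerPeer;
    }

    void execute(String peerKey, Runnable task) {
        executorByPeer
                .computeIfAbsent(peerKey, key -> Executors.newFixedThreadPool(threadsPerPeer))
                .execute(task);
    }

    /** Run on a schedule: shut down and drop any per-peer pool with no active work. */
    void cleanupIdle() {
        executorByPeer.forEach((key, executor) -> {
            if (executor instanceof ThreadPoolExecutor
                    && ((ThreadPoolExecutor) executor).getActiveCount() == 0) {
                executor.shutdown();
                executorByPeer.remove(key, executor); // only removes if still mapped to this executor
            }
        });
    }
}
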

View File

@@ -42,10 +42,10 @@ public class ArbitraryDataManager extends Thread {
private int powDifficulty = 14; // Must not be final, as unit tests need to reduce this value
/** Request timeout when transferring arbitrary data */
public static final long ARBITRARY_REQUEST_TIMEOUT = 12 * 1000L; // ms
public static final long ARBITRARY_REQUEST_TIMEOUT = 24 * 1000L; // ms
/** Maximum time to hold information about an in-progress relay */
public static final long ARBITRARY_RELAY_TIMEOUT = 60 * 1000L; // ms
public static final long ARBITRARY_RELAY_TIMEOUT = 120 * 1000L; // ms
/** Maximum time to hold direct peer connection information */
public static final long ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT = 2 * 60 * 1000L; // ms

View File

@@ -47,15 +47,15 @@ public class ArbitraryDataStorageManager extends Thread {
private static final long DIRECTORY_SIZE_CHECK_INTERVAL = 10 * 60 * 1000L; // 10 minutes
/** Treat storage as full at 90% usage, to reduce risk of going over the limit.
/** Treat storage as full at 80% usage, to reduce risk of going over the limit.
* This is necessary because we don't calculate total storage values before every write.
* It also helps avoid a fetch/delete loop, as we will stop fetching before the hard limit.
* This must be lower than DELETION_THRESHOLD. */
private static final double STORAGE_FULL_THRESHOLD = 0.90f; // 90%
private static final double STORAGE_FULL_THRESHOLD = 0.8f; // 80%
/** Start deleting files once we reach 98% usage.
/** Start deleting files once we reach 90% usage.
* This must be higher than STORAGE_FULL_THRESHOLD in order to avoid a fetch/delete loop. */
public static final double DELETION_THRESHOLD = 0.98f; // 98%
public static final double DELETION_THRESHOLD = 0.9f; // 90%
private static final long PER_NAME_STORAGE_MULTIPLIER = 4L;
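
With the thresholds above, fetching stops at 80% usage while deletion only begins at 90%, so the two limits cannot chase each other. A hedged sketch of how such a check could be expressed (the real capacity check lives elsewhere in ArbitraryDataStorageManager and is not shown here):

// Hedged sketch; the project's actual capacity check is not part of this hunk.
class StorageThresholdSketch {
    static final double STORAGE_FULL_THRESHOLD = 0.8; // stop fetching at 80% usage
    static final double DELETION_THRESHOLD = 0.9;     // start deleting at 90% usage

    static boolean isStorageFull(long usedBytes, long maxBytes) {
        return usedBytes >= (long) (maxBytes * STORAGE_FULL_THRESHOLD);
    }

    static boolean shouldStartDeleting(long usedBytes, long maxBytes) {
        return usedBytes >= (long) (maxBytes * DELETION_THRESHOLD);
    }
}
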

View File

@@ -24,6 +24,11 @@ import org.qortal.utils.Triple;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.*;
@@ -61,6 +66,7 @@ public class ArbitraryMetadataManager {
private ArbitraryMetadataManager() {
scheduler.scheduleAtFixedRate(this::processNetworkGetArbitraryMetadataMessage, 60, 1, TimeUnit.SECONDS);
}
public static ArbitraryMetadataManager getInstance() {
@@ -354,9 +360,8 @@ public class ArbitraryMetadataManager {
// Forward to requesting peer
LOGGER.debug("Forwarding metadata to requesting peer: {}", requestingPeer);
if (!requestingPeer.sendMessage(forwardArbitraryMetadataMessage)) {
requestingPeer.disconnect("failed to forward arbitrary metadata");
}
requestingPeer.sendMessage(forwardArbitraryMetadataMessage);
}
}
}
@@ -371,107 +376,159 @@ public class ArbitraryMetadataManager {
}
}
// List to collect messages
private final List<PeerMessage> messageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object lock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
public void onNetworkGetArbitraryMetadataMessage(Peer peer, Message message) {
// Don't respond if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}
Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet();
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) message;
byte[] signature = getArbitraryMetadataMessage.getSignature();
String signature58 = Base58.encode(signature);
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
// If we've seen this request recently, then ignore
if (arbitraryMetadataRequests.putIfAbsent(message.getId(), newEntry) != null) {
LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peer, signature58);
return;
}
LOGGER.debug("Received metadata request from peer {} for signature {}", peer, signature58);
ArbitraryTransactionData transactionData = null;
ArbitraryDataFile metadataFile = null;
try (final Repository repository = RepositoryManager.getRepository()) {
// Firstly we need to lookup this file on chain to get its metadata hash
transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
if (transactionData instanceof ArbitraryTransactionData) {
// Check if we're even allowed to serve metadata for this transaction
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
byte[] metadataHash = transactionData.getMetadataHash();
if (metadataHash != null) {
// Load metadata file
metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature);
}
}
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary metadata for peer %s", peer), e);
}
// We should only respond if we have the metadata file
if (metadataFile != null && metadataFile.exists()) {
// We have the metadata file, so update requests map to reflect that we've sent it
newEntry = new Triple<>(null, null, now);
arbitraryMetadataRequests.put(message.getId(), newEntry);
ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, metadataFile);
arbitraryMetadataMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryMetadataMessage)) {
LOGGER.debug("Couldn't send metadata");
peer.disconnect("failed to send metadata");
return;
}
LOGGER.debug("Sent metadata");
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
LOGGER.debug("No need for any forwarding because metadata request is fully served");
return;
}
// We may need to forward this request on
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it
long requestTime = getArbitraryMetadataMessage.getRequestTime();
int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1;
long totalRequestTime = now - requestTime;
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
// Relay request hasn't timed out yet, so can potentially be rebroadcast
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
relayGetArbitraryMetadataMessage.setId(message.getId());
LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer ->
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);
}
else {
// This relay request has reached the maximum number of allowed hops
}
}
else {
// This relay request has timed out
}
synchronized (lock) {
messageList.add(new PeerMessage(peer, message));
}
}
private void processNetworkGetArbitraryMetadataMessage() {
try {
List<PeerMessage> messagesToProcess;
synchronized (lock) {
messagesToProcess = new ArrayList<>(messageList);
messageList.clear();
}
Map<String, byte[]> signatureBySignature58 = new HashMap<>((messagesToProcess.size()));
Map<String, Long> nowBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String,PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
for( PeerMessage peerMessage : messagesToProcess) {
Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet();
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) peerMessage.message;
byte[] signature = getArbitraryMetadataMessage.getSignature();
String signature58 = Base58.encode(signature);
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peerMessage.peer, now);
// If we've seen this request recently, then ignore
if (arbitraryMetadataRequests.putIfAbsent(peerMessage.message.getId(), newEntry) != null) {
LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peerMessage.peer, signature58);
continue;
}
LOGGER.debug("Received metadata request from peer {} for signature {}", peerMessage.peer, signature58);
signatureBySignature58.put(signature58, signature);
nowBySignature58.put(signature58, now);
peerMessageBySignature58.put(signature58, peerMessage);
}
if( signatureBySignature58.isEmpty() ) return;
List<TransactionData> transactionDataList;
try (final Repository repository = RepositoryManager.getRepository()) {
// Firstly we need to lookup this file on chain to get its metadata hash
transactionDataList = repository.getTransactionRepository().fromSignatures(new ArrayList(signatureBySignature58.values()));
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary transactions"), e);
return;
}
Map<String, ArbitraryTransactionData> dataBySignature58
= transactionDataList.stream()
.filter(data -> data instanceof ArbitraryTransactionData)
.map(ArbitraryTransactionData.class::cast)
.collect(Collectors.toMap(data -> Base58.encode(data.getSignature()), Function.identity()));
for(Map.Entry<String, ArbitraryTransactionData> entry : dataBySignature58.entrySet()) {
String signature58 = entry.getKey();
ArbitraryTransactionData transactionData = entry.getValue();
try {
// Check if we're even allowed to serve metadata for this transaction
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
byte[] metadataHash = transactionData.getMetadataHash();
if (metadataHash != null) {
// Load metadata file
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, transactionData.getSignature());
// We should only respond if we have the metadata file
if (metadataFile != null && metadataFile.exists()) {
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
Message message = peerMessage.message;
Peer peer = peerMessage.peer;
// We have the metadata file, so update requests map to reflect that we've sent it
Triple newEntry = new Triple<>(null, null, nowBySignature58.get(signature58));
arbitraryMetadataRequests.put(message.getId(), newEntry);
ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(entry.getValue().getSignature(), metadataFile);
arbitraryMetadataMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryMetadataMessage)) {
LOGGER.debug("Couldn't send metadata");
continue;
}
LOGGER.debug("Sent metadata");
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
LOGGER.debug("No need for any forwarding because metadata request is fully served");
}
}
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary metadata"), e);
}
// We may need to forward this request on
boolean isBlocked = (transactionDataList == null || ListUtils.isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) peerMessage.message;
long requestTime = getArbitraryMetadataMessage.getRequestTime();
int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1;
long totalRequestTime = nowBySignature58.get(signature58) - requestTime;
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
// Relay request hasn't timed out yet, so can potentially be rebroadcast
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
byte[] signature = signatureBySignature58.get(signature58);
Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
relayGetArbitraryMetadataMessage.setId(getArbitraryMetadataMessage.getId());
Peer peer = peerMessage.peer;
LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer ->
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);
} else {
// This relay request has reached the maximum number of allowed hops
}
} else {
// This relay request has timed out
}
}
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
}
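
The batched metadata processor above fetches all queued signatures in one repository call and then indexes the results by signature with a filter-cast-collect stream. A small, generic sketch of that indexing step (illustrative helper, not project code):

// Generic, illustrative helper; Collectors.toMap assumes the keys (here, signatures) are unique.
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

class IndexBySignatureSketch {
    static <B, S extends B> Map<String, S> indexSubtype(List<B> items, Class<S> subtype, Function<S, String> keyFn) {
        return items.stream()
                .filter(subtype::isInstance)   // keep only the wanted transaction subtype
                .map(subtype::cast)
                .collect(Collectors.toMap(keyFn, Function.identity()));
    }
}
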

View File

@@ -0,0 +1,130 @@
package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.ListUtils;
import org.qortal.utils.NamedThreadFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.OptionalInt;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
public class Follower {
private static final Logger LOGGER = LogManager.getLogger(Follower.class);
private ScheduledExecutorService service
= Executors.newScheduledThreadPool(2, new NamedThreadFactory("Follower", Thread.NORM_PRIORITY));
private Follower() {
}
private static Follower instance;
public static Follower getInstance() {
if( instance == null ) {
instance = new Follower();
}
return instance;
}
public void start() {
// fetch arbitrary transactions from followed names from the last 100 blocks every 2 minutes
service.scheduleWithFixedDelay(() -> fetch(OptionalInt.of(100)), 10, 2, TimeUnit.MINUTES);
// fetch arbitrary transactions from followed names from any block every 24 hours
service.scheduleWithFixedDelay(() -> fetch(OptionalInt.empty()), 4, 24, TimeUnit.HOURS);
}
private void fetch(OptionalInt limit) {
try {
// for each followed name, get arbitrary transactions, then examine those transactions before fetching
for (String name : ListUtils.followedNames()) {
List<ArbitraryTransactionData> transactionsInReverseOrder;
// open database to get the transactions in reverse order for the followed name
try (final Repository repository = RepositoryManager.getRepository()) {
List<ArbitraryTransactionData> latestArbitraryTransactionsByName
= repository.getArbitraryRepository().getLatestArbitraryTransactionsByName(name);
if (limit.isPresent()) {
final int blockHeightThreshold = repository.getBlockRepository().getBlockchainHeight() - limit.getAsInt();
transactionsInReverseOrder
= latestArbitraryTransactionsByName.stream().filter(tx -> tx.getBlockHeight() > blockHeightThreshold)
.collect(Collectors.toList());
} else {
transactionsInReverseOrder = latestArbitraryTransactionsByName;
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
transactionsInReverseOrder = new ArrayList<>(0);
}
// collect processed transaction hashes, so we don't fetch outdated transactions
Set<ArbitraryTransactionDataHashWrapper> processedTransactions = new HashSet<>();
ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance();
// for each arbitrary transaction for the followed name: process, evaluate, fetch
for (ArbitraryTransactionData arbitraryTransaction : transactionsInReverseOrder) {
boolean examined = false;
try (final Repository repository = RepositoryManager.getRepository()) {
// if not processed
if (!processedTransactions.contains(new ArbitraryTransactionDataHashWrapper(arbitraryTransaction))) {
boolean isLocal = repository.getArbitraryRepository().isDataLocal(arbitraryTransaction.getSignature());
// if not local, then continue to evaluate
if (!isLocal) {
// evaluate fetching status for this transaction on this node
ArbitraryDataExamination examination = storageManager.shouldPreFetchData(repository, arbitraryTransaction);
// if the evaluation passed, then fetch
examined = examination.isPass();
}
// if locally stored, then nothing needs to be done
// add to processed transactions
processedTransactions.add(new ArbitraryTransactionDataHashWrapper(arbitraryTransaction));
}
}
// if passed examination for fetching, then fetch
if (examined) {
LOGGER.info("for {} on {}, fetching {}", name, arbitraryTransaction.getService(), arbitraryTransaction.getIdentifier());
boolean fetched = ArbitraryDataFileListManager.getInstance().fetchArbitraryDataFileList(arbitraryTransaction);
LOGGER.info("fetched = " + fetched);
}
// pause a second before moving on to another transaction
Thread.sleep(1000);
}
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
}
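
Follower is a new singleton that schedules a shallow pass (last 100 blocks, every 2 minutes) and a full pass (every 24 hours) over followed names. The call site for start() is outside this section; a minimal usage sketch under that assumption:

// Usage sketch only; where start() is actually invoked is not part of this diff.
public class FollowerBootstrapSketch {
    public static void main(String[] args) {
        // Schedules the 2-minute shallow pass and the 24-hour full pass defined in start()
        Follower.getInstance().start();
    }
}
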

View File

@@ -0,0 +1,22 @@
package org.qortal.controller.arbitrary;
import org.qortal.network.Peer;
import org.qortal.network.message.Message;
public class PeerMessage {
Peer peer;
Message message;
public PeerMessage(Peer peer, Message message) {
this.peer = peer;
this.message = message;
}
public Peer getPeer() {
return peer;
}
public Message getMessage() {
return message;
}
}

View File

@@ -8,6 +8,7 @@ import org.qortal.account.PrivateKeyAccount;
import org.qortal.api.model.crosschain.TradeBotCreateRequest;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.controller.arbitrary.PeerMessage;
import org.qortal.controller.tradebot.AcctTradeBot.ResponseResult;
import org.qortal.crosschain.*;
import org.qortal.crypto.Crypto;
@@ -37,7 +38,12 @@ import org.qortal.utils.NTP;
import java.awt.TrayIcon.MessageType;
import java.security.SecureRandom;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* Performing cross-chain trading steps on behalf of user.
@@ -118,6 +124,9 @@ public class TradeBot implements Listener {
private Map<String, Long> validTrades = new HashMap<>();
private TradeBot() {
tradePresenceMessageScheduler.scheduleAtFixedRate( this::processTradePresencesMessages, 60, 1, TimeUnit.SECONDS);
EventBus.INSTANCE.addListener(event -> TradeBot.getInstance().listen(event));
}
@@ -551,77 +560,139 @@ public class TradeBot implements Listener {
}
}
// List to collect messages
private final List<PeerMessage> tradePresenceMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object tradePresenceMessageLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService tradePresenceMessageScheduler = Executors.newScheduledThreadPool(1);
public void onTradePresencesMessage(Peer peer, Message message) {
TradePresencesMessage tradePresencesMessage = (TradePresencesMessage) message;
List<TradePresenceData> peersTradePresences = tradePresencesMessage.getTradePresences();
synchronized (tradePresenceMessageLock) {
tradePresenceMessageList.add(new PeerMessage(peer, message));
}
}
long now = NTP.getTime();
// Timestamps before this are too far into the past
long pastThreshold = now;
// Timestamps after this are too far into the future
long futureThreshold = now + PRESENCE_LIFETIME;
public void processTradePresencesMessages() {
Map<ByteArray, Supplier<ACCT>> acctSuppliersByCodeHash = SupportedBlockchain.getAcctMap();
try {
List<PeerMessage> messagesToProcess;
synchronized (tradePresenceMessageLock) {
messagesToProcess = new ArrayList<>(tradePresenceMessageList);
tradePresenceMessageList.clear();
}
int newCount = 0;
if( messagesToProcess.isEmpty() ) return;
try (final Repository repository = RepositoryManager.getRepository()) {
for (TradePresenceData peersTradePresence : peersTradePresences) {
long timestamp = peersTradePresence.getTimestamp();
Map<Peer, List<TradePresenceData>> tradePresencesByPeer = new HashMap<>(messagesToProcess.size());
// Ignore if timestamp is out of bounds
if (timestamp < pastThreshold || timestamp > futureThreshold) {
if (timestamp < pastThreshold)
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too old vs {}",
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
);
else
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too new vs {}",
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
// map all trade presences from the messages to their peer
for( PeerMessage peerMessage : messagesToProcess ) {
TradePresencesMessage tradePresencesMessage = (TradePresencesMessage) peerMessage.getMessage();
List<TradePresenceData> peersTradePresences = tradePresencesMessage.getTradePresences();
tradePresencesByPeer.put(peerMessage.getPeer(), peersTradePresences);
}
long now = NTP.getTime();
// Timestamps before this are too far into the past
long pastThreshold = now;
// Timestamps after this are too far into the future
long futureThreshold = now + PRESENCE_LIFETIME;
Map<ByteArray, Supplier<ACCT>> acctSuppliersByCodeHash = SupportedBlockchain.getAcctMap();
int newCount = 0;
Map<String, List<Peer>> peersByAtAddress = new HashMap<>(tradePresencesByPeer.size());
Map<String, TradePresenceData> tradePresenceByAtAddress = new HashMap<>(tradePresencesByPeer.size());
// for each batch of trade presence data from a peer, validate and populate the maps declared above
for ( Map.Entry<Peer, List<TradePresenceData>> entry: tradePresencesByPeer.entrySet()) {
Peer peer = entry.getKey();
for( TradePresenceData peersTradePresence : entry.getValue() ) {
// TradePresenceData peersTradePresence
long timestamp = peersTradePresence.getTimestamp();
// Ignore if timestamp is out of bounds
if (timestamp < pastThreshold || timestamp > futureThreshold) {
if (timestamp < pastThreshold)
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too old vs {}",
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
);
else
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too new vs {}",
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
);
continue;
}
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
// Ignore if we've previously verified this timestamp+publickey combo or sent timestamp is older
TradePresenceData existingTradeData = this.safeAllTradePresencesByPubkey.get(pubkeyByteArray);
if (existingTradeData != null && timestamp <= existingTradeData.getTimestamp()) {
if (timestamp == existingTradeData.getTimestamp())
LOGGER.trace("Ignoring trade presence {} from peer {} as we have verified timestamp {} before",
peersTradePresence.getAtAddress(), peer, timestamp
);
else
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is older than latest {}",
peersTradePresence.getAtAddress(), peer, timestamp, existingTradeData.getTimestamp()
);
continue;
}
// Check timestamp signature
byte[] timestampSignature = peersTradePresence.getSignature();
byte[] timestampBytes = Longs.toByteArray(timestamp);
byte[] publicKey = peersTradePresence.getPublicKey();
if (!Crypto.verify(publicKey, timestampSignature, timestampBytes)) {
LOGGER.trace("Ignoring trade presence {} from peer {} as signature failed to verify",
peersTradePresence.getAtAddress(), peer
);
continue;
continue;
}
peersByAtAddress.computeIfAbsent(peersTradePresence.getAtAddress(), address -> new ArrayList<>()).add(peer);
tradePresenceByAtAddress.put(peersTradePresence.getAtAddress(), peersTradePresence);
}
}
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
if( tradePresenceByAtAddress.isEmpty() ) return;
// Ignore if we've previously verified this timestamp+publickey combo or sent timestamp is older
TradePresenceData existingTradeData = this.safeAllTradePresencesByPubkey.get(pubkeyByteArray);
if (existingTradeData != null && timestamp <= existingTradeData.getTimestamp()) {
if (timestamp == existingTradeData.getTimestamp())
LOGGER.trace("Ignoring trade presence {} from peer {} as we have verified timestamp {} before",
peersTradePresence.getAtAddress(), peer, timestamp
);
else
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is older than latest {}",
peersTradePresence.getAtAddress(), peer, timestamp, existingTradeData.getTimestamp()
);
List<ATData> atDataList;
try (final Repository repository = RepositoryManager.getRepository()) {
atDataList = repository.getATRepository().fromATAddresses( new ArrayList<>(tradePresenceByAtAddress.keySet()) );
} catch (DataException e) {
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
return;
}
continue;
}
Map<String, Supplier<ACCT>> supplierByAtAddress = new HashMap<>(atDataList.size());
// Check timestamp signature
byte[] timestampSignature = peersTradePresence.getSignature();
byte[] timestampBytes = Longs.toByteArray(timestamp);
byte[] publicKey = peersTradePresence.getPublicKey();
if (!Crypto.verify(publicKey, timestampSignature, timestampBytes)) {
LOGGER.trace("Ignoring trade presence {} from peer {} as signature failed to verify",
peersTradePresence.getAtAddress(), peer
);
List<ATData> validatedAtDataList = new ArrayList<>(atDataList.size());
continue;
}
// for each trade
for( ATData atData : atDataList ) {
ATData atData = repository.getATRepository().fromATAddress(peersTradePresence.getAtAddress());
TradePresenceData peersTradePresence = tradePresenceByAtAddress.get(atData.getATAddress());
if (atData == null || atData.getIsFrozen() || atData.getIsFinished()) {
if (atData == null)
LOGGER.trace("Ignoring trade presence {} from peer {} as AT doesn't exist",
peersTradePresence.getAtAddress(), peer
LOGGER.trace("Ignoring trade presence {} from peer as AT doesn't exist",
peersTradePresence.getAtAddress()
);
else
LOGGER.trace("Ignoring trade presence {} from peer {} as AT is frozen or finished",
peersTradePresence.getAtAddress(), peer
LOGGER.trace("Ignoring trade presence {} from peer as AT is frozen or finished",
peersTradePresence.getAtAddress()
);
continue;
@@ -630,51 +701,87 @@ public class TradeBot implements Listener {
ByteArray atCodeHash = ByteArray.wrap(atData.getCodeHash());
Supplier<ACCT> acctSupplier = acctSuppliersByCodeHash.get(atCodeHash);
if (acctSupplier == null) {
LOGGER.trace("Ignoring trade presence {} from peer {} as AT isn't a known ACCT?",
peersTradePresence.getAtAddress(), peer
LOGGER.trace("Ignoring trade presence {} from peer as AT isn't a known ACCT?",
peersTradePresence.getAtAddress()
);
continue;
}
CrossChainTradeData tradeData = acctSupplier.get().populateTradeData(repository, atData);
if (tradeData == null) {
LOGGER.trace("Ignoring trade presence {} from peer {} as trade data not found?",
peersTradePresence.getAtAddress(), peer
);
continue;
}
// Convert signer's public key to address form
String signerAddress = peersTradePresence.getTradeAddress();
// Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form)
if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) {
LOGGER.trace("Ignoring trade presence {} from peer {} as signer isn't Alice or Bob?",
peersTradePresence.getAtAddress(), peer
);
continue;
}
// This is new to us
this.allTradePresencesByPubkey.put(pubkeyByteArray, peersTradePresence);
++newCount;
LOGGER.trace("Added trade presence {} from peer {} with timestamp {}",
peersTradePresence.getAtAddress(), peer, timestamp
);
EventBus.INSTANCE.notify(new TradePresenceEvent(peersTradePresence));
validatedAtDataList.add(atData);
}
} catch (DataException e) {
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
}
if (newCount > 0) {
LOGGER.debug("New trade presences: {}, all trade presences: {}", newCount, allTradePresencesByPubkey.size());
rebuildSafeAllTradePresences();
// populated data for each trade
List<CrossChainTradeData> crossChainTradeDataList;
// validated trade data grouped by code (cross chain coin)
Map<ByteArray, List<ATData>> atDataByCodeHash
= validatedAtDataList.stream().collect(
Collectors.groupingBy(data -> ByteArray.wrap(data.getCodeHash())));
try (final Repository repository = RepositoryManager.getRepository()) {
crossChainTradeDataList = new ArrayList<>();
// for each code (cross chain coin), get each trade, then populate trade data
for( Map.Entry<ByteArray, List<ATData>> entry : atDataByCodeHash.entrySet() ) {
Supplier<ACCT> acctSupplier = acctSuppliersByCodeHash.get(entry.getKey());
crossChainTradeDataList.addAll(
acctSupplier.get().populateTradeDataList(
repository,
entry.getValue()
)
.stream().filter( data -> data != null )
.collect(Collectors.toList())
);
}
} catch (DataException e) {
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
return;
}
// for each populated trade data, validate and fire event
for( CrossChainTradeData tradeData : crossChainTradeDataList ) {
List<Peer> peers = peersByAtAddress.get(tradeData.qortalAtAddress);
for( Peer peer : peers ) {
TradePresenceData peersTradePresence = tradePresenceByAtAddress.get(tradeData.qortalAtAddress);
// Convert signer's public key to address form
String signerAddress = peersTradePresence.getTradeAddress();
// Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form)
if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) {
LOGGER.trace("Ignoring trade presence {} from peer {} as signer isn't Alice or Bob?",
peersTradePresence.getAtAddress(), peer
);
continue;
}
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
// This is new to us
this.allTradePresencesByPubkey.put(pubkeyByteArray, peersTradePresence);
++newCount;
LOGGER.trace("Added trade presence {} from peer {} with timestamp {}",
peersTradePresence.getAtAddress(), peer, tradeData.creationTimestamp
);
EventBus.INSTANCE.notify(new TradePresenceEvent(peersTradePresence));
}
}
if (newCount > 0) {
LOGGER.info("New trade presences: {}, all trade presences: {}", newCount, allTradePresencesByPubkey.size());
rebuildSafeAllTradePresences();
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
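
After per-presence validation, the reworked processor groups AT data by code hash so that each ACCT can populate trade data for its whole group in one repository pass. A compact sketch of that grouping step with simplified, illustrative types:

// Simplified, illustrative types: each String[] is { codeHashHex, atAddress }.
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

class GroupByCodeHashSketch {
    static Map<String, List<String>> groupByCodeHash(List<String[]> validatedAts) {
        return validatedAts.stream().collect(Collectors.groupingBy(
                at -> at[0],                                                  // key: code hash
                Collectors.mapping(at -> at[1], Collectors.toList())));       // values: AT addresses per ACCT
    }
}
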

View File

@@ -6,6 +6,9 @@ import org.qortal.data.crosschain.CrossChainTradeData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import java.util.List;
import java.util.OptionalLong;
public interface ACCT {
public byte[] getCodeBytesHash();
@@ -16,8 +19,12 @@ public interface ACCT {
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException;
public List<CrossChainTradeData> populateTradeDataList(Repository respository, List<ATData> atDataList) throws DataException;
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException;
CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException;
public byte[] buildCancelMessage(String creatorQortalAddress);
public byte[] findSecretA(Repository repository, CrossChainTradeData crossChainTradeData) throws DataException;
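
The new populateTradeData overload accepts an OptionalLong balance so that batch callers which already looked up QORT balances can skip the per-AT account query; OptionalLong.empty() keeps the original behaviour. A small helper sketch showing how a caller might build that argument (hypothetical names):

// Hypothetical helper: decide whether to pass a pre-fetched balance or fall back to a per-AT lookup.
import java.util.Map;
import java.util.OptionalLong;

class BalanceHintSketch {
    static OptionalLong balanceHint(Map<String, Long> prefetchedBalances, String atAddress) {
        Long balance = prefetchedBalances.get(atAddress);
        // empty() preserves the old behaviour: the ACCT queries the AT account's confirmed QORT balance itself
        return balance != null ? OptionalLong.of(balance) : OptionalLong.empty();
    }
}
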

View File

@@ -1,5 +1,6 @@
package org.qortal.crosschain;
import org.bitcoinj.core.Coin;
import org.bitcoinj.core.Context;
import org.bitcoinj.core.NetworkParameters;
import org.bitcoinj.core.Transaction;
@@ -14,15 +15,21 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
public class Bitcoin extends Bitcoiny {
public static final String CURRENCY_CODE = "BTC";
private static final long MINIMUM_ORDER_AMOUNT = 100000; // 0.001 BTC minimum order, due to high fees
// Fee used when locking in a QORT-for-BTC trade. This is the default value; the user should reset it
// to a value in line with the BTC fee market. This is 5,000 sats per kB (5 sats per byte).
private static final Coin DEFAULT_FEE_PER_KB = Coin.valueOf(5_000); // 0.00005 BTC per 1000 bytes
// Temporary values until a dynamic fee system is written.
private static final long NEW_FEE_AMOUNT = 6_000L;
private static final long MINIMUM_ORDER_AMOUNT = 100_000; // 0.001 BTC minimum order, due to high fees
// Default value until the user resets the fee to compete with the current market. This is the total fee for a
// P2SH transaction of roughly 300 bytes at 5 sats per byte.
private static final long NEW_FEE_AMOUNT = 1_500L;
private static final long NON_MAINNET_FEE = 1000L; // enough for TESTNET3 and should be OK for REGTEST
@@ -111,7 +118,7 @@ public class Bitcoin extends Bitcoiny {
@Override
public long getP2shFee(Long timestamp) {
return this.getFeeCeiling();
return this.getFeeRequired();
}
},
TEST3 {
@@ -173,14 +180,14 @@ public class Bitcoin extends Bitcoiny {
}
};
private long feeCeiling = NEW_FEE_AMOUNT;
private AtomicLong feeRequired = new AtomicLong(NEW_FEE_AMOUNT);
public long getFeeCeiling() {
return feeCeiling;
public long getFeeRequired() {
return feeRequired.get();
}
public void setFeeCeiling(long feeCeiling) {
this.feeCeiling = feeCeiling;
public void setFeeRequired(long feeRequired) {
this.feeRequired.set(feeRequired);
}
public abstract NetworkParameters getParams();
@@ -196,7 +203,7 @@ public class Bitcoin extends Bitcoiny {
// Constructors and instance
private Bitcoin(BitcoinNet bitcoinNet, BitcoinyBlockchainProvider blockchain, Context bitcoinjContext, String currencyCode) {
super(blockchain, bitcoinjContext, currencyCode, bitcoinjContext.getFeePerKb());
super(blockchain, bitcoinjContext, currencyCode, DEFAULT_FEE_PER_KB);
this.bitcoinNet = bitcoinNet;
LOGGER.info(() -> String.format("Starting Bitcoin support using %s", this.bitcoinNet.name()));
@@ -242,14 +249,14 @@ public class Bitcoin extends Bitcoiny {
}
@Override
public long getFeeCeiling() {
return this.bitcoinNet.getFeeCeiling();
public long getFeeRequired() {
return this.bitcoinNet.getFeeRequired();
}
@Override
public void setFeeCeiling(long fee) {
public void setFeeRequired(long fee) {
this.bitcoinNet.setFeeCeiling( fee );
this.bitcoinNet.setFeeRequired( fee );
}
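
The rename from feeCeiling to feeRequired also changes the backing field from a plain long to an AtomicLong, so reads and writes from different threads stay consistent without synchronization. A minimal sketch of the idiom (illustrative class, not the enum above):

// Illustrative class, not the BitcoinNet enum itself.
import java.util.concurrent.atomic.AtomicLong;

class FeeRequiredHolder {
    private final AtomicLong feeRequired = new AtomicLong(1_500L); // default, in satoshis

    long getFeeRequired() {
        return feeRequired.get();   // safe to read from any thread
    }

    void setFeeRequired(long fee) {
        feeRequired.set(fee);       // atomic write, no locking needed
    }
}
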
/**
* Returns bitcoinj transaction sending <tt>amount</tt> to <tt>recipient</tt> using 20 sat/byte fee.

View File

@@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
import com.google.common.primitives.Bytes;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -608,7 +610,14 @@ public class BitcoinACCTv1 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -617,13 +626,14 @@ public class BitcoinACCTv1 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -636,8 +646,13 @@ public class BitcoinACCTv1 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -569,7 +571,14 @@ public class BitcoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -578,13 +587,14 @@ public class BitcoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -597,8 +607,13 @@ public class BitcoinACCTv3 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -8,6 +8,8 @@ import org.bitcoinj.core.*;
import org.bitcoinj.crypto.ChildNumber;
import org.bitcoinj.crypto.DeterministicHierarchy;
import org.bitcoinj.crypto.DeterministicKey;
import org.bitcoinj.crypto.HDPath;
import org.bitcoinj.params.AbstractBitcoinNetParams;
import org.bitcoinj.script.Script.ScriptType;
import org.bitcoinj.script.ScriptBuilder;
import org.bitcoinj.wallet.DeterministicKeyChain;
@@ -25,7 +27,7 @@ import java.util.*;
import java.util.stream.Collectors;
/** Bitcoin-like (Bitcoin, Litecoin, etc.) support */
public abstract class Bitcoiny implements ForeignBlockchain {
public abstract class Bitcoiny extends AbstractBitcoinNetParams implements ForeignBlockchain {
protected static final Logger LOGGER = LogManager.getLogger(Bitcoiny.class);
@@ -65,6 +67,7 @@ public abstract class Bitcoiny implements ForeignBlockchain {
// Constructors and instance
protected Bitcoiny(BitcoinyBlockchainProvider blockchainProvider, Context bitcoinjContext, String currencyCode, Coin feePerKb) {
this.genesisBlock = this.getGenesisBlock();
this.blockchainProvider = blockchainProvider;
this.bitcoinjContext = bitcoinjContext;
this.currencyCode = currencyCode;
@@ -74,6 +77,15 @@ public abstract class Bitcoiny implements ForeignBlockchain {
}
// Getters & setters
@Override
public String getPaymentProtocolId() {
return this.id;
}
@Override
public Block getGenesisBlock() {
return this.genesisBlock;
}
public BitcoinyBlockchainProvider getBlockchainProvider() {
return this.blockchainProvider;
@@ -590,15 +602,27 @@ public abstract class Bitcoiny implements ForeignBlockchain {
return new AddressInfo(
address.toString(),
toIntegerList( key.getPath()),
toIntegerList( key.getPath() ),
summingUnspentOutputs(address.toString()),
key.getPathAsString(),
transactionCount,
candidates.contains(address.toString()));
}
private static List<Integer> toIntegerList(ImmutableList<ChildNumber> path) {
/**
* <p>Convert BitcoinJ native type to List of Integers, BitcoinJ v16 compatible
* </p>
*
* @param path path to deterministic key
* @return List of Integers representing the key's position in the tree
* @since v4.7.2
*/
private static List<Integer> toIntegerList(HDPath path) {
return path.stream().map(ChildNumber::num).collect(Collectors.toList());
}
// BitcoinJ v15 compatible
private static List<Integer> toIntegerList(ImmutableList<ChildNumber> path) {
return path.stream().map(ChildNumber::num).collect(Collectors.toList());
}
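
DeterministicKey.getPath() returns an ImmutableList<ChildNumber> in bitcoinj v15 but an HDPath in v16, hence the two toIntegerList overloads above. Since an HDPath can be streamed as ChildNumber elements (as the code above already does), the conversion is a one-liner; a standalone sketch, assuming bitcoinj v16 on the classpath:

// Sketch mirroring the v16 overload above; assumes bitcoinj v16, where HDPath streams ChildNumber elements.
import org.bitcoinj.crypto.ChildNumber;
import org.bitcoinj.crypto.HDPath;

import java.util.List;
import java.util.stream.Collectors;

class KeyPathSketch {
    static List<Integer> flatten(HDPath path) {
        // ChildNumber.num() returns the child index without the hardened bit
        return path.stream().map(ChildNumber::num).collect(Collectors.toList());
    }
}
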
@@ -840,9 +864,9 @@ public abstract class Bitcoiny implements ForeignBlockchain {
} while (true);
}
public abstract long getFeeCeiling();
public abstract long getFeeRequired();
public abstract void setFeeCeiling(long fee);
public abstract void setFeeRequired(long fee);
// UTXOProvider support

View File

@@ -1,5 +1,6 @@
package org.qortal.crosschain;
import org.bitcoinj.core.Block;
import org.bitcoinj.core.Coin;
import org.bitcoinj.core.Context;
import org.bitcoinj.core.NetworkParameters;
@@ -89,7 +90,7 @@ public class BitcoinyTBD extends Bitcoiny {
NetTBD netTBD
= new NetTBD(
bitcoinyTBDRequest.getNetworkName(),
bitcoinyTBDRequest.getFeeCeiling(),
bitcoinyTBDRequest.getFeeRequired(),
networkParams,
Collections.emptyList(),
bitcoinyTBDRequest.getExpectedGenesisHash()
@@ -134,18 +135,30 @@ public class BitcoinyTBD extends Bitcoiny {
@Override
public long getP2shFee(Long timestamp) throws ForeignBlockchainException {
return this.netTBD.getFeeCeiling();
return this.netTBD.getFeeRequired();
}
@Override
public long getFeeCeiling() {
public long getFeeRequired() {
return this.netTBD.getFeeCeiling();
return this.netTBD.getFeeRequired();
}
@Override
public void setFeeCeiling(long fee) {
public void setFeeRequired(long fee) {
this.netTBD.setFeeCeiling( fee );
this.netTBD.setFeeRequired( fee );
}
@Override
public String getPaymentProtocolId() {
return params.getId();
}
@Override
public Block getGenesisBlock() {
if(genesisBlock == null)
genesisBlock = params.getGenesisBlock();
return this.genesisBlock;
}
}

View File

@@ -98,9 +98,10 @@ public class DeterminedNetworkParams extends NetworkParameters implements Altcoi
LOGGER.info( "Creating Genesis Block ...");
// BitcoinJ v16 has a new native method for this
//this.genesisBlock = CoinParamsUtil.createGenesisBlockFromRequest(this, request);
LOGGER.info("Created Genesis Block: genesisBlock = " + genesisBlock );
// LOGGER.info("Created Genesis Block: genesisBlock = " + genesisBlock );
// this is 100 for each coin from what I can tell
this.spendableCoinbaseDepth = 100;
@@ -113,8 +114,9 @@ public class DeterminedNetworkParams extends NetworkParameters implements Altcoi
//
// LOGGER.info("request = " + request);
//
// checkState(genesisHash.equals(request.getExpectedGenesisHash()));
this.alertSigningKey = Hex.decode(request.getPubKey());
// checkState(genesisHash.equals(request.getExpectedGenesisHash()))
// alertSigningKey is removed in v16
// this.alertSigningKey = Hex.decode(request.getPubKey());
this.majorityEnforceBlockUpgrade = request.getMajorityEnforceBlockUpgrade();
this.majorityRejectBlockOutdated = request.getMajorityRejectBlockOutdated();
@@ -221,6 +223,12 @@ public class DeterminedNetworkParams extends NetworkParameters implements Altcoi
}
}
@Override
public Block getGenesisBlock() {
//ToDo: Finish
return null;
}
/**
* Get the difficulty target expected for the next block. This includes all
* the weird cases for Litecoin such as testnet blocks which can be maximum

View File

@@ -14,6 +14,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
public class Digibyte extends Bitcoiny {
@@ -59,7 +60,7 @@ public class Digibyte extends Bitcoiny {
@Override
public long getP2shFee(Long timestamp) {
return this.getFeeCeiling();
return this.getFeeRequired();
}
},
TEST3 {
@@ -109,14 +110,14 @@ public class Digibyte extends Bitcoiny {
}
};
private long feeCeiling = MAINNET_FEE;
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
public long getFeeCeiling() {
return feeCeiling;
public long getFeeRequired() {
return feeRequired.get();
}
public void setFeeCeiling(long feeCeiling) {
this.feeCeiling = feeCeiling;
public void setFeeRequired(long feeRequired) {
this.feeRequired.set(feeRequired);
}
public abstract NetworkParameters getParams();
@@ -178,13 +179,13 @@ public class Digibyte extends Bitcoiny {
}
@Override
public long getFeeCeiling() {
return this.digibyteNet.getFeeCeiling();
public long getFeeRequired() {
return this.digibyteNet.getFeeRequired();
}
@Override
public void setFeeCeiling(long fee) {
public void setFeeRequired(long fee) {
this.digibyteNet.setFeeCeiling( fee );
this.digibyteNet.setFeeRequired( fee );
}
}
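
The same feeCeiling-to-feeRequired rename repeats for Dogecoin, Litecoin, Ravencoin and PirateChain below; the common thread is swapping a plain long field for an AtomicLong so fee updates from one thread (for example a settings or API call) are safely visible to readers such as getP2shFee(). A condensed sketch of the shared pattern, with an illustrative class name and placeholder default fee:

// Sketch of the shared pattern - not a class from this changeset.
import java.util.concurrent.atomic.AtomicLong;

public class FeeRequiredHolder {

    private static final long MAINNET_FEE = 10_000L; // placeholder default; coin-specific in practice

    // AtomicLong gives atomic, visible 64-bit reads/writes without extra synchronization,
    // unlike a plain (non-volatile) long field.
    private final AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);

    public long getFeeRequired() {
        return feeRequired.get();
    }

    public void setFeeRequired(long feeRequired) {
        this.feeRequired.set(feeRequired);
    }
}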

View File

@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -569,7 +571,14 @@ public class DigibyteACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -578,13 +587,14 @@ public class DigibyteACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -597,8 +607,13 @@ public class DigibyteACCTv3 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -13,6 +13,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
public class Dogecoin extends Bitcoiny {
@@ -60,7 +61,7 @@ public class Dogecoin extends Bitcoiny {
@Override
public long getP2shFee(Long timestamp) {
return this.getFeeCeiling();
return this.getFeeRequired();
}
},
TEST3 {
@@ -110,14 +111,14 @@ public class Dogecoin extends Bitcoiny {
}
};
private long feeCeiling = MAINNET_FEE;
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
public long getFeeCeiling() {
return feeCeiling;
public long getFeeRequired() {
return feeRequired.get();
}
public void setFeeCeiling(long feeCeiling) {
this.feeCeiling = feeCeiling;
public void setFeeRequired(long feeRequired) {
this.feeRequired.set(feeRequired);
}
public abstract NetworkParameters getParams();
@@ -179,13 +180,13 @@ public class Dogecoin extends Bitcoiny {
}
@Override
public long getFeeCeiling() {
return this.dogecoinNet.getFeeCeiling();
public long getFeeRequired() {
return this.dogecoinNet.getFeeRequired();
}
@Override
public void setFeeCeiling(long fee) {
public void setFeeRequired(long fee) {
this.dogecoinNet.setFeeCeiling( fee );
this.dogecoinNet.setFeeRequired( fee );
}
}

View File

@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -566,7 +568,14 @@ public class DogecoinACCTv1 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -575,13 +584,14 @@ public class DogecoinACCTv1 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -594,8 +604,13 @@ public class DogecoinACCTv1 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -569,7 +571,14 @@ public class DogecoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -578,13 +587,14 @@ public class DogecoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -597,8 +607,13 @@ public class DogecoinACCTv3 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -644,8 +644,10 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
}
/**
* Performs RPC call, with automatic reconnection to different server if needed.
* <p>
* <p>Performs RPC call, with automatic reconnection to different server if needed.
* </p>
* @param method String representation of the RPC call value
* @param params a list of Objects passed to the method of the Remote Server
* @return "result" object from within JSON output
* @throws ForeignBlockchainException if server returns error or something goes wrong
*/

View File

@@ -184,6 +184,11 @@ public class LegacyZcashAddress extends Address {
return p2sh ? ScriptType.P2SH : ScriptType.P2PKH;
}
@Override
public int compareTo(Address address) {
return this.toString().compareTo(address.toString());
}
/**
* Given an address, examines the version byte and attempts to find a matching NetworkParameters. If you aren't sure
* which network the address is intended for (eg, it was provided by a user), you can use this to decide if it is

View File

@@ -14,6 +14,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
public class Litecoin extends Bitcoiny {
@@ -63,7 +64,7 @@ public class Litecoin extends Bitcoiny {
@Override
public long getP2shFee(Long timestamp) {
return this.getFeeCeiling();
return this.getFeeRequired();
}
},
TEST3 {
@@ -116,14 +117,14 @@ public class Litecoin extends Bitcoiny {
}
};
private long feeCeiling = MAINNET_FEE;
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
public long getFeeCeiling() {
return feeCeiling;
public long getFeeRequired() {
return feeRequired.get();
}
public void setFeeCeiling(long feeCeiling) {
this.feeCeiling = feeCeiling;
public void setFeeRequired(long feeRequired) {
this.feeRequired.set(feeRequired);
}
public abstract NetworkParameters getParams();
@@ -185,13 +186,13 @@ public class Litecoin extends Bitcoiny {
}
@Override
public long getFeeCeiling() {
return this.litecoinNet.getFeeCeiling();
public long getFeeRequired() {
return this.litecoinNet.getFeeRequired();
}
@Override
public void setFeeCeiling(long fee) {
public void setFeeRequired(long fee) {
this.litecoinNet.setFeeCeiling( fee );
this.litecoinNet.setFeeRequired( fee );
}
}

View File

@@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
import com.google.common.primitives.Bytes;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -559,7 +561,14 @@ public class LitecoinACCTv1 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -568,13 +577,14 @@ public class LitecoinACCTv1 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -587,8 +597,13 @@ public class LitecoinACCTv1 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
import com.google.common.primitives.Bytes;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -562,7 +564,14 @@ public class LitecoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -571,13 +580,14 @@ public class LitecoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -590,8 +600,13 @@ public class LitecoinACCTv3 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -3,18 +3,19 @@ package org.qortal.crosschain;
import org.bitcoinj.core.NetworkParameters;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicLong;
public class NetTBD {
private String name;
private long feeCeiling;
private AtomicLong feeRequired;
private NetworkParameters params;
private Collection<ElectrumX.Server> servers;
private String genesisHash;
public NetTBD(String name, long feeCeiling, NetworkParameters params, Collection<ElectrumX.Server> servers, String genesisHash) {
public NetTBD(String name, long feeRequired, NetworkParameters params, Collection<ElectrumX.Server> servers, String genesisHash) {
this.name = name;
this.feeCeiling = feeCeiling;
this.feeRequired = new AtomicLong(feeRequired);
this.params = params;
this.servers = servers;
this.genesisHash = genesisHash;
@@ -25,14 +26,14 @@ public class NetTBD {
return this.name;
}
public long getFeeCeiling() {
public long getFeeRequired() {
return feeCeiling;
return feeRequired.get();
}
public void setFeeCeiling(long feeCeiling) {
public void setFeeRequired(long feeRequired) {
this.feeCeiling = feeCeiling;
this.feeRequired.set(feeRequired);
}
public NetworkParameters getParams() {

View File

@@ -21,6 +21,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;
public class PirateChain extends Bitcoiny {
@@ -51,12 +52,7 @@ public class PirateChain extends Bitcoiny {
public Collection<Server> getServers() {
return Arrays.asList(
// Servers chosen on NO BASIS WHATSOEVER from various sources!
new Server("lightd.pirate.black", Server.ConnectionType.SSL, 443),
new Server("wallet-arrr1.qortal.online", Server.ConnectionType.SSL, 443),
new Server("wallet-arrr2.qortal.online", Server.ConnectionType.SSL, 443),
new Server("wallet-arrr3.qortal.online", Server.ConnectionType.SSL, 443),
new Server("wallet-arrr4.qortal.online", Server.ConnectionType.SSL, 443),
new Server("wallet-arrr5.qortal.online", Server.ConnectionType.SSL, 443)
new Server("lightd.pirate.black", Server.ConnectionType.SSL, 443)
);
}
@@ -67,7 +63,7 @@ public class PirateChain extends Bitcoiny {
@Override
public long getP2shFee(Long timestamp) {
return this.getFeeCeiling();
return this.getFeeRequired();
}
},
TEST3 {
@@ -117,14 +113,14 @@ public class PirateChain extends Bitcoiny {
}
};
private long feeCeiling = MAINNET_FEE;
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
public long getFeeCeiling() {
return feeCeiling;
public long getFeeRequired() {
return feeRequired.get();
}
public void setFeeCeiling(long feeCeiling) {
this.feeCeiling = feeCeiling;
public void setFeeRequired(long feeRequired) {
this.feeRequired.set(feeRequired);
}
public abstract NetworkParameters getParams();
@@ -186,14 +182,14 @@ public class PirateChain extends Bitcoiny {
}
@Override
public long getFeeCeiling() {
return this.pirateChainNet.getFeeCeiling();
public long getFeeRequired() {
return this.pirateChainNet.getFeeRequired();
}
@Override
public void setFeeCeiling(long fee) {
public void setFeeRequired(long fee) {
this.pirateChainNet.setFeeCeiling( fee );
this.pirateChainNet.setFeeRequired( fee );
}
/**
* Returns confirmed balance, based on passed payment script.

View File

@@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
import com.google.common.primitives.Bytes;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -580,7 +582,14 @@ public class PirateChainACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -589,13 +598,14 @@ public class PirateChainACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -608,8 +618,13 @@ public class PirateChainACCTv3 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -8,6 +8,7 @@ import org.bouncycastle.util.encoders.DecoderException;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.controller.PirateChainWalletController;
import org.qortal.crypto.Crypto;
import org.qortal.settings.Settings;
@@ -67,8 +68,8 @@ public class PirateWallet {
}
// Pick a random server
PirateLightClient.Server server = this.getRandomServer();
String serverUri = String.format("https://%s:%d/", server.hostname, server.port);
ChainableServer server = PirateChain.getInstance().blockchainProvider.getCurrentServer();
String serverUri = String.format("https://%s:%d/", server.getHostName(), server.getPort());
// Pirate library uses base64 encoding
String entropy64 = Base64.toBase64String(this.entropyBytes);

View File

@@ -14,6 +14,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
public class Ravencoin extends Bitcoiny {
@@ -61,7 +62,7 @@ public class Ravencoin extends Bitcoiny {
@Override
public long getP2shFee(Long timestamp) {
return this.getFeeCeiling();
return this.getFeeRequired();
}
},
TEST3 {
@@ -111,14 +112,14 @@ public class Ravencoin extends Bitcoiny {
}
};
private long feeCeiling = MAINNET_FEE;
private AtomicLong feeRequired = new AtomicLong( MAINNET_FEE );
public long getFeeCeiling() {
return feeCeiling;
public long getFeeRequired() {
return feeRequired.get();
}
public void setFeeCeiling(long feeCeiling) {
this.feeCeiling = feeCeiling;
public void setFeeRequired(long feeRequired) {
this.feeRequired.set(feeRequired);
}
public abstract NetworkParameters getParams();
@@ -180,13 +181,13 @@ public class Ravencoin extends Bitcoiny {
}
@Override
public long getFeeCeiling() {
return this.ravencoinNet.getFeeCeiling();
public long getFeeRequired() {
return this.ravencoinNet.getFeeRequired();
}
@Override
public void setFeeCeiling(long fee) {
public void setFeeRequired(long fee) {
this.ravencoinNet.setFeeCeiling( fee );
this.ravencoinNet.setFeeRequired( fee );
}
}

View File

@@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.ciyam.at.*;
import org.qortal.account.Account;
import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.at.QortalFunctionCode;
import org.qortal.crypto.Crypto;
@@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.OptionalLong;
import static org.ciyam.at.OpCode.calcOffset;
@@ -569,7 +571,14 @@ public class RavencoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
@Override
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
return crossChainTradeDataList;
}
/**
@@ -578,13 +587,14 @@ public class RavencoinACCTv3 implements ACCT {
@Override
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
}
/**
* Returns CrossChainTradeData with useful info extracted from AT.
*/
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
@Override
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
byte[] addressBytes = new byte[25]; // for general use
String atAddress = atStateData.getATAddress();
@@ -597,8 +607,13 @@ public class RavencoinACCTv3 implements ACCT {
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
tradeData.creationTimestamp = creationTimestamp;
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
if(optionalBalance.isPresent()) {
tradeData.qortBalance = optionalBalance.getAsLong();
}
else {
Account atAccount = new Account(repository, atAddress);
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
}
byte[] stateData = atStateData.getStateData();
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);

View File

@@ -100,7 +100,7 @@ public class AES {
// Prepend the output stream with the 16 byte initialization vector
outputStream.write(iv.getIV());
byte[] buffer = new byte[1024];
byte[] buffer = new byte[65536];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
byte[] output = cipher.update(buffer, 0, bytesRead);
@@ -138,7 +138,7 @@ public class AES {
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
byte[] buffer = new byte[64];
byte[] buffer = new byte[65536];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
byte[] output = cipher.update(buffer, 0, bytesRead);
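
Both the encrypt and decrypt loops now use the same 64 KiB buffer (previously 1024 and 64 bytes respectively), which cuts the number of cipher.update() calls on large inputs. A minimal sketch of the shared streaming shape, with the buffer size pulled into a constant; the constant and method names are illustrative, not from this changeset.

// Sketch only: the buffered cipher-streaming loop shape used above.
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;

public class CipherStreams {

    private static final int BUFFER_SIZE = 65536; // 64 KiB, matching the updated encrypt/decrypt loops

    public static void pump(Cipher cipher, InputStream in, OutputStream out) throws IOException {
        byte[] buffer = new byte[BUFFER_SIZE];
        int bytesRead;
        while ((bytesRead = in.read(buffer)) != -1) {
            byte[] output = cipher.update(buffer, 0, bytesRead);
            if (output != null)
                out.write(output);
        }
        try {
            byte[] finalBytes = cipher.doFinal();
            if (finalBytes != null)
                out.write(finalBytes);
        } catch (IllegalBlockSizeException | BadPaddingException e) {
            throw new IOException("Cipher finalisation failed", e);
        }
    }
}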

View File

@@ -0,0 +1,57 @@
package org.qortal.data.crosschain;
import org.json.JSONObject;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
public class ForeignFeeData {
private String blockchain;
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private long fee;
protected ForeignFeeData() {
/* JAXB */
}
public ForeignFeeData(String blockchain,
long fee) {
this.blockchain = blockchain;
this.fee = fee;
}
public String getBlockchain() {
return this.blockchain;
}
public long getFee() {
return this.fee;
}
public JSONObject toJson() {
JSONObject jsonObject = new JSONObject();
jsonObject.put("blockchain", this.getBlockchain());
jsonObject.put("fee", this.getFee());
return jsonObject;
}
public static ForeignFeeData fromJson(JSONObject json) {
return new ForeignFeeData(
json.isNull("blockchain") ? null : json.getString("blockchain"),
json.isNull("fee") ? null : json.getLong("fee")
);
}
@Override
public String toString() {
return "ForeignFeeData{" +
"blockchain='" + blockchain + '\'' +
", fee=" + fee +
'}';
}
}
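
A short round-trip sketch for the new ForeignFeeData type, using only the toJson()/fromJson() methods defined above. Note that fromJson() feeds json.getLong("fee") through a nullable ternary into the long constructor parameter, so an absent or null "fee" key would surface as an unboxing NullPointerException rather than a null field. The blockchain name and fee value below are placeholders.

// Sketch only: round-trips a ForeignFeeData through its JSON helpers.
import org.json.JSONObject;
import org.qortal.data.crosschain.ForeignFeeData;

public class ForeignFeeDataExample {

    public static void main(String[] args) {
        ForeignFeeData original = new ForeignFeeData("LITECOIN", 1000L); // placeholder values

        JSONObject json = original.toJson();           // {"blockchain":"LITECOIN","fee":1000} (key order not guaranteed)
        ForeignFeeData restored = ForeignFeeData.fromJson(json);

        System.out.println(json);
        System.out.println(restored);                  // uses the toString() defined above
    }
}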

View File

@@ -0,0 +1,90 @@
package org.qortal.data.crosschain;
import org.json.JSONObject;
import org.qortal.data.account.MintingAccountData;
import org.qortal.utils.Base58;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import java.util.Objects;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
public class ForeignFeeDecodedData {
protected long timestamp;
protected byte[] data;
protected String atAddress;
protected Integer fee;
// Constructors
// necessary for JAXB serialization
protected ForeignFeeDecodedData() {
}
public ForeignFeeDecodedData(long timestamp, byte[] data, String atAddress, Integer fee) {
this.timestamp = timestamp;
this.data = data;
this.atAddress = atAddress;
this.fee = fee;
}
public long getTimestamp() {
return this.timestamp;
}
public byte[] getData() {
return this.data;
}
public String getAtAddress() {
return atAddress;
}
public Integer getFee() {
return this.fee;
}
// Comparison
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ForeignFeeDecodedData that = (ForeignFeeDecodedData) o;
return timestamp == that.timestamp && Objects.equals(atAddress, that.atAddress) && Objects.equals(fee, that.fee);
}
@Override
public int hashCode() {
return Objects.hash(timestamp, atAddress, fee);
}
@Override
public String toString() {
return "ForeignFeeDecodedData{" +
"timestamp=" + timestamp +
", atAddress='" + atAddress + '\'' +
", fee=" + fee +
'}';
}
public JSONObject toJson() {
JSONObject jsonObject = new JSONObject();
jsonObject.put("data", Base58.encode(this.data));
jsonObject.put("atAddress", this.atAddress);
jsonObject.put("timestamp", this.timestamp);
jsonObject.put("fee", this.fee);
return jsonObject;
}
public static ForeignFeeDecodedData fromJson(JSONObject json) {
return new ForeignFeeDecodedData(
json.isNull("timestamp") ? null : json.getLong("timestamp"),
json.isNull("data") ? null : Base58.decode(json.getString("data")),
json.isNull("atAddress") ? null : json.getString("atAddress"),
json.isNull("fee") ? null : json.getInt("fee"));
}
}

View File

@@ -0,0 +1,69 @@
package org.qortal.data.crosschain;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import java.util.Objects;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
public class ForeignFeeEncodedData {
protected long timestamp;
protected String data;
protected String atAddress;
protected Integer fee;
// Constructors
// necessary for JAXB serialization
protected ForeignFeeEncodedData() {
}
public ForeignFeeEncodedData(long timestamp, String data, String atAddress, Integer fee) {
this.timestamp = timestamp;
this.data = data;
this.atAddress = atAddress;
this.fee = fee;
}
public long getTimestamp() {
return this.timestamp;
}
public String getData() {
return this.data;
}
public String getAtAddress() {
return atAddress;
}
public Integer getFee() {
return this.fee;
}
// Comparison
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ForeignFeeEncodedData that = (ForeignFeeEncodedData) o;
return timestamp == that.timestamp && Objects.equals(atAddress, that.atAddress) && Objects.equals(fee, that.fee);
}
@Override
public int hashCode() {
return Objects.hash(timestamp, atAddress, fee);
}
@Override
public String toString() {
return "ForeignFeeDecodedData{" +
"timestamp=" + timestamp +
", atAddress='" + atAddress + '\'' +
", fee=" + fee +
'}';
}
}

View File

@@ -0,0 +1,29 @@
package org.qortal.data.crosschain;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class UnsignedFeeEvent {
private boolean positive;
private String address;
public UnsignedFeeEvent() {
}
public UnsignedFeeEvent(boolean positive, String address) {
this.positive = positive;
this.address = address;
}
public boolean isPositive() {
return positive;
}
public String getAddress() {
return address;
}
}

View File

@@ -67,6 +67,11 @@ public class NameData {
this(name, reducedName, owner, data, registered, null, false, null, reference, creationGroupId);
}
// Typically used for name summary
public NameData(String name, String owner) {
this(name, null, owner, null, 0L, null, false, null, null, 0);
}
// Getters / setters
public String getName() {

View File

@@ -0,0 +1,30 @@
package org.qortal.event;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class FeeWaitingEvent implements Event{
private boolean positive;
private String address;
public FeeWaitingEvent() {
}
public FeeWaitingEvent(boolean positive, String address) {
this.positive = positive;
this.address = address;
}
public boolean isPositive() {
return positive;
}
public String getAddress() {
return address;
}
}

View File

@@ -0,0 +1,4 @@
package org.qortal.event;
public class LockingFeeUpdateEvent implements Event{
}

View File

@@ -0,0 +1,15 @@
package org.qortal.event;
import org.qortal.crosschain.Bitcoiny;
public class RequiredFeeUpdateEvent implements Event{
private final Bitcoiny bitcoiny;
public RequiredFeeUpdateEvent(Bitcoiny bitcoiny) {
this.bitcoiny = bitcoiny;
}
public Bitcoiny getBitcoiny() {
return bitcoiny;
}
}

View File

@@ -714,6 +714,7 @@ public class Network {
// We can't block here so use tryRepository(). We don't NEED to connect a new peer.
try (Repository repository = RepositoryManager.tryRepository()) {
if (repository == null) {
LOGGER.warn("Unable to get repository connection : Network.getConnectablePeer()");
return null;
}
@@ -982,7 +983,7 @@ public class Network {
if (maxThreadsForMessageType != null) {
Integer threadCount = threadsPerMessageType.get(message.getType());
if (threadCount != null && threadCount >= maxThreadsForMessageType) {
LOGGER.trace("Discarding {} message as there are already {} active threads", message.getType().name(), threadCount);
LOGGER.warn("Discarding {} message as there are already {} active threads", message.getType().name(), threadCount);
return;
}
}
@@ -1499,6 +1500,7 @@ public class Network {
// Pruning peers isn't critical so no need to block for a repository instance.
try (Repository repository = RepositoryManager.tryRepository()) {
if (repository == null) {
LOGGER.warn("Unable to get repository connection : Network.prunePeers()");
return;
}
@@ -1567,6 +1569,7 @@ public class Network {
// Merging peers isn't critical so don't block for a repository instance.
try (Repository repository = RepositoryManager.tryRepository()) {
if (repository == null) {
LOGGER.warn("Unable to get repository connection : Network.opportunisticMergePeers()");
return;
}

View File

@@ -640,10 +640,13 @@ public class Peer {
return false;
try {
this.outputBuffer = ByteBuffer.wrap(message.toBytes());
byte[] messageBytes = message.toBytes();
this.outputBuffer = ByteBuffer.wrap(messageBytes);
this.outputMessageType = message.getType().name();
this.outputMessageId = message.getId();
LOGGER.trace("[{}] Sending {} message with ID {} to peer {}",
this.peerConnectionId, this.outputMessageType, this.outputMessageId, this);
@@ -662,12 +665,22 @@ public class Peer {
// If output byte buffer is not null, send from that
int bytesWritten = this.socketChannel.write(outputBuffer);
LOGGER.trace("[{}] Sent {} bytes of {} message with ID {} to peer {} ({} total)", this.peerConnectionId,
bytesWritten, this.outputMessageType, this.outputMessageId, this, outputBuffer.limit());
int zeroSendCount = 0;
// If we've sent 0 bytes then socket buffer is full so we need to wait until it's empty again
if (bytesWritten == 0) {
return true;
while (bytesWritten == 0) {
if (zeroSendCount > 9) {
LOGGER.debug("Socket write stuck for too long, returning");
return true;
}
try {
Thread.sleep(10); // sleep 10 ms to give the socket buffer time to flush
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false; // optional, if you want to signal shutdown
}
zeroSendCount++;
bytesWritten = this.socketChannel.write(outputBuffer);
}
// If we then exhaust the byte buffer, set it to null (otherwise loop and try to send more)
@@ -723,13 +736,18 @@ public class Peer {
* @return <code>true</code> if message successfully sent; <code>false</code> otherwise
*/
public boolean sendMessageWithTimeout(Message message, int timeout) {
return PeerSendManagement.getInstance().getOrCreateSendManager(this).queueMessage(message, timeout);
}
public boolean sendMessageWithTimeoutNow(Message message, int timeout) {
if (!this.socketChannel.isOpen()) {
return false;
}
try {
// Queue message, to be picked up by ChannelWriteTask and then peer.writeChannel()
LOGGER.trace("[{}] Queuing {} message with ID {} to peer {}", this.peerConnectionId,
LOGGER.debug("[{}] Queuing {} message with ID {} to peer {}", this.peerConnectionId,
message.getType().name(), message.getId(), this);
// Check message properly constructed

View File

@@ -0,0 +1,55 @@
package org.qortal.network;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
public class PeerSendManagement {
private static final Logger LOGGER = LogManager.getLogger(PeerSendManagement.class);
private final Map<String, PeerSendManager> peerSendManagers = new ConcurrentHashMap<>();
public PeerSendManager getOrCreateSendManager(Peer peer) {
return peerSendManagers.computeIfAbsent(peer.toString(), p -> new PeerSendManager(peer));
}
private PeerSendManagement() {
ScheduledExecutorService cleaner = Executors.newSingleThreadScheduledExecutor();
cleaner.scheduleAtFixedRate(() -> {
long idleCutoff = TimeUnit.MINUTES.toMillis(2);
Iterator<Map.Entry<String, PeerSendManager>> iterator = peerSendManagers.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, PeerSendManager> entry = iterator.next();
PeerSendManager manager = entry.getValue();
if (manager.isIdle(idleCutoff)) {
iterator.remove(); // SAFE removal during iteration
manager.shutdown();
LOGGER.debug("Cleaned up PeerSendManager for peer {}", entry.getKey());
}
}
}, 0, 5, TimeUnit.MINUTES);
}
private static PeerSendManagement instance;
public static PeerSendManagement getInstance() {
if( instance == null ) {
instance = new PeerSendManagement();
}
return instance;
}
}
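
As the Peer diff above shows, Peer.sendMessageWithTimeout() now simply delegates to this per-peer send queue. A condensed sketch of that call path; peer and message construction are omitted, and the helper class name is illustrative.

// Sketch only: the delegation path introduced in this changeset.
import org.qortal.network.Peer;
import org.qortal.network.PeerSendManagement;
import org.qortal.network.message.Message;

public class SendViaQueueExample {

    public static boolean send(Peer peer, Message message, int timeout) {
        // Equivalent to peer.sendMessageWithTimeout(message, timeout):
        // messages are queued per peer and drained by that peer's PeerSendManager thread,
        // which retries, tracks failures, and disconnects/cools down after repeated errors.
        return PeerSendManagement.getInstance()
                .getOrCreateSendManager(peer)
                .queueMessage(message, timeout);
    }
}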

View File

@@ -0,0 +1,138 @@
package org.qortal.network;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.network.message.Message;
public class PeerSendManager {
private static final Logger LOGGER = LogManager.getLogger(PeerSendManager.class);
private static final int MAX_FAILURES = 15;
private static final int MAX_MESSAGE_ATTEMPTS = 2;
private static final int RETRY_DELAY_MS = 100;
private static final long MAX_QUEUE_DURATION_MS = 20_000;
private static final long COOLDOWN_DURATION_MS = 20_000;
private final Peer peer;
private final BlockingQueue<TimedMessage> queue = new LinkedBlockingQueue<>();
private final ExecutorService executor;
private final AtomicInteger failureCount = new AtomicInteger(0);
private static final AtomicInteger threadCount = new AtomicInteger(1);
private volatile boolean coolingDown = false;
private volatile long lastUsed = System.currentTimeMillis();
public PeerSendManager(Peer peer) {
this.peer = peer;
this.executor = Executors.newSingleThreadExecutor(r -> {
Thread t = new Thread(r);
t.setName("PeerSendManager-" + peer.getResolvedAddress().getHostString() + "-" + threadCount.getAndIncrement());
return t;
});
start();
}
private void start() {
executor.submit(() -> {
while (!Thread.currentThread().isInterrupted()) {
try {
TimedMessage timedMessage = queue.take();
long age = System.currentTimeMillis() - timedMessage.timestamp;
if (age > MAX_QUEUE_DURATION_MS) {
LOGGER.debug("Dropping stale message {} ({}ms old)", timedMessage.message.getId(), age);
continue;
}
Message message = timedMessage.message;
int timeout = timedMessage.timeout;
boolean success = false;
for (int attempt = 1; attempt <= MAX_MESSAGE_ATTEMPTS; attempt++) {
try {
if (peer.sendMessageWithTimeoutNow(message, timeout)) {
success = true;
failureCount.set(0); // reset on success
break;
}
} catch (Exception e) {
LOGGER.debug("Attempt {} failed for message {} to peer {}: {}", attempt, message.getId(), peer, e.getMessage());
}
Thread.sleep(RETRY_DELAY_MS);
}
if (!success) {
int totalFailures = failureCount.incrementAndGet();
LOGGER.debug("Failed to send message {} to peer {}. Total failures: {}", message.getId(), peer, totalFailures);
if (totalFailures >= MAX_FAILURES) {
LOGGER.debug("Peer {} exceeded failure limit ({}). Disconnecting...", peer, totalFailures);
peer.disconnect("Too many message send failures");
coolingDown = true;
queue.clear();
try {
Thread.sleep(COOLDOWN_DURATION_MS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
} finally {
coolingDown = false;
failureCount.set(0);
}
}
}
Thread.sleep(50); // small throttle
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
} catch (Exception e) {
LOGGER.error("Unexpected error in PeerSendManager for peer {}: {}", peer, e.getMessage(), e);
}
}
});
}
public boolean queueMessage(Message message, int timeout) {
if (coolingDown) {
LOGGER.debug("In cooldown, ignoring message {}", message.getId());
return false;
}
lastUsed = System.currentTimeMillis();
if (!queue.offer(new TimedMessage(message, timeout))) {
LOGGER.debug("Send queue full, dropping message {}", message.getId());
return false;
}
return true;
}
public boolean isIdle(long cutoffMillis) {
return System.currentTimeMillis() - lastUsed > cutoffMillis;
}
public void shutdown() {
queue.clear();
executor.shutdownNow();
}
private static class TimedMessage {
final Message message;
final long timestamp;
final int timeout;
TimedMessage(Message message, int timeout) {
this.message = message;
this.timestamp = System.currentTimeMillis();
this.timeout = timeout;
}
}
}

View File

@@ -0,0 +1,43 @@
package org.qortal.network.message;
import org.qortal.data.crosschain.ForeignFeeDecodedData;
import org.qortal.utils.ForeignFeesMessageUtils;
import java.nio.ByteBuffer;
import java.util.List;
/**
* For sending foreign chain fee info to a remote peer.
*/
public class ForeignFeesMessage extends Message {
public static final long MIN_PEER_VERSION = 0x300060000L; // 3.6.0
private List<ForeignFeeDecodedData> foreignFees;
public ForeignFeesMessage(List<ForeignFeeDecodedData> foreignFeeDecodedData) {
super(MessageType.FOREIGN_FEES);
this.dataBytes = ForeignFeesMessageUtils.fromDataToSendBytes(foreignFeeDecodedData);
this.checksumBytes = Message.generateChecksum(this.dataBytes);
}
private ForeignFeesMessage(int id, List<ForeignFeeDecodedData> foreignFees) {
super(id, MessageType.FOREIGN_FEES);
this.foreignFees = foreignFees;
}
public List<ForeignFeeDecodedData> getForeignFees() {
return this.foreignFees;
}
public static Message fromByteBuffer(int id, ByteBuffer bytes) throws MessageException {
List<ForeignFeeDecodedData> foreignFeeDecodedData = ForeignFeesMessageUtils.fromSendBytesToData(bytes);
return new ForeignFeesMessage(id, foreignFeeDecodedData);
}
}
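
Putting the new pieces together: a hedged sketch of how a node might hand locally-known foreign fee data to a peer using the message type above and the queued send path. The timestamp, fee, AT address, and signed-data bytes here are placeholders, not the real signing flow.

// Sketch only: wiring ForeignFeeDecodedData into a ForeignFeesMessage and queueing it to a peer.
import java.util.List;

import org.qortal.data.crosschain.ForeignFeeDecodedData;
import org.qortal.network.Peer;
import org.qortal.network.message.ForeignFeesMessage;
import org.qortal.network.message.Message;

public class BroadcastForeignFeesExample {

    public static void broadcast(Peer peer, byte[] signedFeeData) {
        // Placeholder values: a real node would derive these from its stored, signed fee entries.
        ForeignFeeDecodedData entry = new ForeignFeeDecodedData(
                System.currentTimeMillis(),   // timestamp
                signedFeeData,                // opaque signed payload
                "ATAddressPlaceholder",       // AT address the fee applies to
                1000);                        // fee (Integer)

        Message message = new ForeignFeesMessage(List.of(entry));

        // Uses the per-peer send queue added in this changeset (see Peer.sendMessageWithTimeout()).
        peer.sendMessageWithTimeout(message, 5_000);
    }
}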

View File

@@ -0,0 +1,46 @@
package org.qortal.network.message;
import org.qortal.data.crosschain.ForeignFeeDecodedData;
import org.qortal.utils.ForeignFeesMessageUtils;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
public class GetForeignFeesMessage extends Message {
private static final Map<Long, Map<Byte, byte[]>> EMPTY_ONLINE_ACCOUNTS = Collections.emptyMap();
private final List<ForeignFeeDecodedData> foreignFeeDecodedData;
public GetForeignFeesMessage(List<ForeignFeeDecodedData> foreignFeeDecodedData) {
super(MessageType.GET_FOREIGN_FEES);
this.foreignFeeDecodedData = foreignFeeDecodedData;
// If we don't have ANY foreign fee data then it's an easier construction...
if (foreignFeeDecodedData.isEmpty()) {
this.dataBytes = EMPTY_DATA_BYTES;
return;
}
this.dataBytes = ForeignFeesMessageUtils.fromDataToGetBytes(foreignFeeDecodedData);
this.checksumBytes = Message.generateChecksum(this.dataBytes);
}
private GetForeignFeesMessage(int id, List<ForeignFeeDecodedData> foreignFeeDecodedData) {
super(id, MessageType.GET_FOREIGN_FEES);
this.foreignFeeDecodedData = foreignFeeDecodedData;
}
public List<ForeignFeeDecodedData> getForeignFeeData() {
return foreignFeeDecodedData;
}
public static Message fromByteBuffer(int id, ByteBuffer bytes) {
return new GetForeignFeesMessage(id, ForeignFeesMessageUtils.fromGetBytesToData(bytes));
}
}

View File

@@ -79,7 +79,10 @@ public enum MessageType {
GET_NAME(182, GetNameMessage::fromByteBuffer),
TRANSACTIONS(190, TransactionsMessage::fromByteBuffer),
GET_ACCOUNT_TRANSACTIONS(191, GetAccountTransactionsMessage::fromByteBuffer);
GET_ACCOUNT_TRANSACTIONS(191, GetAccountTransactionsMessage::fromByteBuffer),
FOREIGN_FEES( 200, ForeignFeesMessage::fromByteBuffer),
GET_FOREIGN_FEES( 201, GetForeignFeesMessage::fromByteBuffer);
public final int value;
public final MessageProducer fromByteBufferMethod;

View File

@@ -31,8 +31,28 @@ public class ChannelWriteTask implements Task {
@Override
public void perform() throws InterruptedException {
try {
boolean isSocketClogged = peer.writeChannel();
boolean isSocketClogged;
int clogCounter = 0;
do {
isSocketClogged = peer.writeChannel();
if (clogCounter > 9) {
LOGGER.warn("10 Socket Clogs - GIVING UP");
break;
}
if (isSocketClogged) {
LOGGER.debug(
"socket is clogged: peer = {} {}, retrying",
peer.getPeerData().getAddress().toString(),
Thread.currentThread().getName()
);
Thread.sleep(1000);
clogCounter++;
}
} while( isSocketClogged );
// Tell Network that we've finished
Network.getInstance().notifyChannelNotWriting(socketChannel);
@@ -49,4 +69,4 @@ public class ChannelWriteTask implements Task {
peer.disconnect("I/O error");
}
}
}
}

View File

@@ -14,6 +14,8 @@ public interface ATRepository {
/** Returns ATData using AT's address or null if none found */
public ATData fromATAddress(String atAddress) throws DataException;
public List<ATData> fromATAddresses(List<String> atAddresses) throws DataException;
/** Returns whether AT with passed address exists in repository */
public boolean exists(String atAddress) throws DataException;
@@ -62,6 +64,8 @@ public interface ATRepository {
*/
public ATStateData getLatestATState(String atAddress) throws DataException;
public List<ATStateData> getLatestATStates(List<String> collect) throws DataException;
/**
* Returns final ATStateData for ATs matching codeHash (required)
* and specific data segment value (optional).

View File

@@ -130,6 +130,8 @@ public interface AccountRepository {
*/
public AccountBalanceData getBalance(String address, long assetId) throws DataException;
public List<AccountBalanceData> getBalances(List<String> addresses, long assetId) throws DataException;
/** Returns all account balances for given assetID, optionally excluding zero balances. */
public List<AccountBalanceData> getAssetBalances(long assetId, Boolean excludeZero) throws DataException;

Some files were not shown because too many files have changed in this diff.