Compare commits

...

990 Commits

Author SHA1 Message Date
CalDescent
94d3664cb0 Bump version to 4.2.1 2023-07-31 19:30:45 +01:00
CalDescent
f5c8dfe766 Added maxTransactionsPerBlock setting (default 25) to reduce minting load on slower machines.
This is a short term limit, is well above current usage levels, and can be increased substantially in future once the block minter code has been improved.
2023-07-31 19:25:26 +01:00
CalDescent
f7e1f2fca8 Increased timeout for SEARCH_QDN_RESOURCES from 10 to 30 seconds. 2023-07-28 21:47:29 +01:00
CalDescent
811b647c88 Catch UnsupportedAddressTypeException and fall back to IPv4 binding. 2023-07-28 18:58:47 +01:00
CalDescent
3215bb638d More online accounts improvements 2023-07-22 10:44:41 +01:00
CalDescent
8ae7a1d65b Removed (Get)OnlineAccountsV1 and V2, as these are no longer used. 2023-07-21 14:28:47 +01:00
CalDescent
29dcd53002 Revert "Improved filtering of online accounts data."
This reverts commit c14fca5660.
2023-07-16 20:04:45 +01:00
CalDescent
62908f867a Bump version to 4.2.0 2023-07-16 19:09:08 +01:00
CalDescent
5f86ecafd9 Refactored developer proxy, and modified IPv6 fallback so that it only occurs on a connection failure. 2023-07-09 12:35:46 +01:00
CalDescent
fe999a11f4 Include CANCEL_SELL_NAME transactions when performing a complete rebuild of names. 2023-07-08 11:06:33 +01:00
CalDescent
c14fca5660 Improved filtering of online accounts data. 2023-07-08 11:05:14 +01:00
CalDescent
fd8d720946 Added support for group encryption in service validation. 2023-06-23 13:32:23 +01:00
CalDescent
d628b3ab2a Renamed the "usePrefix" parameter to "includeResourceIdInPrefix", and slightly modified its functionality. 2023-06-17 13:04:46 +01:00
CalDescent
5928b54a33 Added developer QDN proxy.
This allows Q-Apps and websites to be developed and tested in real time, by proxying an existing webserver such as a React/Vite dev server on localhost:5173. The proxy adds all QDN functionality to the existing server in real time. Needs UI integration before all features can be used.
2023-06-17 13:03:29 +01:00
QuickMythril
91dfc5efd0 Merge pull request #119 from QuickMythril/upgrade-tls
Upgraded to TLSv1.3
2023-06-14 10:25:25 -04:00
QuickMythril
1343a88ee3 Merge pull request #124 from QuickMythril/translate-jp
Added Japanese translations (Credit: R M)
2023-06-02 08:01:04 -04:00
QuickMythril
7f7b02f003 Updated Japanese translations (Credit: R M) 2023-06-02 06:17:48 -04:00
QuickMythril
5650923805 Merge pull request #123 from QuickMythril/2023-05-25
Added some fee info & Q-App support for foreign coins
2023-05-31 09:14:46 -04:00
CalDescent
5fb2640a3a Bump version to 4.1.3 2023-05-30 18:05:05 +01:00
CalDescent
66c91fd365 MIN_PEER_VERSION for handshake set to 4.1.1 2023-05-30 17:42:31 +01:00
CalDescent
bfc03db6a9 Default minPeerVersion set to 4.1.2 2023-05-30 17:41:39 +01:00
QuickMythril
a4bb445f3e Added Japanese translations for review 2023-05-29 04:23:22 -04:00
QuickMythril
27afcf12bf Prepared files for Japanese translations 2023-05-29 04:07:52 -04:00
CalDescent
eda6ab5701 Fixed some failing unit tests, and ignored some failing BTC ones that have been superseded by LTC. 2023-05-26 18:01:09 +02:00
QuickMythril
13da0e8a7a Adjusted fee info to long format 2023-05-25 17:26:29 -04:00
QuickMythril
d260c0a9a9 Updated info on foreign coin fees 2023-05-25 16:35:08 -04:00
QuickMythril
655073c524 Added 2m timeout for GET_WALLET_BALANCE action 2023-05-25 04:41:03 -04:00
crowetic
c8f3b6918f Update start.sh
Added better defaults for JVM_MEMORY_ARGS and a description of how and when to uncomment the line.
2023-05-24 16:20:05 -07:00
CalDescent
1565a461ac Bump version to 4.1.2 2023-05-24 20:01:18 +01:00
CalDescent
1f30bef4f8 defaultArchiveVersion set to 2 2023-05-24 19:54:36 +01:00
CalDescent
6f0479c4fc Default minPeerVersion set to 4.1.1 2023-05-24 19:47:37 +01:00
CalDescent
b967800a3e Default repositoryConnectionPoolSize set to 240 2023-05-24 19:47:25 +01:00
CalDescent
0b50f965cc Default maxNetworkThreadPoolSize set to 120 2023-05-24 19:47:10 +01:00
CalDescent
90f7cee058 Bump version to 4.1.1 2023-05-21 20:34:04 +01:00
CalDescent
947b523e61 Limit query to 100 so that it doesn't return endless amounts of transaction signatures.
Using a separate database method for now to reduce risk of interfering with other parts of the code which use it. It can be combined later when there is more testing time.
2023-05-21 20:33:33 +01:00
CalDescent
95d72866e9 Use a better method to detect if the transactions table is in need of a rebuild.
Should handle cases where a previous rebuild didn't fully complete, or missed a block.
2023-05-21 20:06:09 +01:00
CalDescent
aea1cc62c8 Fixed off-by-one bug (correctly this time) 2023-05-21 20:02:58 +01:00
CalDescent
c763445e6e Log to console if an extra core restart is needed to complete the update process (this is needed in some cases after bootstrapping). 2023-05-21 19:51:22 +01:00
CalDescent
7a6b83aa22 Bump version to 4.1.0 2023-05-21 16:49:59 +01:00
CalDescent
ba555174ba Merge branch 'master' into block-sequence 2023-05-21 15:35:50 +01:00
CalDescent
3763035d4a Default recoveryModeTimeout increased to 24 hours for now.
It doesn't quite work as intended, so it's best that it doesn't interfere right away. 24 hours should be long enough for any issues to be manually resolved.
2023-05-21 15:34:27 +01:00
CalDescent
b1a904a3c7 MIN_PEER_VERSION set to 4.0.0 2023-05-21 15:26:49 +01:00
CalDescent
3c4c5a1457 Default minPeerVersion set to 4.0.0 2023-05-21 15:22:24 +01:00
CalDescent
648fa66f6a Increased default maxPeers to 40. 2023-05-21 15:22:00 +01:00
CalDescent
072aa469e3 Reduce default minBlockchainPeers to 3, ahead of the upcoming reshape. 2023-05-21 15:21:04 +01:00
CalDescent
2b2d6f4e52 Updated message. 2023-05-21 14:02:45 +01:00
CalDescent
c6456669e2 Don't allow core to start if transaction sequences haven't been rebuilt yet. 2023-05-21 12:33:37 +01:00
CalDescent
a74fa15d60 Missing import 2023-05-21 12:31:49 +01:00
CalDescent
68b99c8643 Update status when rebuilding transaction sequences. 2023-05-21 12:28:51 +01:00
CalDescent
b9015217de Fixed bug causing final block to be missed in the reshape. 2023-05-21 11:05:34 +01:00
CalDescent
e1043ceacb Fixed bug causing duplicate AT entries in local array. 2023-05-21 08:41:56 +01:00
CalDescent
8b51590844 Include AT transactions when rebuilding transaction sequences, as these aren't directly included in the block archive. 2023-05-20 20:54:22 +01:00
CalDescent
a8d92805f9 Added extra check for topOnly mode. 2023-05-20 11:33:43 +01:00
CalDescent
2cc5b90306 Merge branch 'master' into block-sequence 2023-05-14 17:28:27 +01:00
CalDescent
4cb755a2f1 Added GET /stats/supply/circulating API endpoint, to fetch total QORT minted so far. 2023-05-13 13:48:27 +01:00
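A one-line sketch of calling the endpoint from the commit above, assuming it is called from a page or app served by the node; treating the response body as plain text is an assumption, not stated in the commit.
```
// GET /stats/supply/circulating returns the total QORT minted so far
// (reading the body as plain text is an assumption)
const circulating = await (await fetch("/stats/supply/circulating")).text();
```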
CalDescent
92119b5558 Increased per-name limit for followed names by 4x. 2023-05-12 20:14:14 +01:00
CalDescent
8a1bf8b5ec Return full name data in GET /names. 2023-05-12 11:41:15 +01:00
CalDescent
f8233bd05b Added optional after parameter to GET /names. 2023-05-12 11:41:00 +01:00
CalDescent
29480e5664 Added SEARCH_NAMES Q-App action. 2023-05-12 11:17:09 +01:00
CalDescent
5a873f9465 Added prefix parameter to GET /names/search. 2023-05-12 11:11:34 +01:00
CalDescent
dc1289787d Ignore per-name limits when using storagePolicy ALL. 2023-05-12 10:12:38 +01:00
CalDescent
ba4866a2e6 Added GET /crosschain/tradeoffers/hidden endpoint, to show offers that are currently being hidden.
This uses the maxTradeOfferAttempts setting, so modifying this setting will affect the number of offers that are returned.
2023-05-12 10:01:38 +01:00
CalDescent
2cbc5aabd5 Added maxTradeOfferAttempts setting (default 3).
Offers with more than 3 failures will be hidden from the API and websocket, to prevent unbuyable offers from staying in the order books and continuously failing. maxTradeOfferAttempts can be optionally increased on a node to show more trades that would otherwise be hidden.
2023-05-12 09:59:30 +01:00
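A short sketch tying together the two commits above: listing the offers currently hidden by the maxTradeOfferAttempts threshold. Calling the endpoint via fetch from an app served by the node, and the array response shape, are assumptions.
```
// Offers hidden after too many failed buy attempts (threshold = maxTradeOfferAttempts, default 3)
const hiddenOffers = await (await fetch("/crosschain/tradeoffers/hidden")).json();
console.log(`${hiddenOffers.length} trade offers are currently hidden`);
```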
QuickMythril
e3be43a1e6 Changed get name API call to use reduced name 2023-05-11 12:31:00 -04:00
QuickMythril
1e10bcf3b0 Merge branch 'Qortal:master' into upgrade-tls 2023-05-09 15:38:20 -04:00
QuickMythril
a575ea4423 Merge pull request #120 from QuickMythril/get-votes-api
Created get votes API call
2023-05-09 15:34:54 -04:00
QuickMythril
3e45948646 Added get votes option to return only counts 2023-05-08 23:41:31 -04:00
QuickMythril
49c0d45bc6 Added count to get votes API call 2023-05-08 23:26:23 -04:00
QuickMythril
cda32a47f1 Added API call to get votes 2023-05-08 20:23:54 -04:00
CalDescent
49063e54ec Bump version to 4.0.3 2023-05-08 19:18:38 +01:00
CalDescent
df3c68679f Log the action to the console, instead of the entire event. 2023-05-08 14:43:00 +01:00
CalDescent
81788610c4 Merge branch 'master' of github.com:Qortal/qortal 2023-05-08 12:18:34 +01:00
CalDescent
fc10b61193 Fixed slow validation issue caused by loading the entire resource into memory. 2023-05-08 12:17:44 +01:00
CalDescent
05b4ecd4ed Updated documentation. 2023-05-08 12:16:17 +01:00
CalDescent
aba589c0e0 Added optional "build" parameter to GET_QDN_RESOURCE_STATUS.
This triggers an async build when checking the status.
2023-05-08 12:15:53 +01:00
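A hedged qortalRequest sketch for the commit above; the service and name values follow the examples used elsewhere in this log, and passing "build" as a boolean is an assumption.
```
const status = await qortalRequest({
    action: "GET_QDN_RESOURCE_STATUS",
    service: "WEBSITE",
    name: "QortalDemo",
    build: true // optional - triggers an async build while checking the status (boolean form assumed)
});
```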
CalDescent
c682fa89fd Avoid duplicate concurrent QDN builds. 2023-05-08 12:14:00 +01:00
CalDescent
21d1750779 Added more debug logging when building resources. 2023-05-08 12:13:12 +01:00
CalDescent
923e90ebed Fixed occasional NPE 2023-05-08 12:12:40 +01:00
catbref
9490c62242 Improved tx.pl: now supports local signing via openssl and the "deploy_at" transaction type, plus other minor fixes 2023-05-08 12:07:02 +01:00
CalDescent
c941bc6024 Catch and log all exceptions when publishing data. 2023-05-07 11:19:42 +01:00
CalDescent
0acf0729e9 Bump version to 4.0.2 2023-05-06 15:10:46 +01:00
CalDescent
1f77ee535f Added link to example Q-App projects. 2023-05-06 12:16:59 +01:00
CalDescent
b693a514fd Fixed warnings, and other improvements. 2023-05-06 12:13:41 +01:00
CalDescent
b571931127 Fixed formatting of services list 2023-05-05 22:35:19 +01:00
CalDescent
92b983a16e Q-Apps documentation updates. 2023-05-05 22:25:12 +01:00
CalDescent
3f71a63512 Increased timeout for other new actions. 2023-05-05 18:30:14 +01:00
CalDescent
86b5bae320 Set timeout of PUBLISH_MULTIPLE_QDN_RESOURCES to 60 mins. 2023-05-05 13:22:14 +01:00
CalDescent
3775135e0c Added helper methods to fetch lists of private or public service objects.
These can ultimately be used to help inform the cleanup manager on the best order to delete files when the node runs out of space. Public data should be given priority over private data (unless the node is part of a data market contract for that data - this isn't developed yet).
2023-05-05 12:39:11 +01:00
CalDescent
c172a5764b Added _PRIVATE services, to allow for publishing/validation of encrypted data.
New additions:

QCHAT_ATTACHMENT_PRIVATE
ATTACHMENT_PRIVATE
FILE_PRIVATE
IMAGE_PRIVATE
VIDEO_PRIVATE
AUDIO_PRIVATE
VOICE_PRIVATE
DOCUMENT_PRIVATE
MAIL_PRIVATE
MESSAGE_PRIVATE
2023-05-05 12:26:18 +01:00
CalDescent
1a5e3b4fb1 Added GET /names/search endpoint, to search names via case insensitive, partial name matching. 2023-05-05 11:24:52 +01:00
CalDescent
f39b6a15da Fixed refresh bug on Windows. 2023-05-05 11:03:13 +01:00
CalDescent
2dfee13d86 Remove all backslashes from vars in HTML parser (correct order this time) 2023-05-03 19:44:54 +01:00
CalDescent
b9d81645f8 Revert "Remove all backslashes from vars in HTML parser."
This reverts commit 9547a087b2.
2023-05-03 19:40:17 +01:00
CalDescent
9547a087b2 Remove all backslashes from vars in HTML parser. 2023-05-03 19:38:31 +01:00
CalDescent
e014a207ef Escape all vars added by HTML parser 2023-05-03 19:28:26 +01:00
CalDescent
611240650e Added GET /chat/messages/count endpoint, which is identical to /chat/messages but returns a count of the messages rather than the messages themselves. 2023-05-03 19:27:59 +01:00
CalDescent
c71dce92b5 Bump version to 4.0.1 2023-05-01 19:34:01 +01:00
CalDescent
34c3adf280 Limit MAIL and MESSAGE to 1MB. 2023-04-29 19:04:17 +01:00
CalDescent
95a1c6bf8b Added "encoding" parameter to the SEARCH_CHAT_MESSAGES action. 2023-04-29 17:48:58 +01:00
CalDescent
36e944d7e2 Added MAIL and MESSAGE services. 2023-04-29 17:45:38 +01:00
CalDescent
f044166b81 More qdnBase improvements, to hopefully handle all cases correctly. 2023-04-29 17:13:50 +01:00
CalDescent
aed1823afb Added support of simple Range headers when requesting QDN data. 2023-04-28 20:36:06 +01:00
CalDescent
6dfaaf0054 Set charset to UTF-8 in various places that bytes are converted to a string. 2023-04-28 13:06:29 +01:00
CalDescent
45bc2e46d6 Improved metadata trimming, to better handle multibyte UTF-8 characters. 2023-04-28 12:48:38 +01:00
CalDescent
46e2e1043d Fixed issue with <base href> introduced in v4.0.0 2023-04-28 12:18:27 +01:00
CalDescent
a3518d1f05 Revert "Fixed bug with base path."
This reverts commit ce52b39495.
2023-04-28 12:13:31 +01:00
CalDescent
0a1ab3d685 Added GET_QDN_RESOURCE_METADATA action. 2023-04-28 10:57:04 +01:00
CalDescent
5dbacc4db3 Added "Accept-Ranges" header when serving arbitrary data.
Allows for video seeking when using URL playback, even though the Range header isn't implemented yet. This could be heavily optimized by adding full support of the Range/Content-Range headers; however, this is still a big step forward as it allows for (inefficient) seeking.
2023-04-28 10:12:16 +01:00
CalDescent
1ce2dcfb2b Fixed bug which prevented qortal:// URLs from working properly in most cases. 2023-04-25 08:33:33 +01:00
CalDescent
ed6333f82e Allow for faster and more frequent retries when QDN data fails to be retrieved (thanks to suggestions from @xspektrex) 2023-04-23 19:14:28 +01:00
CalDescent
f27c9193c7 Auto delete any metadata files that are unreadable (e.g. due to being empty, or invalid JSON). 2023-04-23 11:30:42 +01:00
CalDescent
e48529704c Bump version to 4.0.0 2023-04-22 16:08:09 +01:00
CalDescent
53508f9298 Fixed problems in last commit. 2023-04-22 11:33:59 +01:00
CalDescent
33aeec7e87 Added various new service types, in preparation for Q-Apps release. 2023-04-22 11:00:21 +01:00
QuickMythril
8f847d3689 Upgraded to TLSv1.3 2023-04-21 19:30:29 -04:00
CalDescent
16dc23ddc7 Added new actions to gateway handler. 2023-04-21 21:45:16 +01:00
CalDescent
e80494b784 Fixed unit test. 2023-04-21 20:22:18 +01:00
CalDescent
111ec3b483 Fixed typo 2023-04-21 20:05:24 +01:00
CalDescent
db4a9ee880 Return "Resource does not exist" error if requesting a non-existent resource via GET_QDN_RESOURCE_URL. 2023-04-21 19:50:01 +01:00
CalDescent
b1ebe1864b Fixed bug in error handling. 2023-04-21 19:27:24 +01:00
CalDescent
3c251c35ea Fixed divide by zero error in GET /arbitrary/resource/status/* 2023-04-21 18:21:41 +01:00
CalDescent
4954a1744b Fixed case sensitivity bugs. 2023-04-21 17:47:29 +01:00
QuickMythril
a7bbad17d7 Merge pull request #118 from QuickMythril/temp-test
Added missing parameter to test
2023-04-21 11:06:09 -04:00
QuickMythril
8ca9423c52 Added missing parameter to test 2023-04-21 10:58:09 -04:00
CalDescent
32b9b7e578 Use a temporary file when reading on-chain data. 2023-04-21 13:59:29 +01:00
CalDescent
f045e10ada Removed all case sensitivity when searching names. 2023-04-21 12:56:15 +01:00
CalDescent
560282dc1d Added "exactMatchNames" parameter to GET /arbitrary/resources/search 2023-04-21 12:55:59 +01:00
CalDescent
9cd6372161 Improved GET /admin/settings/{setting} further, in order to support all settings (fixes ones such as bitcoinNet). 2023-04-21 12:06:16 +01:00
CalDescent
2370a67b8a Merge branch 'master' into q-apps 2023-04-21 11:07:01 +01:00
CalDescent
0993903aa0 Added GET /settings/{setting} endpoint
Based on work by @QuickMythril, but modified to be generic.
2023-04-21 11:03:24 +01:00
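A minimal sketch of reading a single setting; the /admin/settings/{setting} path and the bitcoinNet example come from the related commit above, while reading the response as plain text is an assumption.
```
// Read one node setting by name, e.g. bitcoinNet
const bitcoinNet = await (await fetch("/admin/settings/bitcoinNet")).text();
```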
CalDescent
f5e9b91d6b Merge pull request #116 from QuickMythril/restart
Added API call for restarting node
2023-04-21 10:27:58 +01:00
CalDescent
7fe507a497 Merge pull request #117 from QuickMythril/rm-swagger-check
Removed 3rd-party swagger server validation
2023-04-21 10:26:19 +01:00
CalDescent
10f12221c9 Fixed exception in readJson(), and removed some duplicated code. 2023-04-21 09:42:04 +01:00
QuickMythril
85980e4cfc Removed 3rd-party swagger server validation 2023-04-20 16:41:47 -04:00
QuickMythril
7bb6b84e86 Added API call for restarting node 2023-04-20 16:23:57 -04:00
CalDescent
dc25d33739 Merge branch 'master' into q-apps 2023-04-19 20:57:31 +01:00
CalDescent
358e67b050 Added "bindAddressFallback" setting, which defaults to "0.0.0.0".
Should fix problems on systems unable to use IPv6 wildcard (::) for listening, and avoids having to manually specify "bindAddress": "0.0.0.0" in settings.json.
2023-04-19 20:56:47 +01:00
CalDescent
8331241d75 Bump version to 3.9.1 2023-04-18 19:01:45 +01:00
CalDescent
e041748b48 Improved name rebuilding code, to handle some more complex scenarios. 2023-04-16 13:59:25 +01:00
CalDescent
06691af729 Merge pull request #113 from QuickMythril/drew-api
Added API calls to get node settings & get, create, and vote on polls
2023-04-16 11:43:31 +01:00
CalDescent
cfe6dfcd1c If nameFilter contains an empty or nonexistent list, return an empty array. 2023-04-15 18:27:55 +01:00
CalDescent
3f00cda847 "nameListFilter" added to LIST_QDN_RESOURCES and SEARCH_QDN_RESOURCES Q-Apps actions. 2023-04-15 16:02:25 +01:00
CalDescent
a286db2dfd "namefilter" param in GET /arbitrary/resources/search is now exact match, which makes more sense when filtering results by names in a list. 2023-04-15 15:55:52 +01:00
CalDescent
28bd4adcd2 Removed GET /arbitrary/resources/names endpoint, as it's unused and doesn't scale well. 2023-04-15 15:42:47 +01:00
CalDescent
61b7cdd025 Added "followedonly" and "excludeblocked" params to GET /arbitrary/resources and GET /arbitrary/resources/search, as well as LIST_QDN_RESOURCES and SEARCH_QDN_RESOURCES Q-Apps actions. 2023-04-15 15:24:10 +01:00
CalDescent
250245d5e1 Added new list management actions to Q-Apps documentation. 2023-04-15 14:34:30 +01:00
CalDescent
0258d2bcb6 Fixed layout issues recently introduced in documentation. 2023-04-15 14:31:41 +01:00
QuickMythril
735de93848 Removed internal use parameter from API endpoint 2023-04-15 09:25:28 -04:00
QuickMythril
57485bfe36 Removed check from poll tx that creator is owner 2023-04-15 09:11:27 -04:00
CalDescent
ed05560413 Gateway auth alert box replaced with a modal overlay in the lower right hand corner of the screen. 2023-04-15 10:11:33 +01:00
CalDescent
892b667f86 Fixed console errors seen in certain cases. 2023-04-15 09:57:26 +01:00
CalDescent
ea7a2224d3 Allow the name of a list to be specified as a "namefilter" param in GET /arbitrary/resources/search. Any names in the list will be included in the search (same as if they were specified manually via &name=). 2023-04-14 17:44:06 +01:00
CalDescent
20893879ca Allow multiple name parameters to optionally be included in GET /arbitrary/resources/search
Also updated SEARCH_QDN_RESOURCES action, to allow multiple names to be optionally specified via the "names" parameter.
2023-04-14 17:17:05 +01:00
CalDescent
b08e845dbb Updated docs to include sending of foreign coins 2023-04-14 16:24:27 +01:00
CalDescent
e60cd96514 Fixed occasional NPE seen in ArbitraryDataFileMessage 2023-04-14 11:02:27 +01:00
CalDescent
e2a2a1f956 Fixed bug with GET_QDN_RESOURCE_URL action. 2023-04-11 19:03:56 +01:00
CalDescent
7f53983d77 Added support for hash routing in URL shown in address bar. 2023-04-09 18:21:19 +01:00
CalDescent
ce52b39495 Fixed bug with base path. 2023-04-09 17:55:41 +01:00
CalDescent
3296779125 Update address bar when navigating within an app. 2023-04-09 17:11:20 +01:00
CalDescent
3dcd9d237c Added "_qdnBase" global javascript var, for apps to use as a basename / path prefix. 2023-04-07 19:03:28 +01:00
QuickMythril
23ec71d7be Renamed API calls from "voting" to "polls" 2023-04-06 04:48:18 -04:00
QuickMythril
5bbde4dcdb Added API calls to get polls & node settings 2023-04-06 04:41:51 -04:00
QuickMythril
dc2da8b283 Merge pull request #37 from DrewMPeacock/VotingTransactions
Fix up CREATE_POLL and VOTE_ON_POLL transactions to process and valid…
2023-04-06 03:35:58 -04:00
QuickMythril
f3772d19f5 Merge pull request #36 from DrewMPeacock/master
Add API handles to build CREATE_POLL and VOTE_ON_POLL transactions.
2023-04-06 03:34:50 -04:00
CalDescent
35def54ecc Added support for multiple block/follow lists.
Any list with the following prefix will be used in block/follow logic:

blockedNames
blockedAddresses
followedNames

For instance, any names in a list named "blockedNames_CustomBlockList" would also be blocked, along with those in the standard "blockedNames" list.

This will ultimately allow apps to offer custom block/follow lists to users (once list functionality is added to the Q-Apps API).
2023-04-02 14:42:49 +01:00
CalDescent
2086a2c476 Moved block/follow utility methods to a new ListUtils class 2023-04-02 10:58:16 +01:00
CalDescent
4835e5732d Fixed issue which caused UI to lock up when using qortalRequest() 2023-04-02 10:06:02 +01:00
CalDescent
d831972005 Fixed NPE in isMetadataEqual() 2023-03-31 18:41:58 +01:00
CalDescent
f6914821d3 Always use PUT for on-chain data. 2023-03-31 18:30:05 +01:00
CalDescent
073d124aef Add "percentLoaded" to resource statuses. 2023-03-31 17:27:04 +01:00
CalDescent
a83e332c11 Major upgrade of arbitrary data functionality, to support on-chain data for small payloads.
Max size for on-chain data is 239 bytes, due to 16-byte IV. Must be a single file resource, without .qortal folder.
2023-03-31 15:53:08 +01:00
CalDescent
7deb9328fa Don't delete metadata when deleting a resource from the Data Management screen. 2023-03-31 15:36:51 +01:00
CalDescent
e598d7476b Updated documentation, to discourage custom timeouts. 2023-03-31 14:58:30 +01:00
CalDescent
85735fabb2 Block external links. 2023-03-31 13:03:46 +01:00
CalDescent
7392082875 Ignore "qdnAuthBypassEnabled" setting when in gateway mode. 2023-03-31 09:48:25 +01:00
CalDescent
88f8041b05 Updated testnet documentation. 2023-03-31 09:47:38 +01:00
CalDescent
3109c3bb16 Increased timeouts to 5 mins for various actions that require the user to confirm. 2023-03-29 18:47:03 +01:00
CalDescent
8d462dedfa Added routing info to documentation. 2023-03-28 19:18:21 +01:00
CalDescent
fdd9741936 Documentation updates. 2023-03-26 21:19:10 +01:00
CalDescent
929d0ac897 Added "name" filter to GET /arbitrary/resources and LIST_QDN_RESOURCES. 2023-03-24 10:31:35 +00:00
CalDescent
952d18390b Q-Apps documentation updates 2023-03-24 09:53:49 +00:00
CalDescent
bc026d9d1c Merge branch 'master' into q-apps 2023-03-22 21:48:18 +00:00
CalDescent
ea2577d1c3 Include "created" and "updated" timestamps in GET /arbitrary/resource/* API endpoints. 2023-03-22 21:44:32 +00:00
CalDescent
c78593cf15 Reduced MAX_DESCRIPTION_LENGTH from 500 to 240, in preparation for upcoming arbitrary db reshape. 2023-03-22 21:40:26 +00:00
CalDescent
de4523c34e Added support for custom URL routing when using the APP service.
Unhandled requests (where no file exists) are now forwarded to the index file, to allow for custom routing in the app. This applies to the APP service only. For the WEBSITE and other services, unhandled requests will return a 404. In future, we may be able to allow websites to opt in to URL routing too, and maybe even allow both services to specify custom routing rules in a file.
2023-03-22 21:38:02 +00:00
CalDescent
b08329dcf1 Bump version to 3.9.0 2023-03-22 20:14:11 +00:00
CalDescent
668be633c4 arbitraryOptionalFeeTimestamp set to Friday, 31st March 2023 at 16:00:00 2023-03-22 19:58:51 +00:00
CalDescent
ea6225ab9a Include "mimeType" in metadata for single file resources (but only when a metadata file would have otherwise been created). 2023-03-19 16:27:57 +00:00
CalDescent
055b66e835 Fixed gateway prefix bugs. 2023-03-19 11:02:49 +00:00
CalDescent
2a7a2d3220 Added gateway-specific Q-Apps handler. For now, just show a warning alert if an app requires authentication / interactive features. 2023-03-19 10:41:37 +00:00
CalDescent
73a7c1fe7e More improvements to Service handling. 2023-03-19 10:18:13 +00:00
CalDescent
2848ae695c More improvements to Service handling. 2023-03-19 10:17:56 +00:00
CalDescent
713fd4f0c6 Added GET_QDN_RESOURCE_PROPERTIES Q-App action. 2023-03-19 08:56:06 +00:00
CalDescent
519bb10c60 Updated docs for PUBLISH_QDN_RESOURCE, to include "filename" parameter. 2023-03-18 18:15:28 +00:00
CalDescent
3a64336d9f If the MIME type can't be determined from the file's contents, fall back to using the filename. 2023-03-18 17:57:07 +00:00
CalDescent
5ecc633fd7 GET /arbitrary/resource/properties/{service}/{name}/{identifier} can now extract the MIME type from the file's contents as an alternative to using the filename. 2023-03-18 17:50:13 +00:00
CalDescent
1b9afce21f Filename API renamed to GET /resource/properties/{service}/{name}/{identifier}.
Now returns filename, size, and mimeType where available.
2023-03-18 16:39:23 +00:00
CalDescent
f9f34a61ac Treat service as an int in other parts of ArbitraryTransactionData too 2023-03-18 15:44:01 +00:00
CalDescent
46b225cdfb Treat service as an int in other parts of ArbitraryTransactionData too 2023-03-18 15:18:36 +00:00
CalDescent
4ce3b2a786 Added GET /resource/filename/{service}/{name}/{identifier} endpoint.
This allows the filename of single file resources to be returned via the API. Useful to help determine the file format of the data.
2023-03-18 15:16:41 +00:00
CalDescent
87ed49a2ee Added optional "filename" parameter when publishing data from a string or base64-encoded string.
This causes the data to be stored with the requested filename, instead of generating a random one. Also, randomly generated filenames now use a timestamp instead of a random number.
2023-03-18 15:11:53 +00:00
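A hedged sketch of publishing base64 data with the new "filename" parameter via the PUBLISH_QDN_RESOURCE action (documented with this parameter in a commit above); the data64 field name and the example values are assumptions.
```
const response = await qortalRequest({
    action: "PUBLISH_QDN_RESOURCE",
    service: "DOCUMENT",          // placeholder service
    name: "QortalDemo",           // placeholder registered name
    identifier: "example-doc",    // placeholder identifier
    data64: "SGVsbG8gUW9ydGFs",   // base64 payload ("data64" field name is an assumption)
    filename: "hello.txt"         // stored with this name instead of a randomly generated one
});
```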
CalDescent
a555f503eb Treat service as an int in ArbitraryTransactionData 2023-03-18 10:41:53 +00:00
CalDescent
50780aba53 Set max size of APP service to 50MB. 2023-03-18 10:41:14 +00:00
CalDescent
2bee3cbb5c Treat service as an int in ArbitraryTransactionData 2023-03-18 10:40:27 +00:00
CalDescent
534a44d0ce Fixed bugs with URL building. 2023-03-17 22:58:14 +00:00
CalDescent
469c1af0ef Added new search features to the SEARCH_QDN_RESOURCES action.
Existing action renamed to LIST_QDN_RESOURCES, which is an alternative for listing QDN resources without using a search query.
2023-03-17 22:11:34 +00:00
CalDescent
5656100197 Added "identifier", "name", and "prefix" parameters to GET /arbitrary/resources/search endpoint.
- "identifier" is an alternative to "query" that will search identifiers only.
- "name" is an alternative to "query" that will search names only.
- "query" remains the same as before - it searches both name and identifier fields.
- "prefix" is a boolean, and when true it will only match the beginning of each field. Works with "identifier", "name", and "query" params.
2023-03-17 19:47:57 +00:00
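A sketch using the parameters described in the commit above: prefix matching against identifiers only, with the example identifier borrowed from elsewhere in this log.
```
// Match resources whose identifier begins with "qortal_avatar"
const matches = await (await fetch(
    "/arbitrary/resources/search?identifier=qortal_avatar&prefix=true"
)).json();
```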
CalDescent
d9cac6db39 Allow "data:" URLs to be played in app/website media players.
E.g: src="data:video/mp4;base64,VideoContentEncodedInBase64GoesHere"
2023-03-17 19:33:41 +00:00
CalDescent
98b0b1932d Merge branch 'master' into q-apps 2023-03-17 13:17:47 +00:00
CalDescent
9968865d0e Updated parsing of "encoding" in websockets, for consistency with other params. 2023-03-17 13:17:23 +00:00
CalDescent
05eb337367 Added optional limit/offset/reverse query string params to GET /websockets/chat/messages.
Without this, the websocket returns all messages on connection, which is very time consuming.
2023-03-17 13:15:57 +00:00
CalDescent
5386db8a3f Added ping/pong functionality to CHAT websockets. 2023-03-17 13:11:01 +00:00
CalDescent
edae7fd844 Added optional "encoding" query string param for various chat APIs and websockets, as base58 is too slow for the amount of data it is now processing.
Usage:
Add `encoding=BASE64` query string parameter to opt in to base64 encoding of returned chat data. Defaults to BASE58 for backwards compatibility.

Compatible endpoints:
GET /chat/messages
GET /chat/message/{signature}
GET /chat/active/{address}
GET /websockets/chat/active/*
GET /websockets/chat/messages
2023-03-17 12:46:14 +00:00
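A sketch of opting in to base64 encoding on one of the compatible endpoints listed above; the signature value is a placeholder.
```
// Fetch a single chat message with its data returned as base64 instead of base58
const signature = "signature_goes_here"; // placeholder
const message = await (await fetch(`/chat/message/${signature}?encoding=BASE64`)).json();
```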
CalDescent
4840804d32 Fixed qdn utility usage docs. 2023-03-17 10:22:26 +00:00
CalDescent
a4551245cb Improved error logging in BlockArchiveUtils.importFromArchive() 2023-03-12 19:08:57 +00:00
CalDescent
e4f45c1a70 Break out of orphan loop when stopping. 2023-03-12 19:08:07 +00:00
CalDescent
bc44b998dc The transaction sequences reshape now fetches transactions from the archive.
This is required as it's the only place that holds the original order of each block's transactions. We cannot sort them, because the comparator function for transactions has some dependencies on the existing order for AT transactions. As a result, topOnly nodes cannot perform this reshape, and will be unable to run this version.
2023-03-10 21:29:35 +00:00
CalDescent
b89a35ac69 Merge branch 'master' into block-sequence
# Conflicts:
#	src/main/java/org/qortal/controller/Controller.java
#	src/main/java/org/qortal/repository/RepositoryManager.java
2023-03-10 19:52:05 +00:00
CalDescent
b5cb5f1da3 Fixed bug causing cache invalidation to be skipped, due to incorrect message reuse.
The "Data Management" screen should now update correctly without a core restart.
2023-03-10 19:46:58 +00:00
CalDescent
101023ba1d Updated link. 2023-03-10 16:39:14 +00:00
CalDescent
ed73162881 Merge branch 'master' into q-apps 2023-03-10 15:41:31 +00:00
CalDescent
0388626e42 Use a lower file size target (10MB instead of 100MB) when using archive V2, as the average block size is over 90% smaller. 2023-03-10 15:41:07 +00:00
CalDescent
c5c0dcf0f2 Testnet arbitraryOptionalFeeTimestamp set to Sun Mar 12 2023 at 12:00:00 UTC 2023-03-10 14:59:33 +00:00
CalDescent
384f592f59 Added testnet files to testnet/ directory.
This will be maintained with future feature triggers etc.
2023-03-10 14:59:27 +00:00
CalDescent
1528e05e0b Testnet arbitraryOptionalFeeTimestamp set to Sun Mar 12 2023 at 12:00:00 UTC 2023-03-10 14:29:52 +00:00
CalDescent
82c66c0555 Added testnet files to testnet/ directory.
This will be maintained with future feature triggers etc.
2023-03-10 14:28:13 +00:00
CalDescent
b5ce8d5fb3 Merge branch 'master' into q-apps
# Conflicts:
#	src/main/java/org/qortal/api/resource/ArbitraryResource.java
2023-03-10 14:03:08 +00:00
CalDescent
b4a736c5d2 Added optional "sender" filter to GET /chat/messages 2023-03-10 13:53:46 +00:00
CalDescent
4afbca7ed2 Merge branch 'rebuild-archive' 2023-03-10 11:50:09 +00:00
CalDescent
44aa0a6026 Catch ArithmeticException in block minter, so that it retries instead of giving up completely. 2023-03-10 10:00:30 +00:00
CalDescent
b1452bddf3 Added BlockArchiveV2 tests, and updated the V1 tests now that we no longer support bulk archiving/pruning 2023-03-06 17:17:55 +00:00
CalDescent
96ac883515 Throw exception and break out of loop if archive rebuilding fails 2023-03-06 14:40:17 +00:00
CalDescent
b6803490b9 Archive version is now loaded from the version of block 2 in the existing archive, or "defaultArchiveVersion" in settings if not available (default: 1). 2023-03-06 14:13:58 +00:00
CalDescent
3739920ad3 Added support for an optional fee in arbitrary transactions, to give the option for data to be published instantly (i.e. no proof of work / mempow required when fee is sufficient).
Takes effect at a future undecided timestamp.
2023-03-06 13:17:48 +00:00
CalDescent
7f21ea7e00 Added new bootstrap host 2023-03-05 13:16:58 +00:00
CalDescent
83b0ce53e6 Fixed bug in JSON validation. 2023-03-05 13:16:08 +00:00
CalDescent
d6ab9eb066 Rework of service validation, to allow a service to be specified as a single file resource.
This removes some complexity and duplication from custom validation functions. Q-Chat QDN functionality will need a re-test.
2023-03-05 11:39:53 +00:00
CalDescent
ac60ef30a3 Added JSON service, with a maximum size of 25KB, and a requirement that the data must be valid JSON. 2023-03-05 10:51:26 +00:00
CalDescent
94f14a39e3 Ensure theme is transferred when visiting a linked resource. 2023-03-03 18:16:35 +00:00
CalDescent
4b7844dc06 Pass the UI's theme to Q-Apps themselves, so they have the option of adapting to the user's theme.
Variable name is _qdnTheme, and possible values are "dark" or "light"
2023-03-03 17:55:46 +00:00
CalDescent
c40d0cc67b Same fix again but for multi file resources too. 2023-03-03 17:47:14 +00:00
CalDescent
3318093a4f Fixed preview functionality for resources other than websites/apps. 2023-03-03 17:33:15 +00:00
CalDescent
cf0681d7df Only rebuild if transaction has a name. 2023-03-03 17:10:45 +00:00
CalDescent
7d7cea3278 Only rebuild if transaction has a name. 2023-03-03 17:10:14 +00:00
CalDescent
7d38fa909d Rebuild name in ArbitraryTransaction.preProcess() 2023-03-03 16:15:10 +00:00
CalDescent
0b05de22a0 Rebuild name in ArbitraryTransaction.preProcess() 2023-03-03 16:14:43 +00:00
CalDescent
308196250e Updated documentation. 2023-03-03 16:13:49 +00:00
CalDescent
b254ca7706 Added support for optional Base64 encoding in FETCH_QDN_RESOURCE. 2023-03-03 15:39:37 +00:00
CalDescent
9ea2d7ab09 Updated documentation to remove an action that isn't supported in Q-Apps v1. 2023-03-03 14:24:10 +00:00
CalDescent
d166f625d0 Rework of preview mode.
All /arbitrary endpoints responsible for publishing data now support an optional "preview" query string parameter. If true, these endpoints will return a URL path to open the preview, rather than returning transaction bytes.
2023-03-03 14:20:45 +00:00
CalDescent
8e2dd60ea0 Increased default timeout for GET_USER_ACCOUNT from 30 seconds to 1 hour, to give the user more time to grant permissions. 2023-03-03 13:20:17 +00:00
CalDescent
d51f9368ef Fixed bug in HTML parser 2023-03-03 12:39:44 +00:00
CalDescent
b17035c864 Escape QDN vars and prefix with underscores. 2023-03-03 11:57:07 +00:00
CalDescent
fa14568cb9 Fixed issue causing "totalChunkCount" to exclude the metadata file in some cases.
ArbitraryDataFile now has a fileCount() method which returns the total number of files associated with that piece of data - i.e. chunks, metadata, and the complete file in cases where it isn't chunked.
2023-03-03 10:42:43 +00:00
CalDescent
64cd21b0dd Merge branch 'master' into q-apps 2023-02-28 22:03:19 +00:00
CalDescent
abdc265fc6 Removed legacy bulk archiving/pruning code that is no longer needed. 2023-02-26 16:54:14 +00:00
CalDescent
1153519d78 Various fixes as a result of moving to archive version 2. 2023-02-26 16:53:43 +00:00
CalDescent
0af6fbe1eb Added POST /repository/archive/rebuild endpoint to allow local archive to be rebuilt.
When "archiveVersion" is set to 2 in settings, this should allow the archive size to reduce by over 90%. Some nodes might want to maintain an older/larger version, for the purposes of development/debugging, so this is currently opt-in.
2023-02-26 16:52:48 +00:00
CalDescent
d54006caf7 Added "archiveVersion" setting, which specifies the archive version to be used when building. Defaults to 1 for now, but will bump to version 2 at the time of a wider rollout. 2023-02-26 15:59:18 +00:00
CalDescent
e1771dbaea Merge branch 'master' into rebuild-archive 2023-02-26 14:29:37 +00:00
CalDescent
9566bda279 Merge branch 'master' into block-sequence 2023-02-26 12:55:35 +00:00
CalDescent
cc98abeffb Reduced log spam 2023-02-26 12:51:52 +00:00
CalDescent
a3702ac6b0 Revert "Merge pull request #111 from AlphaX-Projects/master"
This reverts commit 69902f7f5b, reversing
changes made to 466c727dee.
2023-02-26 12:45:38 +00:00
CalDescent
c1ffe557e1 Fixed wording in marshaller exceptions. 2023-02-24 13:42:59 +00:00
CalDescent
c310a7c5e8 Added "X-API-VERSION" header support in POST /transactions/process.
Default is version "1". If version "2" is specified, the API will return the full transaction JSON on success, rather than just "true".

Example usage:

curl -X POST "http://localhost:12391/transactions/process" -H  "X-API-VERSION: 2" -d "signedTransactionBytesHere"
2023-02-24 13:41:52 +00:00
CalDescent
c5a0b00cde Q-Apps documentation updates based on UI development progress. 2023-02-24 12:15:22 +00:00
QuickMythril
69902f7f5b Merge pull request #111 from AlphaX-Projects/master
Update hsqldb and grpc
2023-02-24 05:02:32 -05:00
AlphaX-Projects
999e8b8aca Update pom.xml 2023-02-24 09:12:57 +01:00
CalDescent
466c727dee Bump version to 3.8.9 2023-02-22 19:01:10 +00:00
CalDescent
ba9f3b335c Added unit test to reproduce the UPDATE_NAME issue and prove that the fix is working correctly. 2023-02-22 18:59:43 +00:00
CalDescent
148ca0af05 Fixed long term bug with UPDATE_NAME transactions, causing name data to be incorrectly deleted if newName == name. 2023-02-22 09:16:52 +00:00
CalDescent
c39b9c764b Bump version to 3.8.8 2023-02-20 18:12:40 +00:00
CalDescent
d30eb6141a Default minPeerVersion set to 3.8.7 2023-02-20 18:10:21 +00:00
CalDescent
52c806f9e6 Bump version to 3.8.7 2023-02-19 22:44:59 +00:00
CalDescent
b2d31a7e02 Rebuild the name's history before processing a CancelSellNameTransaction. 2023-02-19 22:26:22 +00:00
CalDescent
cfa0b1d8ea Bump version to 3.8.6 2023-02-19 18:02:22 +00:00
CalDescent
edacce1bac Improved logging when creating bootstraps, and catch/log all exceptions. 2023-02-19 17:43:13 +00:00
CalDescent
074cba2266 Added QCHAT_AUDIO and QCHAT_VOICE services (limited to 10MB each) 2023-02-19 17:33:17 +00:00
CalDescent
7f23ef64a2 Updated /arbitrary/metadata/* response when not found. 2023-02-17 17:37:30 +00:00
CalDescent
5b7e9666dc Send URL updates to the UI when pages are loaded. 2023-02-17 15:40:06 +00:00
CalDescent
20d4e88fab Fixed API endpoints relying on getTransactionsFromSignature(), which therefore won't have worked properly since core V2. 2023-02-12 13:21:54 +00:00
CalDescent
a8c27be18a Modified AT and transaction repository queries to use Transactions.block_sequence instead of BlockTransactions.sequence.
The former is available for all blocks, whereas the latter is only available for unpruned blocks.

Also removed joins with the Blocks table - as the Blocks table is also pruned - and instead retrieved the height from the Transactions table.
2023-02-12 13:21:41 +00:00
CalDescent
af6be759e7 Fixed long term issue where logs would report "Repository in use by another process?" when the database actually failed to start for some other reason. It will now log the correct reason. 2023-02-12 13:20:31 +00:00
CalDescent
896d814385 Add block_sequence to Transactions table, and populate all past transactions.
This data was being lost when pruning the BlockTransactions table.

Note: on first run this will reshape the db, which can take several minutes.
2023-02-12 13:20:23 +00:00
QuickMythril
f4a32d19dd Merge pull request #110 from QuickMythril/update-electrumx
Add new ElectrumX servers
2023-02-12 00:10:56 -05:00
QuickMythril
eb6d84c04d Add new ElectrumX servers 2023-02-12 00:10:13 -05:00
CalDescent
26587067d8 Merge pull request #109 from AlphaX-Projects/master
Add more data to active chats websocket
2023-02-11 13:25:13 +00:00
AlphaX-Projects
227d93a31e Merge branch 'Qortal:master' into master 2023-02-11 13:39:26 +01:00
CalDescent
76f17dda53 Merge branch 'master' into rebuild-archive 2023-02-10 17:48:05 +00:00
CalDescent
830bae3dc1 Merge branch 'at-states-fix'
# Conflicts:
#	src/main/java/org/qortal/controller/repository/AtStatesPruner.java
#	src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
2023-02-10 17:45:22 +00:00
CalDescent
ec09312cc5 Updated AdvancedInstaller project for 3.8.5 2023-02-10 17:42:12 +00:00
AlphaX-Projects
11654ba9c6 Add Chat Data 2023-02-10 11:05:54 +01:00
AlphaX-Projects
ea356d1026 add signatures to websocket 2023-02-10 10:27:28 +01:00
CalDescent
e7a3e511bd Bump version to 3.8.5 2023-02-08 19:37:01 +00:00
CalDescent
6fca30ce75 Added GET /admin/summary/alltime endpoint, to view a summary of chain activity since genesis. 2023-02-07 19:56:54 +00:00
CalDescent
e903e59f7f Merge pull request #107 from QuickMythril/unused-address
Add unused foreign address to API calls
2023-02-06 18:43:22 +00:00
CalDescent
bef170df7e Updated PirateChain lightwallet servers. 2023-02-06 18:42:37 +00:00
QuickMythril
386bfa4e20 Merge pull request #108 from AlphaX-Projects/master
Add electrum servers and fix java reflect error
2023-02-05 07:38:30 -05:00
AlphaX-Projects
6f867031e2 Add electrum servers and fix java reflect error 2023-02-05 12:53:49 +01:00
QuickMythril
8f589391a6 Updated deprecated actions
Node.js 12 actions are deprecated. Please update the following actions to use Node.js 16: actions/checkout@v2, actions/cache@v2, actions/setup-java@v2. For more information see: https://github.blog/changelog/2022-09-22-github-actions-all-actions-will-begin-running-on-node16-instead-of-node12/
2023-02-04 21:57:31 -05:00
QuickMythril
30c9f63cb1 Add unused foreign address to API calls
POST ​/crosschain​/{COIN}/unusedaddress
2023-02-04 21:03:55 -05:00
QuickMythril
952b21d9bd Merge pull request #105 from QuickMythril/update-electrumx
Updated ElectrumX servers
2023-02-04 18:40:39 -05:00
QuickMythril
1f410a503e Updated ElectrumX servers 2023-02-04 18:30:31 -05:00
CalDescent
ae5b713e58 Rework of AT state trimming and pruning, in order to more reliably track the "latest" AT states.
This should fix an edge case where AT states data was pruned/trimmed but it was then later required in consensus. The older state was deleted because it was replaced by a new "latest" state in a brand new block. But once the new "latest" state was orphaned from the block, the old "latest" state was then required again.

This works around the problem by excluding very recent blocks in the latest AT states data, so that it is unaffected by real-time sync activity.

The trade off is that we could end up retaining more AT states than needed, so a secondary cleanup process may need to run at some time in the future to remove these. But it should only be a minimal amount of data, and can be cleaned up with a single query. This would have been happening to a certain degree already.

# Conflicts:
#	src/main/java/org/qortal/controller/repository/AtStatesPruner.java
#	src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
2023-02-03 12:39:27 +01:00
CalDescent
257ca2da05 Bumped default block archive serialization version to V2. 2023-02-03 12:36:57 +01:00
CalDescent
d27316eb64 Clear cache after rebuilding. 2023-02-02 18:11:56 +01:00
CalDescent
64d8353629 Added V2 support in the block archive, and added feature to rebuild a V1 block archive using V2 block serialization. Should drastically reduce the archive size once rebuilt. 2023-02-02 15:54:03 +01:00
CalDescent
f5e30eeaf5 Merge pull request #104 from QuickMythril/foreign-height
Add foreign chain height to API calls
2023-02-01 20:28:55 +00:00
QuickMythril
21f5d9a3d0 Add foreign chain height to API calls
GET ​/crosschain​/{COIN}/height
2023-01-31 17:23:25 -05:00
CalDescent
3077810ea8 Fixed bugs causing websites to report as "Not published" when listed in the UI. 2023-01-29 18:05:04 +00:00
CalDescent
4ba2f7ad6a Small documentation updates 2023-01-29 17:20:25 +00:00
CalDescent
8eba0f89fe Added to Q-Apps documentation 2023-01-29 17:09:28 +00:00
CalDescent
600f98ddab Fixed bug in extractComponents() 2023-01-29 13:38:08 +00:00
CalDescent
eb07e6613f Fixed small bug 2023-01-29 13:23:12 +00:00
CalDescent
6c445ff646 GET_ACCOUNT_ADDRESS and GET_ACCOUNT_PUBLIC_KEY replaced with a single action: GET_USER_ACCOUNT, as it doesn't make sense to request address and public key separately (they are essentially the same thing). 2023-01-29 13:23:01 +00:00
CalDescent
4d9cece9fa Timeouts are specified by action, rather than using 10 seconds for every request. This allows certain requests to wait for longer before timing out, such as ones that create transactions. 2023-01-29 13:07:26 +00:00
CalDescent
8beffd4dae Switched to document.querySelectorAll() as otherwise we were only intercepting the first image on the page. 2023-01-29 12:12:47 +00:00
CalDescent
566c6a3f4b Added support for img src updates from a Q-App.
Example:

document.getElementById("logo").src = "qortal://thumbnail/QortalDemo/qortal_avatar";
2023-01-29 12:04:39 +00:00
CalDescent
1be3ae267e Reduce log spam. 2023-01-29 11:45:09 +00:00
CalDescent
7af551fbc5 Added "GET_QDN_RESOURCE_URL" Q-Apps action, to allow a website/app to programmatically determine the URL to retrieve any QDN resource it needs to access.
Examples:

### Get URL to load a QDN resource
```
let url = await qortalRequest({
    action: "GET_QDN_RESOURCE_URL",
    service: "THUMBNAIL",
    name: "QortalDemo",
    identifier: "qortal_avatar"
    // path: "filename.jpg" // optional - not needed if resource contains only one file
});
```

### Get URL to load a QDN website
```
let url = await qortalRequest({
    action: "GET_QDN_RESOURCE_URL",
    service: "WEBSITE",
    name: "QortalDemo",
});
```

### Get URL to load a specific file from a QDN website
```
let url = await qortalRequest({
    action: "GET_QDN_RESOURCE_URL",
    service: "WEBSITE",
    name: "AlphaX",
    path: "/assets/img/logo.png"
});
```
2023-01-29 11:44:59 +00:00
CalDescent
6ba6c58843 Added support for qortal:// protocol links when loading images from the DOM.
Example: <img src="qortal://THUMBNAIL/QortalDemo/qortal_avatar" />
2023-01-29 11:18:00 +00:00
CalDescent
ab34fae810 Merge pull request #90 from QuickMythril/german
Updated German translations
2023-01-28 20:22:11 +00:00
CalDescent
42f2d015b7 Merge branch 'master' into german 2023-01-28 20:22:02 +00:00
CalDescent
2181ece28d Merge pull request #89 from lexandr0s/patch-2
Update ApiError_ru.properties
2023-01-28 20:21:13 +00:00
CalDescent
03a5d0e5f9 Merge pull request #88 from lexandr0s/patch-1
Update SysTray_ru.properties
2023-01-28 20:21:00 +00:00
CalDescent
352f094272 Merge pull request #99 from Nuc1eoN/polish-translation
Add polish translation
2023-01-28 20:20:35 +00:00
CalDescent
ca09dd264f Merge branch 'master' into q-apps 2023-01-28 20:14:35 +00:00
CalDescent
eea98d0bc7 Fixed bugs. 2023-01-28 18:37:04 +00:00
CalDescent
9c58faa7c2 Added LINK_TO_QDN_RESOURCE support in the gateway. 2023-01-28 18:36:55 +00:00
CalDescent
3cdfa4e276 Increased loading screen refresh interval from 1s to 2s. 2023-01-28 18:03:00 +00:00
CalDescent
380ba5b8c2 Show "File not found" on the loading screen when navigating to a non-existent resource. 2023-01-28 18:01:52 +00:00
CalDescent
04f248bcdd Upgraded gateway to support service and identifier.
The URL used to access the gateway is now interpreted, and the most appropriate resource is served. This means it can be used in different ways to retrieve any type of content from QDN. For example:

/QortalDemo
/QortalDemo/minting-leveling/index.html
/WEBSITE/QortalDemo
/WEBSITE/QortalDemo/minting-leveling/index.html
/APP/QortalDemo
/THUMBNAIL/QortalDemo/qortal_avatar
/QCHAT_IMAGE/birtydasterd/qchat_BfBeCz
/ARBITRARY_DATA/PirateChainWallet/LiteWalletJNI/coinparams.json
2023-01-28 17:56:24 +00:00
CalDescent
37b20aac66 Upgraded rendering to support identifiers, as well as single file resources.
This allows any QDN resource (e.g. an IMAGE) to be linked to from a website/app and then rendered on screen. It isn't yet supported in gateway or domain map mode, as these need some more thought.
2023-01-28 16:55:04 +00:00
CalDescent
e1e52b3165 RenderResource moved to restricted resources, as /render/* endpoints shouldn't ever need to be served over the gateway. 2023-01-28 15:52:46 +00:00
CalDescent
c5c826453b Removed unnecessary join when finding MESSAGE transactions, which caused secret to be unavailable when querying pruned blocks. 2023-01-28 15:41:48 +00:00
CalDescent
e86b9b1caf Added additional Litecoin ElectrumX server. 2023-01-28 15:34:30 +00:00
CalDescent
46e8baac98 Added linking between QDN websites / apps.
The simplest way to link to another QDN website is to include a link with the format:
<a href="qortal://WEBSITE/QortalDemo">link text</a>

This can be expanded to link to a specific path, e.g:
<a href="qortal://WEBSITE/QortalDemo/minting-leveling/index.html">link text</a>

Or it can be initiated programmatically, via qortalRequest():

let res = await qortalRequest({
    action: "LINK_TO_QDN_RESOURCE",
    service: "WEBSITE",
    name: "QortalDemo",
    path: "/minting-leveling/index.html" // Optional
});

Note that qortal:// links don't yet support identifiers, so the above format is not confirmed.
2023-01-28 15:22:03 +00:00
CalDescent
3b6e1ea27f Added "qdnContext" variable, with possible values of "render", "gateway", or "domainMap".
This is used internally to allow Q-Apps to determine how to handle certain requests.
2023-01-28 14:42:29 +00:00
CalDescent
5a1cc7a0de Fixed/improved logging when an exception is caught whilst adding statuses to resources. 2023-01-28 14:32:17 +00:00
CalDescent
0ec5e39517 Fixed additional NPE 2023-01-28 14:31:04 +00:00
CalDescent
bede5a71f8 Fixed various NPEs when checking statuses of non-existent resources. 2023-01-28 14:17:23 +00:00
CalDescent
5e750b4283 Added new ArbitraryResourceStatus "NOT_PUBLISHED" - for when a non-existent resource is attempted to be loaded. 2023-01-28 14:15:54 +00:00
CalDescent
4a42dc2d00 Don't require prior authorization of QDN resources if qdnAuthBypassEnabled is true. Necessary for resource linking. 2023-01-28 14:14:44 +00:00
CalDescent
7fc170575c Merge branch 'cancel-sell-name-fixes' 2023-01-28 12:11:42 +00:00
CalDescent
876658256f Prevent a P2SH address being funded for a trade if there is an unconfirmed buy or cancel request in progress for it already.
This prevents foreign coins from leaving the local wallet when there is a high probability that the trade will fail, and therefore should reduce the chances of losing transaction fees due to refunds.

Whenever this occurs, the UI will show "Trade has an existing buy request or is pending cancellation." after clicking Buy.
2023-01-28 11:57:15 +00:00
CalDescent
a24ba40d5c Added additional Dogecoin ElectrumX server. 2023-01-28 09:54:15 +00:00
CalDescent
06d8a21714 Added CANCEL_SELL_NAME equivalents to NamesDatabaseIntegrityCheck.java 2023-01-27 19:38:26 +00:00
CalDescent
ae44065d7e Fixed issue with CancelSellName transactions. 2023-01-27 19:34:23 +00:00
CalDescent
6ad0989ea2 Reduce log spam 2023-01-27 18:35:44 +00:00
CalDescent
5962ebd08a More logging improvements in ArbitraryDataReader.decrypt() 2023-01-27 16:56:53 +00:00
CalDescent
bf06d47842 Create an ArbitraryDataResource object when building. Eventually this could be passed in to the reader instead of the individual components (service, name, identifier, etc)
This is now used to improve logging when extracting.
2023-01-27 16:55:43 +00:00
CalDescent
d7b1615d4f qdnAuthBypassEnabled defaulted to true, as it is needed for Q-Apps. 2023-01-27 16:26:36 +00:00
CalDescent
8c708558cb Implemented ElectrumX version negotiation. Fixes issues with DOGE wallet. 2023-01-27 14:33:34 +00:00
CalDescent
6b36d94c6f Removed searchResultsTransactions cache, to simplify code. The hostedTransactions cache is still in place, which limits disk reads when searching, so this additional cache isn't really needed. 2023-01-27 12:48:42 +00:00
CalDescent
8c41a4a6b3 Moved BootstrapResource to restricted resources 2023-01-22 21:08:42 +00:00
CalDescent
8dffe1e3ac Another rewrite of Q-App APIs, which removes the /apps/* redirects and instead calls the main APIs directly.
- All APIs are now served over the gateway and domain map, with the exception of /admin/*
- AdminResource moved to a "restricted" folder, so that it isn't served over the gateway/domainMap ports.
- This opens the door to websites/apps calling core APIs directly for certain read-only functions, as an alternative to using qortalRequest().
2023-01-22 18:59:46 +00:00
CalDescent
932a553b91 Merge branch 'master' into q-apps
# Conflicts:
#	src/main/java/org/qortal/api/resource/ArbitraryResource.java
2023-01-22 16:37:02 +00:00
CalDescent
1d568fa462 Return file lists via /arbitrary/metadata/* endpoints, but exclude them from /arbitrary/resources/* endpoints. 2023-01-22 16:29:23 +00:00
CalDescent
328ba48224 Merge branch 'master' into qdn-file-list 2023-01-22 16:12:54 +00:00
CalDescent
6196841609 Allow files without extensions in QCHAT_ATTACHMENT validation. 2023-01-22 15:59:16 +00:00
CalDescent
9f30571b12 Use a filename without an extension when publishing data from a string (instead of .tmp) 2023-01-22 15:58:53 +00:00
CalDescent
1f7fec6251 Exclude .qortal directory in validation functions, as it was incorrectly failing with "DIRECTORIES_NOT_ALLOWED". 2023-01-20 10:40:20 +00:00
CalDescent
c3f19ea0c1 Don't allow the custom validation methods to evade superclass validation. 2023-01-20 10:21:05 +00:00
CalDescent
e31515b4a2 Fixed bugs preventing single file GIF repositories and QCHAT attachments from passing validation. 2023-01-20 10:14:42 +00:00
CalDescent
8ad46b6344 Fixed/removed incorrect comments 2023-01-20 09:58:28 +00:00
CalDescent
57eacbdd59 Added "GET_PRICE" action. 2023-01-19 20:47:06 +00:00
CalDescent
86d6037af3 Added "SEARCH_TRANSACTIONS" action. 2023-01-19 20:22:29 +00:00
CalDescent
ca80fd5f9c Added "FETCH_BLOCK" and "FETCH_BLOCK_RANGE" Q-Apps actions. 2023-01-19 20:05:46 +00:00
CalDescent
03a54691a1 Merge branch 'master' into q-apps 2023-01-19 19:57:01 +00:00
CalDescent
3c8088e463 Removed all code duplication for Q-Apps API endpoints.
Requests are now internally routed to the existing API handlers. This should allow new Q-Apps API endpoints to be added much more quickly, as well as removing the need to maintain their code separately from the regular API endpoints.
2023-01-19 19:56:50 +00:00
CalDescent
2f7912abce Compute balances for Bitcoin-like coins using unspent outputs. Should fix occasional incorrect balance issue, and speed up loading time. 2023-01-18 19:30:43 +00:00
CalDescent
64529e8abf Added "reverse" and "includeOnlineSignatures" params to GET /blocks/range/{height} endpoint. 2023-01-18 19:04:54 +00:00
CalDescent
9d81ea7744 Bump version to 3.8.4 2023-01-16 20:26:00 +00:00
CalDescent
688acd466c Set checkpoint to block 1136300 2023-01-16 20:23:43 +00:00
CalDescent
81cf46f5dd Disable block signing on topOnly nodes. Minting rewards are still earned on topOnly for now. 2023-01-16 20:18:23 +00:00
CalDescent
4c52d6f0fc Fixed bug causing initial latestATStates data to be discarded. 2023-01-15 21:58:17 +00:00
CalDescent
de47a94677 Fixed bug causing initial latestATStates data to be discarded. 2023-01-15 15:51:10 +00:00
CalDescent
bd4c47dba6 Rework of AT state trimming and pruning, in order to more reliably track the "latest" AT states.
This should fix an edge case where AT states data was pruned/trimmed but it was then later required in consensus. The older state was deleted because it was replaced by a new "latest" state in a brand new block. But once the new "latest" state was orphaned from the block, the old "latest" state was then required again.

This works around the problem by excluding very recent blocks in the latest AT states data, so that it is unaffected by real-time sync activity.

The trade off is that we could end up retaining more AT states than needed, so a secondary cleanup process may need to run at some time in the future to remove these. But it should only be a minimal amount of data, and can be cleaned up with a single query. This would have been happening to a certain degree already.
2023-01-15 14:32:33 +00:00
CalDescent
c03f271825 Keep track of peers which are too divergent, and return an isTooDivergent boolean in /peers APIs.
isTooDivergent will be true or false if a definitive decision has been made, or missing from the response if not yet known. Therefore it should be safe to treat `"isTooDivergent": false` as a peer that is on the same chain.
2023-01-15 12:44:19 +00:00
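A sketch of acting on the new field from the commit above; per the note, only an explicit false is treated as definitely being on the same chain.
```
const peers = await (await fetch("/peers")).json();
// isTooDivergent may be absent while no definitive decision has been made, so test for an explicit false
const sameChainPeers = peers.filter(peer => peer.isTooDivergent === false);
```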
CalDescent
dfe3754afc Block connections with peers older than 3.8.2, as those versions are nonfunctional due to recent feature triggers. 2023-01-15 12:07:27 +00:00
CalDescent
30105199a2 Default pruneBlockLimit increased from 1450 to 6000 (approx 5 days), to be more similar to the AT states retention time of full nodes. 2023-01-15 12:00:32 +00:00
CalDescent
e91e612b55 Added checkpoint lookup on startup.
Currently enabled for topOnly nodes only. This will detect if the node is on a divergent chain, and will force a bootstrap or resync (depending on settings) in order to rejoin the main chain.
2023-01-15 11:33:16 +00:00
CalDescent
2a55eba1f7 Updated AdvancedInstaller project for v3.8.3 2023-01-15 11:28:37 +00:00
CalDescent
39e59cbcf8 Bump version to 3.8.3 2023-01-14 18:47:46 +00:00
CalDescent
016191bdb0 Reduce log spam when a QDN resource can't be found due to it not being published. 2023-01-14 15:15:48 +00:00
CalDescent
0596a07c7d Reduced ArbitraryDataFileRequestThread count from 10 to 5, to reduce network flooding. 2023-01-14 12:58:35 +00:00
CalDescent
c62c59b445 Use correct timeout (12s) when sending arbitrary data to a peer, and improved logging. 2023-01-14 12:57:44 +00:00
CalDescent
f78101e9cc Updated a default bootstrap host to use a domain instead of its IP. 2023-01-14 11:07:54 +00:00
CalDescent
476fdcb31d Added serialization tests for chatReference, and grouped with other serialization tests into a single package. 2023-01-14 10:38:50 +00:00
CalDescent
02d5043ef7 Added missing calls to electrumX.setBlockchain(instance); for DGB and RVN. Thanks to @QuickMythril for noticing this. 2023-01-13 20:17:27 +00:00
CalDescent
0ad9e2f65b Added QCHAT_ATTACHMENT service, with custom validation function. 2023-01-13 20:08:47 +00:00
CalDescent
4dc0033a5a Added missing chatReferenceTimestamp in unit tests. 2023-01-13 19:45:52 +00:00
CalDescent
745cfe8ea1 chatReferenceTimestamp set to 1674316800000 (Sat, 21 Jan 2023 16:00:00 GMT) 2023-01-13 19:45:38 +00:00
CalDescent
6284a4691c Import test transactions as part of the serialization tests, to catch any issues with db schema data lengths. 2023-01-13 19:28:44 +00:00
CalDescent
41f88be55e Test serialization of CHAT transactions 2023-01-13 19:27:38 +00:00
CalDescent
ba95f8376f Increase CHAT transaction data limits to the maximum (4000 bytes) to allow for upcoming UI features. 2023-01-13 19:27:02 +00:00
CalDescent
8e97c05b56 Added missing feature trigger from unit tests. 2023-01-13 19:25:06 +00:00
CalDescent
2c78f4b45b Fixed typo and reworded "methods" to "actions", for consistency with the code. 2023-01-13 18:25:30 +00:00
CalDescent
613ce84df8 More documentation updates 2023-01-13 18:11:44 +00:00
CalDescent
2822d860d8 Fixed sample app 2023-01-13 18:01:38 +00:00
CalDescent
5a052a4f67 Documentation updates 2023-01-13 17:57:01 +00:00
CalDescent
32c2f68cb1 Initial APIs and core support for Q-Apps 2023-01-13 17:36:27 +00:00
CalDescent
4232616a5f Fixed QDN website preview functionality. 2023-01-13 12:07:24 +00:00
CalDescent
8ddcae249c Added gatewayLoopbackEnabled setting (default false) to allow serving gateway requests via localhost.
Useful for testing, but not recommended for production environments.
2023-01-13 12:05:57 +00:00
CalDescent
eb569304ba Improved refund/refundAll HTLC code, to handle cases where there have been multiple purchase attempts for the same AT. 2023-01-06 10:38:25 +00:00
CalDescent
b0486f44bb Added chat_reference index to speed up searches. 2023-01-02 17:47:36 +00:00
CalDescent
cecf28ab7b Merge branch 'chat-reference'
# Conflicts:
#	src/main/java/org/qortal/block/BlockChain.java
#	src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java
#	src/main/resources/blockchain.json
#	src/test/resources/test-chain-v2-block-timestamps.json
#	src/test/resources/test-chain-v2-disable-reference.json
#	src/test/resources/test-chain-v2-founder-rewards.json
#	src/test/resources/test-chain-v2-leftover-reward.json
#	src/test/resources/test-chain-v2-minting.json
#	src/test/resources/test-chain-v2-qora-holder-extremes.json
#	src/test/resources/test-chain-v2-qora-holder-reduction.json
#	src/test/resources/test-chain-v2-qora-holder.json
#	src/test/resources/test-chain-v2-reward-levels.json
#	src/test/resources/test-chain-v2-reward-scaling.json
#	src/test/resources/test-chain-v2-reward-shares.json
#	src/test/resources/test-chain-v2.json
2023-01-02 17:32:38 +00:00
CalDescent
98b92a5bf1 Introduced "historic threshold" to ARBITRARY transactions in order to save on verification times of older transactions.
This is based on the approach used for PUBLICIZE transactions.
2023-01-02 16:58:50 +00:00
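A minimal sketch of the "historic threshold" idea: transactions older than a cut-off skip the expensive nonce verification, on the assumption that they were validated when first confirmed. The class name, method name and threshold value below are illustrative assumptions, not the actual ARBITRARY transaction code:

public class HistoricThresholdSketch {
    // Placeholder cut-off (milliseconds since epoch); the real value would be a chain parameter.
    static final long HISTORIC_THRESHOLD = 1_640_995_200_000L;

    static boolean requiresNonceVerification(long txTimestamp) {
        return txTimestamp >= HISTORIC_THRESHOLD;
    }

    public static void main(String[] args) {
        System.out.println(requiresNonceVerification(1_600_000_000_000L)); // false: historic, skip the check
        System.out.println(requiresNonceVerification(1_700_000_000_000L)); // true: recent, verify the nonce
    }
}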
CalDescent
6b45901c47 Fixed validation of existing reward share transactions. 2022-12-31 14:43:37 +00:00
CalDescent
166f9bd079 Bump version to 3.8.2 2022-12-24 21:28:02 +00:00
CalDescent
2f8f896077 Merge remote-tracking branch 'catbref/bugfix-deploy-at' 2022-12-24 16:01:23 +00:00
CalDescent
9a77aff0a6 Reduced difficulty of PUBLICIZE transactions from 15 to 14 (it is now the same as ARBITRARY transactions) 2022-12-24 14:10:49 +00:00
CalDescent
c6d65a88dc Increase mempow difficulty and threshold in ChatTransaction, to match the values in the UI. 2022-12-22 18:19:27 +00:00
CalDescent
4aea29a91b Improved PublicizeTransaction validation. 2022-12-22 18:03:29 +00:00
CalDescent
0e81665a36 Revert "Filter out peers of divergent or significantly inferior chains when syncing."
This reverts commit 1dc7f056f9. To be un-reverted in future when there is more time available for testing.
2022-12-22 15:10:19 +00:00
CalDescent
2a4ac1ed24 Limit to 250 CHAT messages per hour per account. 2022-12-22 15:09:04 +00:00
CalDescent
bb74b2d4f6 MAX_AVG_RESPONSE_TIME for ElectrumX servers increased from 0.5s to 1s. 2022-12-22 14:25:10 +00:00
CalDescent
758a02d71a Log the Pirate light client server address if the wallet is unable to be initialized. 2022-12-22 14:23:30 +00:00
CalDescent
7ae142fa64 Improved transaction validation. 2022-12-22 14:20:42 +00:00
CalDescent
a75ed0e634 Bump additional expandedAccount level references held in memory. 2022-12-22 14:18:39 +00:00
CalDescent
e40dc4af59 Fixed group ban expiry. 2022-12-22 14:16:57 +00:00
CalDescent
e678ea22e0 Fixed NPE in unit tests. Still need to work out how/when this was introduced. 2022-12-18 18:33:51 +00:00
CalDescent
cf3195cb83 Set "minAccountsToActivateShareBin" to 0 for certain tests. 2022-12-18 18:32:06 +00:00
CalDescent
80048208d1 Moved some test sponsorship utility methods to AccountUtils, so they can be used in other test classes too. 2022-12-15 12:14:42 +00:00
CalDescent
08de1fb4ec Disallow CHAT transactions with timestamps more than 5 minutes in the future. 2022-12-14 16:40:57 +00:00
CalDescent
99d5bf9103 Disallow transactions with timestamps more than 30 mins in the future (reduced from 24 hours) 2022-12-14 16:40:11 +00:00
CalDescent
1dc7f056f9 Filter out peers of divergent or significantly inferior chains when syncing. 2022-12-14 16:39:43 +00:00
CalDescent
cdeb2052b0 Bump version to 3.8.1 2022-12-08 18:26:34 +00:00
CalDescent
5c9109aca9 minPeerVersion set to 3.8.0 2022-12-08 18:25:19 +00:00
CalDescent
ccc1976d00 Added defensiveness 2022-12-08 18:25:03 +00:00
CalDescent
12fb6cd0ad onlineAccountMinterLevelValidationHeight moved forward to block 1092000 2022-12-08 18:24:34 +00:00
CalDescent
6f95e7c1c8 Bump version to 3.8.0 2022-12-05 21:57:32 +00:00
CalDescent
a69618133e Level 0 online account removals moved inside feature trigger, so it is coordinated with the new validation. 2022-12-05 21:34:26 +00:00
CalDescent
51ad0a5b48 onlineAccountMinterLevelValidationHeight set to 1093400 (approx 20 hours later) 2022-12-05 19:38:44 +00:00
CalDescent
45a6f495d2 selfSponsorshipAlgoV1Height set to 1092400 (approx 4pm UTC on Sat 10th December) 2022-12-05 19:38:26 +00:00
CalDescent
4d9964c080 Block connections with peers older than 3.7.0, as this has been released for long enough now. 2022-12-05 18:52:33 +00:00
CalDescent
9afc31a20d selfSponsorshipAlgoV1SnapshotTimestamp set to 1670230000000 2022-12-05 08:52:09 +00:00
CalDescent
d435e4047b SelfSponsorshipAlgoV1 2022-12-05 08:21:45 +00:00
CalDescent
c108afa27c Self sponsorship algo tests 2022-12-04 20:57:36 +00:00
CalDescent
eea42b56ee Added SelfSponsorshipAlgoV1Block, and call it when processing/orphaning a block at an undecided future height. 2022-12-04 18:21:01 +00:00
CalDescent
f4d20e42f3 Disallow TRANSFER_PRIVS transactions if the sending account has a penalty. Again, there will be no penalties until the algo runs, so it's safe without a feature trigger. 2022-12-04 11:54:05 +00:00
CalDescent
f14cc374c6 Include blocksMintedPenalty in effectiveBlocksMinted. This will be zero until the algo runs, so doesn't need a feature trigger. 2022-12-04 11:52:51 +00:00
CalDescent
99ba4caf75 We definitely can't retroactively validate minter levels, because there are confirmed cases in the chain's history where this fails. Set to 999999999 until we have decided on a future block height. 2022-12-04 11:50:58 +00:00
catbref
ae991dda4d Fix creatorPublicKey not being unmarshaled when calling POST /at to deploy an AT 2022-11-28 21:52:37 +00:00
CalDescent
2b6ae57a27 Merge branch 'master' into chat-reference
# Conflicts:
#	src/main/java/org/qortal/block/BlockChain.java
#	src/main/resources/blockchain.json
#	src/test/resources/test-chain-v2-block-timestamps.json
#	src/test/resources/test-chain-v2-disable-reference.json
#	src/test/resources/test-chain-v2-founder-rewards.json
#	src/test/resources/test-chain-v2-leftover-reward.json
#	src/test/resources/test-chain-v2-minting.json
#	src/test/resources/test-chain-v2-qora-holder-extremes.json
#	src/test/resources/test-chain-v2-qora-holder-reduction.json
#	src/test/resources/test-chain-v2-qora-holder.json
#	src/test/resources/test-chain-v2-reward-levels.json
#	src/test/resources/test-chain-v2-reward-scaling.json
#	src/test/resources/test-chain-v2-reward-shares.json
#	src/test/resources/test-chain-v2.json
2022-11-27 20:06:55 +00:00
CalDescent
5ff7b3df6d hasInvalidSigner() now only checks the chain tip block, to reduce the amount of unintended side effects that can occur. 2022-11-27 19:59:46 +00:00
CalDescent
76686eca21 Modified reward share creation in test/common/AccountUtils to allow a fee to be specified. 2022-11-27 12:23:17 +00:00
CalDescent
3965f24ab5 Fixed bug 2022-11-26 19:06:02 +00:00
CalDescent
a75fd14e45 Added Account method needed for unit tests. 2022-11-26 17:23:43 +00:00
CalDescent
41cdf665ed Code tidy 2022-11-26 16:53:38 +00:00
CalDescent
6ea3c0e6f7 Give founder accounts an effective minting level of 0 if they have a penalty. 2022-11-26 16:19:43 +00:00
CalDescent
5f0263c078 Modifications to block minting for unit tests, in order to solve an NPE and give more options to callers.
This shouldn't affect the behaviour of existing tests, other than an NPE being replaced with an assertNotNull().
2022-11-26 16:14:45 +00:00
CalDescent
58e5d325ff Added algo feature triggers to BlockChain.java (at future undecided block height & timestamp) 2022-11-26 16:10:18 +00:00
CalDescent
7003a8274b Added some API endpoints relating to penalties. Relies on some code not yet committed. 2022-11-26 15:51:06 +00:00
CalDescent
ab687af4bb Added new db query to fetch a list of all accounts that have created a non-self-share, based on confirmed transactions.
This will be used as the input dataset for the self sponsorship algo.
2022-11-26 12:21:43 +00:00
CalDescent
f50c0c87dd Account repository modifications for blocksMintedPenalty. 2022-11-26 12:03:13 +00:00
CalDescent
9c3a4d6e37 BlockChain.java additions for onlineAccountMinterLevelValidationHeight, which were missing from commit 68a0923 2022-11-26 11:55:27 +00:00
CalDescent
1c8a6ce204 When synchronizing, filter out peers that have a recent block with an invalid signer.
This avoids the wasted time and consensus confusion caused by syncing and then failing validation. This is significant after the algo has run, as many signers will become invalid.
2022-11-26 11:52:27 +00:00
CalDescent
68a0923582 Disallow level 0 minters in blocks, and exclude them when minting a new block.
The validation is currently set to a feature trigger of height 0, although this will likely be set to a future block, in case there are any cases in the chain's history where this validation may fail (e.g. transfer privs?)
2022-11-26 11:37:02 +00:00
CalDescent
617c801cbd Made Block.ExpandedAccount public, and added some more getters. This is needed for upcoming additional validation and unit tests. 2022-11-26 11:25:44 +00:00
CalDescent
b0c9ce7482 Add blocks minted penalty to Accounts table 2022-11-26 11:19:23 +00:00
CalDescent
4e829a2d05 Bump version to 3.7.0 2022-11-07 21:12:34 +00:00
CalDescent
a7402adfa5 Merge branch 'null-owned-groups' 2022-11-06 21:20:19 +00:00
CalDescent
9255df46cf Script updates to support add/remove dev group admins 2022-11-06 19:46:12 +00:00
CalDescent
db22445948 Include API key automatically in publish-auto-update-v5.pl 2022-11-06 14:52:14 +00:00
CalDescent
818e037e75 Merge branch 'master' into null-owned-groups 2022-11-06 13:08:54 +00:00
CalDescent
9c68f1038a Bump AT version to 1.4.0 2022-11-05 14:02:04 +00:00
CalDescent
10ae383bb6 Merge pull request #102 from catbref/faster-qort-buy-PoC
Proof of concept: speed up QORT buying
2022-11-01 18:55:21 +00:00
catbref
aead9cfcbf Proof of concept: speed up QORT buying
When users buy QORT ("Alice"-side), most of the API time is spent computing mempow for the MESSAGE sent to Bob's AT.
This is the final stage of startResponse(), after Alice's P2SH has already been broadcast.

To speed this up, the MESSAGE part is moved into its own thread allowing startResponse() to return sooner, improving the user experience.

Caveats:
If MESSAGE importAsUnconfirmed() somehow fails, then the buy won't complete and Alice will have to wait for the P2SH refund.
If Alice shuts down her node while MESSAGE mempow is being computed then it's possible the shutdown will be blocked until mempow is complete.

Currently only implemented in LitecoinACCTv3TradeBot, as this is only a proof of concept.
Tested with multiple buys in the same block.
2022-11-01 08:55:57 +00:00
CalDescent
055775b13d Include a list of files in the QDN metadata. 2022-10-30 18:54:38 +00:00
CalDescent
985c195e9e Added GIF_REPOSITORY, with custom validation function and unit tests. 2022-10-30 17:33:21 +00:00
CalDescent
0628847d14 Removed QORTAL_METADATA service tests. 2022-10-30 17:25:11 +00:00
CalDescent
4043ae1928 Added QCHAT_IMAGE service (with 500KB file size limit). 2022-10-30 17:23:46 +00:00
CalDescent
fa80c83864 Remove QORTAL_METADATA service as this uses its own protocol instead. 2022-10-30 17:07:56 +00:00
CalDescent
f739d8f5c6 Added increaseOnlineAccountsDifficultyTimestamp feature trigger to unit tests. 2022-10-28 18:06:34 +01:00
CalDescent
166425bee9 Added feature trigger timestamp (TBC) to increase online accounts mempow difficulty (also TBC). 2022-10-28 17:20:39 +01:00
CalDescent
59a804c560 Include "blocks remaining" in systray when syncing from more than 60 minutes away from a peer's chain tip. 2022-10-28 16:57:52 +01:00
CalDescent
b64c053531 Reuse the work buffer when verifying online accounts from the OnlineAccountsManager import queue.
This is a hopeful fix for extra memory usage since mempow activated, due to adding a lot of load to the garbage collector. It only applies to accounts verified from the import queue; the optimization hasn't been applied to block processing. But verifying online accounts when processing blocks is rare and generally would only last a short amount of time.
2022-10-28 16:54:53 +01:00
CalDescent
30cd56165a Speed up syncing blocks in the range of 1-12 hours ago by caching the valid online accounts. 2022-10-28 16:02:25 +01:00
CalDescent
510328db47 Removed unused timestamp value. 2022-10-28 15:50:43 +01:00
CalDescent
9d74f0eec0 Added haschatreference, with possible values of true, false, or null, to allow optional filtering by the presence or absence of a chat reference. 2022-10-24 19:21:29 +01:00
CalDescent
09014d07e0 Fixed issues retrieving chatReference from the db. 2022-10-23 19:29:31 +01:00
CalDescent
f83d4bac7b Reduced online accounts mempow difficulty to 5 on testnets.
This allows testnets to more easily coexist on the same machines that are running a mainnet instance, and still tests the mempow computation and verification in a non-resource-intensive way.
2022-10-23 17:01:58 +01:00
CalDescent
b3273ff01a Removed all mempow feature trigger conditionals.
We no longer need all the code complexity, now that 24 hours have passed since activation. We don't validate online accounts beyond 12 hours, and the data is trimmed after 24 hours.
2022-10-23 16:47:42 +01:00
CalDescent
1dd039fb2d Merge branch 'master' into chat-reference 2022-10-23 14:14:23 +01:00
CalDescent
1d5497e484 Modifications to support a single node testnet:
- Added "singleNodeTestnet" setting, allowing for fast and consecutive block minting, and no requirement for a minimum number of peers.
- Added "recoveryModeTimeout" setting (previously hardcoded in Synchronizer).
- Updated testnets documentation to include new settings and a quick start guide.
- Added "generic" minting account that can be used in testnets (not functional on mainnet), to simplify the process for new devs.
2022-10-23 14:13:38 +01:00
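A settings.json fragment for such a testnet node might look like the sketch below. Only the setting names come from the commit; the recoveryModeTimeout value (assumed to be in milliseconds) is an arbitrary placeholder:

{
  "singleNodeTestnet": true,
  "recoveryModeTimeout": 600000
}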
CalDescent
b37aa749c6 Removed onlineAccountsMemPoWEnabled setting as it's no longer needed. 2022-10-22 19:34:24 +01:00
CalDescent
e45ad37eb5 Fixed bug which could prevent invalid accounts being removed from the queue until the next valid one is added. 2022-10-22 19:30:08 +01:00
CalDescent
72985b1fc6 Reduce log spam, especially around the time of node startup before online accounts have been retrieved.
We expect a "Couldn't build a to-be-minted block" log on every startup due to trying to mint before having any accounts. This one has moved from error to info level because error logs can be quite intrusive when using an IDE.
2022-10-22 19:24:54 +01:00
CalDescent
6f27d3798c Improved online accounts processing, to avoid creating keys in the map before validation. 2022-10-22 19:18:41 +01:00
CalDescent
23a5c5f9b4 Fixed bug in original commit - we need to save the chat reference to the db. 2022-10-22 12:50:28 +01:00
CalDescent
a4759a0ef4 Re-ordered chat transaction transformation, to simplify UI code. New additions are now at the end of the data bytes. 2022-10-22 12:43:40 +01:00
CalDescent
910191b074 Added optional chatReference field to CHAT transactions.
This allows one message to reference another, e.g. for replies, edits, and reactions. We can't use the existing reference field as this is used for encryption and generally points to the user's lastReference at the time of signing.

"chatReference" is based on the "nameReference" field used in various name transactions, for similar purposes.

This needs a feature trigger timestamp to activate, and that same timestamp will need to be used in the UI since that is responsible for building the chat transactions.
2022-10-21 15:58:23 +01:00
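A simplified sketch of appending the optional chatReference at the end of the transaction data bytes, gated on a feature-trigger timestamp as described above. The layout and names are illustrative assumptions rather than the actual CHAT transaction transformer; the timestamp constant reuses the chatReferenceTimestamp value set elsewhere in this log:

import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class ChatReferenceSketch {
    static final long CHAT_REFERENCE_TIMESTAMP = 1_674_316_800_000L;

    static byte[] appendChatReference(byte[] existingDataBytes, long txTimestamp, byte[] chatReference) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(existingDataBytes);
        // New additions go at the end of the data bytes, and only after the feature trigger.
        if (txTimestamp >= CHAT_REFERENCE_TIMESTAMP && chatReference != null)
            out.write(chatReference);
        return out.toByteArray();
    }
}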
CalDescent
57125a91cf Bump version to 3.6.4 2022-10-15 18:59:42 +01:00
CalDescent
3c565638c1 onlineAccountsMemoryPoWTimestamp set to Sat Oct 22 2022 16:00:00 UTC 2022-10-15 18:58:13 +01:00
CalDescent
c2d02aead9 Default minPeerVersion set to 3.6.3 2022-10-14 18:44:25 +01:00
CalDescent
0d9aafaf4e Reduced log spam 2022-10-14 17:03:10 +01:00
CalDescent
3844358380 Mark a peer as misbehaved if it fails to respond with a usable block 3 times in a row.
This should help to workaround deserialization and missing response issues.
2022-10-14 16:38:05 +01:00
CalDescent
b4125d2bf1 Fix for NPE in verifyMemoryPoW() 2022-10-14 11:34:46 +01:00
CalDescent
5c223179ed Updated AdvancedInstaller project for v3.6.3 2022-10-13 23:37:21 +01:00
CalDescent
f3cb57417a Merge branch 'master' of github.com:Qortal/qortal 2022-10-13 23:36:27 +01:00
CalDescent
7c7f071eba Bump version to 3.6.3 2022-10-12 08:54:27 +01:00
CalDescent
7c15d88cbc Fix for issue in BLOCK_SUMMARIES_V2 when sending an empty array of summaries.
The BLOCK_SUMMARIES message type would differentiate between an empty response and a missing/invalid response. However, in V2, a response with empty summaries would throw a BufferUnderflowException and be treated by the caller as a null message.

This caused problems when trying to find a common block with peers that have diverged by more than 8 blocks. With V1 the caller would know to search back further (e.g. 16 blocks) but in V2 it was treated as "no response" and so the caller would give up instead of increasing the look-back threshold.

This fix will identify BLOCK_SUMMARIES_V2 messages with no content, and return an empty array of block summaries instead of a null message.

Should be enough to recover any stuck nodes, as long as they haven't diverged more than 240 blocks from the main chain.
2022-10-12 08:52:58 +01:00
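The fix described above boils down to treating an empty payload as "zero summaries" rather than letting a BufferUnderflowException turn it into a null message. A generic sketch of that pattern, not the actual Qortal message class or wire format:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class EmptySummariesSketch {
    static final int SUMMARY_LENGTH = 32; // placeholder record size

    static List<byte[]> readSummaries(ByteBuffer buffer) {
        List<byte[]> summaries = new ArrayList<>();
        // An empty buffer now yields an empty list instead of an underflow being treated as "no response".
        while (buffer.remaining() >= SUMMARY_LENGTH) {
            byte[] summary = new byte[SUMMARY_LENGTH];
            buffer.get(summary);
            summaries.add(summary);
        }
        return summaries;
    }

    public static void main(String[] args) {
        System.out.println(readSummaries(ByteBuffer.allocate(0)).size()); // 0, rather than an exception
    }
}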
CalDescent
d4aaba2293 Bump version to 3.6.2 2022-10-10 19:06:08 +01:00
CalDescent
10d3176e70 Revert "Always use BlockSummariesMessage V1 (instead of V2) when responding to GetBlockSummaries requests."
This reverts commit 2d58118d7c.
2022-10-10 10:28:44 +01:00
CalDescent
36fcd6792a Discard BLOCK_SUMMARIES_V2 messages with an ID (thanks to @catbref for the code)
This is a better fix for the "contaminated chain tip summaries" issue. Need to reduce the logging level to debug before release.
2022-10-10 10:28:36 +01:00
CalDescent
cb1eee8ff5 GenericUnknownMessage.MINIMUM_PEER_VERSION set to 3.6.1.
This should ideally have been set in the 3.6.1 release, but not setting it is unlikely to have caused any problems.
2022-10-09 20:37:39 +01:00
CalDescent
2d58118d7c Always use BlockSummariesMessage V1 (instead of V2) when responding to GetBlockSummaries requests.
This should hopefully fix a potential issue where peer's chain tip data becomes contaminated with other summary data, causing incorrect sync decisions.
2022-10-09 20:11:01 +01:00
CalDescent
e6bb0b81cf Revert "Reduce INITIAL_BLOCK_STEP from 8 to 7."
This reverts commit 0088ba8485.
2022-10-09 19:11:20 +01:00
Nuc1eoN
8ddf4c9f9f Add polish translation 2022-10-09 15:35:19 +02:00
CalDescent
77d60fc33f Revert "Skip GET_BLOCK_SUMMARIES requests if it can already be fulfilled entirely from the peer's chain tip block summaries cache."
This reverts commit 8cedf618f4.
2022-10-09 14:11:28 +01:00
CalDescent
504f38b42a Merge pull request #97 from Nuc1eoN/patch-1
Mark start/stop scripts as executables
2022-10-08 19:49:10 +01:00
Nuc1eoN
3a18599d85 Mark start/stop scripts as executables
The `start.sh` & `stop.sh` scripts have already been marked as executables in the source folder... But since we have only piped their contents, we need to set correct file permissions again.
2022-10-07 23:35:35 +02:00
CalDescent
0088ba8485 Reduce INITIAL_BLOCK_STEP from 8 to 7.
This allows the first pass to always be served from the peer's cache of 8 summaries. A maximum of 7 can be returned, because the 8th spot is needed for the parent block's signature.
2022-10-07 14:47:46 +01:00
CalDescent
8cedf618f4 Skip GET_BLOCK_SUMMARIES requests if it can already be fulfilled entirely from the peer's chain tip block summaries cache.
Loading from the cache should speed up sync decisions, particularly when choosing which peer to sync from. The greater the number of connected peers, the more significant this optimization will be. It should also reduce wasted network requests and data usage.

Adding this check prior to making a network request is a simple way to introduce the new cached summaries from BLOCK_SUMMARIES_V2 without having to rewrite a lot of the complex sync / peer comparison logic. Longer term we may want to rewrite that logic to read from the cache directly, but it doesn't make sense to introduce that level of risk at this point in time, especially as the Synchronizer may be rewritten soon to prefer longer chains.

Even so, this is still quite a high risk commit so lots of testing will be needed.
2022-10-07 14:46:09 +01:00
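The check described above, serving a request from the peer's cached chain-tip summaries when possible, can be sketched roughly as follows; the types, heights and cache layout are assumptions for illustration only:

import java.util.List;

public class SummariesCacheSketch {
    static class Summary {
        final int height;
        Summary(int height) { this.height = height; }
    }

    // Returns cached summaries if they fully cover the requested range, otherwise null (fall back to a network request).
    static List<Summary> serveFromCache(List<Summary> cachedTipSummaries, int commonBlockHeight, int requestedCount) {
        if (cachedTipSummaries == null || cachedTipSummaries.isEmpty())
            return null;
        int firstCached = cachedTipSummaries.get(0).height;
        int lastCached = cachedTipSummaries.get(cachedTipSummaries.size() - 1).height;
        if (firstCached != commonBlockHeight + 1 || lastCached < commonBlockHeight + requestedCount)
            return null; // the cache can't fulfil the request entirely
        return cachedTipSummaries.subList(0, requestedCount);
    }
}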
CalDescent
fdd95eac56 Limit to 240 blocks in syncToPeerChain().
Should fix OutOfMemoryException often seen when syncing from 1000+ blocks behind the chain tip.
2022-10-07 11:05:24 +01:00
CalDescent
10b0f0a054 Catch JSON exceptions in PirateChainWalletController.
This could prevent additional wallets from being initialized if connection was lost while syncing an existing one.
2022-10-05 15:29:29 +01:00
CalDescent
1233ba6703 Bump version to 3.6.1 2022-10-04 20:08:30 +01:00
CalDescent
c35c7180d4 Return empty levels in GET /addresses/online/levels 2022-10-03 10:58:47 +01:00
CalDescent
7080b55aac Reintroduced initial sleep period in block archiver. 2022-09-25 19:43:56 +01:00
CalDescent
3890fa8490 Renamed constant for consistency 2022-09-25 18:46:33 +01:00
CalDescent
a9721bab3d Fixed issue causing startup of various components to be delayed by 30 seconds. 2022-09-25 18:39:56 +01:00
CalDescent
1bb8f1b6d2 Fixed bug in last commit.
We need to track items to remove separately from items to add, otherwise invalid accounts remain in the queue.
2022-09-25 12:36:00 +01:00
CalDescent
765416db71 Yet another attempt to optimize the online accounts import queue processing.
The main difference here is that we now remove items from the onlineAccountsImportQueue in a batch, _after_ they have been imported. This prevents duplicates from being added to the queue during the gap that previously existed between items being removed and imported.
2022-09-25 12:26:00 +01:00
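A rough sketch of the batching change: items are only removed from the queue after they have been imported, so anything re-added in the meantime is deduplicated by the queue itself rather than re-validated. Names and types are illustrative, not the actual OnlineAccountsManager code:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ImportQueueSketch {
    private final Set<String> onlineAccountsImportQueue = ConcurrentHashMap.newKeySet();

    void processQueue() {
        List<String> imported = new ArrayList<>();
        for (String account : onlineAccountsImportQueue) {
            // validate and import the account here...
            imported.add(account);
        }
        // Remove in a single batch only after importing, closing the gap described above.
        onlineAccountsImportQueue.removeAll(imported);
    }
}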
CalDescent
5989473c8a Revert "Allow duplicate variations of each OnlineAccountData in the import queue, but don't allow two entries that match exactly."
This reverts commit 6d9e6e8d4c.
2022-09-25 12:06:14 +01:00
CalDescent
aa9da45c01 Added optional filtering by reference in GET /chat/messages 2022-09-25 11:38:17 +01:00
CalDescent
4681218416 Include total count in debug trade presence logging 2022-09-24 15:49:29 +01:00
CalDescent
5c746f0bd9 Fixed bug which required a node to hold local trade presences before it would request any.
This caused large gaps with no presence data: entries are removed when they expire, causing the local count to drop to zero, and the node would only start requesting them again once a peer proactively pushed one or more entries.
2022-09-24 15:48:45 +01:00
CalDescent
309f27a6b8 Moved error to debug, as we now get a burst of these soon after startup, due to commit 99858f3.
This also shows that commit 99858f3 now prevents a block candidate with a very small number of online accounts from being built immediately after startup.
2022-09-24 15:21:01 +01:00
CalDescent
d2ebb215e6 Fixed Synchronizer.getBlockSummaries() which was expecting BLOCK_SUMMARIES, but updated peers send BLOCK_SUMMARIES_V2 2022-09-24 14:36:49 +01:00
CalDescent
7a60f713ea Fixed error in rebase. 2022-09-24 14:35:02 +01:00
CalDescent
e80dd31fb4 BlockSummariesV2Message.MINIMUM_PEER_VERSION set to 3.6.1 2022-09-24 13:53:27 +01:00
catbref
94cdc10151 Initial work on BLOCK_SUMMARIES_V2, part of a bigger arc to improve synchronization.
Touches quite a few files because:

* Deprecate HEIGHT_V2 because it doesn't contain enough info to be fully useful during sync.
Newer peers will re-use BLOCK_SUMMARIES_V2.

* For newer peers, instead of sending / broadcasting HEIGHT_V2,
send top N block summaries instead, to avoid requests for minor reorgs.

* When responding to GET_BLOCK, and we don't actually have the requested block,
we currently send an empty BLOCK_SUMMARIES message instead of not responding,
which would cause a slow timeout in Synchronizer.

This pattern has spread to other network message response code,
so now we introduce a generic 'unknown' message type for all these cases.

* Remove PeerChainTipData class entirely and re-use BlockSummaryData instead.

* Each Peer instance used to hold PeerChainTipData - essentially single latest block summary - but now holds a List of latest block summaries.

* PeerChainTipData getter/setter methods modified for compatibility at this point in time.

* Repository methods that return BlockSummaryData (or lists of) now try to fully populate them,
including newly added block reference field.

* Re-worked Peer.canUseCommonBlockData() to be more readable

* Cherry-picked patch to Message.fromByteBuffer() to pass an empty, read-only ByteBuffer to subclass fromByteBuffer() methods, instead of null.
This allows natural use of BufferUnderflowException if a subclass tries to use read(), or hasRemaining(), etc. from an empty data-payload message.
Previously this could have caused an NPE.
2022-09-24 13:48:01 +01:00
CalDescent
863a5eff97 Moved various online accounts logs to TRACE level, to make it easier to monitor the queue processing when in DEBUG. 2022-09-24 13:11:28 +01:00
CalDescent
5b81b30974 Modified online accounts request interval, and introduced bursting.
It will now request online accounts every 1 minute instead of every 5 seconds, except for the first 5 minutes following a new online accounts timestamp, during which it will request every 5 seconds (referred to as the "burst" interval). It will also use the burst interval for the first 5 minutes after the node starts.

This is based on the idea that most online accounts arrive soon after a new timestamp begins, and so there is no need to request accounts so frequently after that. This should reduce data usage by a significant amount.

Once mempow is fully rolled out, the "burst" feature can be reduced or removed, since online accounts will be sent ahead of time, generally 15-30 mins prior to the new online accounts timestamp becoming active.
2022-09-24 13:02:27 +01:00
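A sketch of the interval selection described above, assuming a 1-minute normal interval and a 5-second "burst" interval for the first 5 minutes after a new online accounts timestamp or after node startup; the constants and names are illustrative only:

public class RequestIntervalSketch {
    static final long NORMAL_INTERVAL_MS = 60_000L;    // once per minute normally
    static final long BURST_INTERVAL_MS = 5_000L;      // every 5 seconds during a burst
    static final long BURST_DURATION_MS = 5 * 60_000L; // burst for the first 5 minutes

    static long nextRequestInterval(long now, long onlineAccountsTimestampStart, long nodeStartTime) {
        boolean recentTimestamp = now - onlineAccountsTimestampStart < BURST_DURATION_MS;
        boolean recentStartup = now - nodeStartTime < BURST_DURATION_MS;
        return (recentTimestamp || recentStartup) ? BURST_INTERVAL_MS : NORMAL_INTERVAL_MS;
    }
}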
CalDescent
174a779e4c Add accounts from the import queue individually, and then skip future duplicates before unnecessarily validating them again.
This closes a gap where accounts would be moved from onlineAccountsImportQueue to onlineAccountsToAdd, but not yet imported. During this time, there was nothing to stop them from being added to the import queue again, causing duplicate validations.
2022-09-24 10:56:52 +01:00
CalDescent
c7cf33ef78 Set hasOurOnlineAccounts to true if one of our accounts is found before signing. 2022-09-24 10:23:55 +01:00
CalDescent
ea4f4d949b When validating online accounts, enforce mempow if the online account's timestamp is after the feature trigger. 2022-09-23 19:45:59 +01:00
CalDescent
6d9e6e8d4c Allow duplicate variations of each OnlineAccountData in the import queue, but don't allow two entries that match exactly. 2022-09-23 18:46:01 +01:00
CalDescent
99858f3781 Wait 30 seconds after the node starts before computing our online accounts.
This allows some time for initial online account lists to be retrieved, and reduces the chances of the same nonce being computed twice.
2022-09-23 18:28:41 +01:00
CalDescent
84a16157d1 Don't add online accounts to the import queue if they are already validated 2022-09-23 18:02:46 +01:00
CalDescent
49d83650f4 Removed online accounts V2 and V1 messaging, as the V3 format will soon be required due to the nonce values. 2022-09-23 15:25:44 +01:00
CalDescent
951c85faf1 Fixed bug causing error 500 in some cases. 2022-09-20 22:26:30 +01:00
CalDescent
84d42b93e1 Reordered code in Block.mint() to fix potential issue after mempow activates. 2022-09-20 08:50:37 +01:00
CalDescent
b99b1f5d57 Bump version to 3.6.0 2022-09-19 17:29:26 +01:00
CalDescent
952c51ab25 QORA / block reward adjustments set to activate at height 1010000 2022-09-19 17:27:07 +01:00
CalDescent
64ef8ab863 OnlineAccountsV3Message.MIN_PEER_VERSION set to 3.6.0 2022-09-19 16:36:39 +01:00
CalDescent
93fd80e289 Require that add/remove admin transactions can only be created by group members.
For regular groups, we require that the owner adds/removes the admins, so group membership is adequately checked. However for null-owned groups this check is skipped. So we need an additional condition to prevent non-group members from issuing a transaction for approval by the group admins.
2022-09-19 16:34:31 +01:00
CalDescent
5581b83c57 Added initial admin approval features for groups owned by the null account.
The dev group (ID 1) is owned by the null account with public key 11111111111111111111111111111111. To regain access to otherwise blocked owner-based rules, it has different validation logic which applies to groups with this same null owner.

The main difference is that approval is required for certain transaction types relating to null-owned groups. This allows existing admins to approve updates to the group (using the group's approval threshold) instead of these actions being performed by the owner.

Since these rules apply to all null-owned groups, anyone can update their group to the null owner if they want to take advantage of this decentralized approval system.

Currently, the affected transaction types are:
- AddGroupAdminTransaction
- RemoveGroupAdminTransaction

This same approach could ultimately be applied to other group transactions too.
2022-09-19 11:03:06 +01:00
CalDescent
5017072f6c Use path parameter instead of query string. 2022-09-17 13:50:04 +01:00
CalDescent
02ac6dd8c1 Added GET /chat/message/{signature} endpoint.
This will ease the transition to a Q-Chat protocol, where chat messages will no longer be regular transactions.
2022-09-17 13:28:32 +01:00
CalDescent
858269f6cb ChatTransaction MAX_DATA_SIZE increased from 256 to 1024 bytes, to allow for new UI features. 2022-09-17 12:21:56 +01:00
CalDescent
791a9b78ec Added support for Pirate Chain wallets on FreeBSD. 2022-09-17 10:36:25 +01:00
CalDescent
aff49e6bdf Added support for ARRR refunds via /crosschain/htlc/refund/{ataddress} and /crosschain/htlc/refundAll
This could probably be refactored into multiple classes to make the code cleaner, but it is functional for now.
2022-09-17 10:30:10 +01:00
CalDescent
2d29fdca00 Allow BTC trades in redeemAll / refundAll, since most will now be using ACCTv3. 2022-09-16 11:19:10 +01:00
CalDescent
063ef8507b Fix for NPE in last commit 2022-09-11 20:04:51 +01:00
CalDescent
f042b5ca5f If mempow is active, remove any legacy accounts from a to-be-minted block that are missing a nonce. 2022-09-11 11:41:11 +01:00
CalDescent
a10e669554 Allow nonce to be computed for "next" timestamp if mempow is enabled in settings. 2022-09-11 11:36:19 +01:00
CalDescent
501f66ab00 BlockTransformer updates necessary for mempow online accounts.
Using the feature trigger timestamp here should be much less error-prone than introducing a whole new block message version. Once mempow has been live for at least 24 hours, the feature trigger can be removed and the code cleaned up, as all online accounts signatures will use the new format from that time onwards (legacy signatures are trimmed after 24 hours).
2022-09-10 18:31:01 +01:00
CalDescent
6003ed3ff7 Revert "Use block's online accounts timestamp (instead of main timestamp) for the mempow hard fork."
This reverts commit 8cca6db316.
2022-09-10 17:23:39 +01:00
CalDescent
03e3619817 Revert "Use onlineAccountTimestamp for all mempow hard fork related code in OnlineAccountsManager too."
This reverts commit 23423102e7.
2022-09-10 17:23:34 +01:00
CalDescent
0e42e7b05a Merge branch 'master' into online-accounts-mempow-v2-block-updates
# Conflicts:
#	src/test/resources/test-chain-v2-no-sig-agg.json
2022-09-10 13:50:26 +01:00
CalDescent
d4fbc1687b Optionally exclude initial data from all trade websockets, using query string parameter excludeInitialData=true
Due to the large amount of data, it can take some time for the request to be processed and data to be transferred. It may make more sense to load the initial state from the standard API, and just use the websockets for updates.
2022-09-09 18:45:51 +01:00
DrewMPeacock
1abceada20 Fix up CREATE_POLL and VOTE_ON_POLL transactions to process and validate.
Added rule to enforce that a poll creator is also its owner.
2022-09-09 11:20:46 -06:00
CalDescent
8ffdc9b369 POST /crosschain/htlc/importarchivedtrades moved to POST /admin/repository/importarchivedtrades, as this is a repository operation not an HTLC one. 2022-09-09 18:01:20 +01:00
CalDescent
c883dd44c8 Remove LitecoinACCTv2 as it is no longer being used, and is unsafe. 2022-09-09 17:40:10 +01:00
CalDescent
667530e202 Remove DogecoinACCTv2 as it is no longer being used, and is unsafe. 2022-09-09 17:12:44 +01:00
CalDescent
5807d6e0dc Merge branch 'shares-by-level-rework' 2022-09-09 11:35:56 +01:00
CalDescent
ba4eeed358 Modified GET /arbitrary/resources endpoint (and underlying db queries) to allow filtering names by a list, e.g. "followedNames" or "blockedNames". 2022-09-07 18:42:24 +01:00
CalDescent
82edc4d9f3 OnlineAccountsV3Message MIN_PEER_VERSION set to 3.5.1 2022-09-07 18:26:30 +01:00
CalDescent
2a0d5746e6 Only compute "next" online account signature if mempow hard fork is active.
This minimizes the amount of differences in the first phase of the mempow rollout.
2022-09-04 13:19:32 +01:00
CalDescent
23423102e7 Use onlineAccountTimestamp for all mempow hard fork related code in OnlineAccountsManager too. 2022-09-04 11:51:24 +01:00
CalDescent
8879ec5bb4 Avoid computing the proof of work for each online account more than once. 2022-09-03 17:11:13 +01:00
CalDescent
8cca6db316 Use block's online accounts timestamp (instead of main timestamp) for the mempow hard fork.
This ensures that we cleanly switch from the old to new online account format, even if a block is minted retroactively.
2022-09-03 16:30:03 +01:00
CalDescent
effe1ac44d Merge branch 'master' into online-accounts-mempow-v2-block-updates
# Conflicts:
#	src/main/java/org/qortal/block/BlockChain.java
#	src/main/resources/blockchain.json
#	src/test/resources/test-chain-v2-no-sig-agg.json
2022-09-03 15:16:01 +01:00
CalDescent
ad4308afdf Added POST /crosschain/htlc/importarchivedtrades API endpoint.
This imports all trades from TradeBotStatesArchive.json into the repository.
2022-09-02 19:12:36 +01:00
CalDescent
6cfd85bdce Skip over ARRR orders in /refundAll and /redeemAll, as ARRR support hasn't been added for these yet.
This should fix a 500 error which could prevent forced refund attempts for LTC and other chains. It wouldn't have affected any normal trade-bot refund functionality.
2022-09-02 18:05:12 +01:00
CalDescent
8b61247712 Added shareBinsByLevelV2.
This allows for a different share bin distribution starting at an undecided future block height. This height will correspond with the QORA reduction. The new values were decided in a recent community vote.
2022-09-02 16:43:46 +01:00
CalDescent
a9267760eb qoraHoldersShare reworked to qoraHoldersShareByHeight.
This allows the QORA share percentage to be modified at different heights, based on community votes. Added unit test to simulate a reduction.

# Conflicts:
#	src/test/java/org/qortal/test/minting/RewardTests.java
2022-08-29 09:40:20 +01:00
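Conceptually, qoraHoldersShareByHeight maps block-height thresholds to a share percentage, with the applicable entry being the last one at or below the current height. A hedged sketch with made-up heights and percentages; the real values live in blockchain.json:

import java.util.NavigableMap;
import java.util.TreeMap;

public class QoraShareByHeightSketch {
    static final NavigableMap<Integer, Double> SHARE_BY_HEIGHT = new TreeMap<>();
    static {
        SHARE_BY_HEIGHT.put(1, 0.20);         // placeholder share from genesis
        SHARE_BY_HEIGHT.put(1_000_000, 0.10); // placeholder reduced share from a future height
    }

    static double qoraHoldersShareAt(int height) {
        return SHARE_BY_HEIGHT.floorEntry(height).getValue();
    }

    public static void main(String[] args) {
        System.out.println(qoraHoldersShareAt(999_999));   // 0.2
        System.out.println(qoraHoldersShareAt(1_000_000)); // 0.1
    }
}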
CalDescent
73396490ba Set walletsPath and listsPath to AppData folder for new Windows installs. 2022-08-27 19:44:31 +01:00
CalDescent
0b8fcc0a7b Bump version to 3.5.0 2022-08-26 19:32:58 +01:00
CalDescent
3d3ecbfb15 Merge branch 'master' into pirate-chain 2022-08-23 09:51:15 +01:00
CalDescent
9658f0cdd4 Revert "Rewrite of isNotOldPeer predicate, to fix logic issue."
This rewrite may have been causing problems with connections in the network, due to peers being forgotten too easily. Reverting for now to see if it solves the problem.

This reverts commit d81071f254.

# Conflicts:
#	src/main/java/org/qortal/network/Network.java
2022-08-21 12:18:40 +01:00
CalDescent
b23500fdd0 Attempt to hold peer connections for 1-4 hours instead of 5-60 mins, as the constant disconnections are causing too much data to be sent over and over.
If this proves to not have any significant bad effects on re-orgs, we could consider setting these even higher or even disabling the auto disconnect by default.
2022-08-21 12:16:44 +01:00
CalDescent
a1365e57d8 Don't log network stats if no messages have been received, as otherwise this floods the logs with empty stats due to failed connections. 2022-08-21 12:14:12 +01:00
CalDescent
d8ca3a455d Merge pull request #93 from catbref/peer-message-stats
Log count & total size of peer messages sent & received when a peer is disconnected
2022-08-21 11:41:37 +01:00
CalDescent
dcc943a906 Hopeful fix for InvalidKeyException seen in some JDK implementations. 2022-08-20 15:33:52 +01:00
CalDescent
cd2010bd06 More algo logging. 2022-08-20 12:50:24 +01:00
CalDescent
8cd16792a2 More logging relating to decryption failures. 2022-08-20 12:45:20 +01:00
CalDescent
4d97586f82 Log exception if AES decryption fails using specific algorithm settings. 2022-08-20 12:27:17 +01:00
CalDescent
3612fd8257 Removed unused throw 2022-08-20 11:58:16 +01:00
CalDescent
ff96868bd9 Log full stack trace if loading a QDN resource fails due to a DataException. 2022-08-20 11:57:53 +01:00
CalDescent
1694d4552e ArbitraryDataReader.deleteWorkingDirectory() is now optional. 2022-08-20 11:57:04 +01:00
CalDescent
bb1593efd2 Log full IOException stacktrace when obtaining Pirate Chain libraries. 2022-08-20 09:47:37 +01:00
CalDescent
4140546afb Ignore failures when deleting original QDN compressed file. 2022-08-20 09:45:48 +01:00
CalDescent
19197812d3 Log DataException during transaction validation. 2022-08-19 22:54:52 +01:00
CalDescent
168d32a474 Include memo for outgoing ARRR transactions. 2022-08-17 19:41:43 +01:00
CalDescent
a4fade0157 Validate wallet initialization result when restoring existing wallet. 2022-08-17 19:27:13 +01:00
CalDescent
2ea6921b66 Added more ARRR lightwalletd nodes 2022-08-17 19:26:47 +01:00
CalDescent
11ef31215b Fixed failing test 2022-08-17 19:25:45 +01:00
CalDescent
830a608b14 Include memo for incoming ARRR transactions. 2022-08-17 19:23:54 +01:00
CalDescent
57acf7dffe Updated text when downloading wallet files from QDN 2022-08-15 20:09:16 +01:00
CalDescent
9debebe03e Default birthday for ARRR moved to "arrrDefaultBirthday" (default 2000000).
This allows users to increase their default birthday if they know that no wallets were created before a certain block, to reduce sync time. It also fixes some failing unit tests that relied on transactions between blocks 1900000 and 2000000.
2022-08-13 16:48:03 +01:00
catbref
b17e96e121 Log count & total size of peer messages sent & received when a peer is disconnected. Requires org.qortal.net.Peer logging level set to DEBUG 2022-08-13 15:42:24 +01:00
CalDescent
b46c3cf95f Allow direct connection QDN retries every hour, instead of every 24 hours. 2022-08-13 14:20:10 +01:00
CalDescent
86526507a6 Increase time range and total number of attempts to fetch a QDN resource, as it previously gave up too quickly. 2022-08-12 22:25:35 +01:00
CalDescent
1b9128289f Added initial support to download Pirate wallet libraries from QDN, using hardcoded transaction signature.
Using a hardcoded signature ensures that the libraries cannot be swapped out without a core auto update, which requires the standard dev team approval process.
2022-08-12 21:57:31 +01:00
CalDescent
4a58f90223 Added support for multiple lightwalletd servers. 2022-08-12 21:52:02 +01:00
CalDescent
e68db40d91 Remove sapling params 2022-08-12 21:50:41 +01:00
CalDescent
bd6c0c9a7d qdata utility renamed to qdn 2022-08-12 13:38:30 +01:00
CalDescent
5804b9469c Default qdata port set to 12391 2022-08-12 13:38:03 +01:00
DrewMPeacock
4c463f65b7 Add API handles to build CREATE_POLL and VOTE_ON_POLL transactions. 2022-08-08 15:58:46 -06:00
CalDescent
53b47023ac ARRR default birthday increased to 2000000 2022-08-07 19:15:19 +01:00
CalDescent
22f9f08885 Added Windows amd64 architecture (again with a temporary path) 2022-08-07 19:14:21 +01:00
CalDescent
f26267e572 Don't sync, save, or load null seed wallets. They only act as a placeholder wallet when redeeming/refunding a P2SH. 2022-08-07 18:38:27 +01:00
CalDescent
e8c29226a1 Merge branch 'master' into pirate-chain 2022-08-06 16:41:18 +01:00
CalDescent
94f48f8f54 Don't allow QORT to be listed on the ARRR market unless the Pirate light wallet library is loaded. 2022-08-06 11:48:51 +01:00
CalDescent
3aac580f2c Add amd64 architecture (using a temporary path) 2022-08-06 11:47:36 +01:00
CalDescent
2d0b035f98 Updated AdvancedInstaller project for v3.4.3 2022-08-01 19:58:19 +01:00
CalDescent
075385d3ff Bump version to 3.4.3 2022-07-31 19:50:31 +01:00
CalDescent
6ed8250301 onlineAccountsModulusV2Timestamp set to Sat, 06 Aug 2022 16:00:00 UTC 2022-07-31 19:50:05 +01:00
CalDescent
d10ff49dcb Replaced arm architecture with aarch64, as 32 bit is unsupported. 2022-07-31 19:43:49 +01:00
CalDescent
4cf34fa932 Merge branch 'master' into pirate-chain 2022-07-31 16:00:52 +01:00
CalDescent
06b5d5f1d0 Merge branch 'increase-online-timestamp-modulus' 2022-07-31 13:00:47 +01:00
CalDescent
d6d2641cad Added "bitcoinjLookaheadSize" setting (default 50).
This replaces the WALLET_KEY_LOOKAHEAD_INCREMENT_BITCOINJ constant.
2022-07-31 12:07:44 +01:00
CalDescent
e71f22fd2c Added "gapLimit" setting.
This replaces the previously hardcoded "numberOfAdditionalBatchesToSearch" variable, and specifies the minimum number of empty consecutive addresses required before a set of wallet transactions is considered complete. Used for foreign transaction lists and balances.
2022-07-31 11:59:14 +01:00
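The two wallet-scanning settings above could be tuned together in settings.json. The bitcoinjLookaheadSize default of 50 comes from the commit; the gapLimit value shown is only an illustrative placeholder:

{
  "bitcoinjLookaheadSize": 50,
  "gapLimit": 24
}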
CalDescent
c996633732 Added trace level logging. 2022-07-30 19:06:04 +01:00
CalDescent
55f973af3c Ensure all online accounts timestamps are a multiple of the online timestamp modulus.
This is a simple way to discard the 5-minute online account timestamps (from out of date nodes) once the switch to 30-minute online account timestamps has taken place.
2022-07-30 18:15:16 +01:00
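The check above is a simple modulus test: any online accounts timestamp that is not an exact multiple of the current modulus (for example, a 5-minute timestamp after the switch to 30 minutes) is discarded. A minimal sketch, assuming a 30-minute modulus:

public class OnlineTimestampModulusSketch {
    static final long ONLINE_TIMESTAMP_MODULUS_MS = 30 * 60 * 1000L; // assumed 30-minute modulus

    static boolean isValidOnlineTimestamp(long timestamp) {
        return timestamp % ONLINE_TIMESTAMP_MODULUS_MS == 0;
    }

    public static void main(String[] args) {
        long base = 1_659_800_000_000L - (1_659_800_000_000L % ONLINE_TIMESTAMP_MODULUS_MS);
        System.out.println(isValidOnlineTimestamp(base));                  // true: aligned to the modulus
        System.out.println(isValidOnlineTimestamp(base + 5 * 60 * 1000L)); // false: old 5-minute boundary
    }
}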
CalDescent
fe9744eec6 Fixed missing feature trigger in testchain config 2022-07-30 14:52:47 +01:00
CalDescent
410fa59430 Merge branch 'master' into increase-online-timestamp-modulus
# Conflicts:
#	src/main/java/org/qortal/block/BlockChain.java
2022-07-30 12:23:25 +01:00
CalDescent
522ae2bce7 Updated AdvancedInstaller project for v3.4.2 2022-07-27 22:52:48 +01:00
CalDescent
a6e79947b8 Bump version to 3.4.2 2022-07-23 18:07:35 +01:00
CalDescent
b9bf945fd8 Removed aggregateSignatureTimestamp. All online account signatures are aggregated - there is no need for backwards support as signatures are trimmed from blocks after 24 hours. testOnlineAccountsModulusV2() had to be removed as this relied on pre-aggregation signatures. 2022-07-22 13:57:40 +01:00
CalDescent
85a27c14b8 Revert incorrect genesis timestamp that somehow made it into the stashed code. 2022-07-20 10:38:58 +01:00
CalDescent
46c40ca9ca Committed stashed code that is functional but probably too messy for production use.
Online account nonces are appended to the onlineAccountsSignatures to avoid the need for a new field, but it probably makes more sense to separate them.
2022-07-20 10:27:09 +01:00
CalDescent
fcd0d71cb6 Merge branch 'share-bin-activation' 2022-07-17 19:20:19 +01:00
catbref
275bee62d9 Revert BlockMinter to using long-lifetime repository session.
Although BlockMinter could reattach a repository session to its cache of potential blocks,
and these blocks would in turn reattach that repository session to their transactions,
further transaction-specific fields (e.g. creator PublicKeyAccount) were not being updated.

This would lead to NPEs like the following:

Exception in thread "BlockMinter" java.lang.NullPointerException
        at org.qortal.repository.hsqldb.HSQLDBRepository.cachePreparedStatement(HSQLDBRepository.java:587)
        at org.qortal.repository.hsqldb.HSQLDBRepository.prepareStatement(HSQLDBRepository.java:569)
        at org.qortal.repository.hsqldb.HSQLDBRepository.checkedExecute(HSQLDBRepository.java:609)
        at org.qortal.repository.hsqldb.HSQLDBAccountRepository.getBalance(HSQLDBAccountRepository.java:327)
        at org.qortal.account.Account.getConfirmedBalance(Account.java:72)
        at org.qortal.transaction.MessageTransaction.isValid(MessageTransaction.java:200)
        at org.qortal.block.Block.areTransactionsValid(Block.java:1190)
        at org.qortal.block.Block.isValid(Block.java:1137)
        at org.qortal.controller.BlockMinter.run(BlockMinter.java:301)

where the Account has an associated repository session which is now obsolete.

This commit reverts BlockMinter back to obtaining a repository session before entering main loop.
2022-07-17 13:53:07 +01:00
CalDescent
97221a4449 Added test to simulate level 7-8 reward tier activation, including orphaning. 2022-07-17 13:37:52 +01:00
CalDescent
508a34684b Revert "qoraHoldersShare reworked to qoraHoldersShareByHeight."
This reverts commit 90e8cfc737.

# Conflicts:
#	src/test/java/org/qortal/test/minting/RewardTests.java
2022-07-16 18:45:49 +01:00
CalDescent
3d2144f303 Check orphaning in levels 7-8 and 9-10 reward tests. This would have been tested in orphanCheck() anyway, but this makes the testing a bit more granular. 2022-07-16 17:29:16 +01:00
CalDescent
3c7fbed709 Fixed build error due to merge. 2022-07-15 11:53:30 +01:00
CalDescent
fb9a155e4c Merge branch 'master' into pirate-chain
# Conflicts:
#	src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java
#	src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java
2022-07-15 11:46:45 +01:00
CalDescent
fbcc870d36 Added informational test to compare ConsiceSet size against an int array for online account nonce arrays. 2022-07-15 10:23:14 +01:00
CalDescent
020e59743b Fixed failing test(s) due to merge. 2022-07-10 19:49:24 +01:00
CalDescent
0904de3f71 Merge branch 'master' into online-accounts-mempow-v2
# Conflicts:
#	src/main/java/org/qortal/block/BlockChain.java
2022-07-10 16:50:28 +01:00
CalDescent
35f3430687 Added share bin activation feature.
To prevent a single or very small number of minters receiving the rewards for an entire tier, share bins can now require "activation". This adds the requirement that a minimum number of accounts must be present in a share bin before it is considered active. When inactive, the rewards and minters are added to the previous tier.

Summary of new functionality:

- If a share bin has more than one, but fewer than 30 accounts present, the rewards and accounts are shifted to the previous share bin (see the sketch after this commit).
- This process is iterative, so the accounts can shift through multiple tiers until the minimum number of accounts is met, OR the share bin's starting level is less than shareBinActivationMinLevel.
- Applies to level 7+, so that no backwards support is needed. It will only take effect once the first account reaches level 7.

This requires hot swapping the sharesByLevel data to combine tiers where needed, so is a considerable shift away from the immutable array that was in place previously.

All existing and new unit tests are now passing, however a lot more testing will be needed.
2022-07-10 12:09:44 +01:00
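A simplified sketch of the iterative shift-to-previous-tier behaviour described above, assuming a minimum of 30 accounts per bin and a lowest bin index below which no further shifting happens (standing in for shareBinActivationMinLevel). The data structures are illustrative only, not the actual Block reward code:

import java.util.List;

public class ShareBinActivationSketch {
    static final int MIN_ACCOUNTS_TO_ACTIVATE = 30;

    // Returns the index of the share bin that should actually receive the rewards intended for binIndex.
    static int effectiveBinIndex(List<Integer> accountCountsPerBin, int binIndex, int minActivationBinIndex) {
        int index = binIndex;
        // Shift down through occupied-but-inactive bins until an active bin is found
        // or the lowest bin allowed to absorb them is reached.
        while (index > minActivationBinIndex) {
            int count = accountCountsPerBin.get(index);
            if (count == 0 || count >= MIN_ACCOUNTS_TO_ACTIVATE)
                break;
            index--;
        }
        return index;
    }

    public static void main(String[] args) {
        // Made-up counts for the level 1-2, 3-4, 5-6, 7-8 and 9-10 bins.
        List<Integer> counts = List.of(120, 80, 40, 3, 1);
        System.out.println(effectiveBinIndex(counts, 3, 2)); // 2: the level 7-8 bin is occupied but inactive
    }
}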
CalDescent
90e8cfc737 qoraHoldersShare reworked to qoraHoldersShareByHeight.
This allows the QORA share percentage to be modified at different heights, based on community votes. Added unit test to simulate a reduction.
2022-07-08 11:12:58 +01:00
CalDescent
57bd3c3459 Merge remote-tracking branch 'catbref/auto-update-fix' 2022-07-07 18:48:39 +01:00
CalDescent
ad0d8fac91 Bump version to 3.4.1 2022-07-05 20:56:40 +01:00
CalDescent
a8b58d2007 Reward share limit activation timestamp set to 1657382400000 (Sat Jul 09 2022 16:00:00 UTC) 2022-07-05 20:34:23 +01:00
CalDescent
a099ecf55b Merge branch 'reduce-reward-shares' 2022-07-04 19:58:48 +01:00
CalDescent
6b91b0477d Added version query string param to /blocks/signature/{signature}/data API endpoint, to allow for optional V2 block serialization (with a single combined AT states hash).
Version can only be specified when querying unarchived blocks; archived blocks require V1 for now (and possibly V2 in the future).
2022-07-04 19:57:54 +01:00
CalDescent
fe2c63e8e4 Generate random nonces for test accounts.
These don't have to be valid for unit tests, because they are treated as "cached already valid accounts" in the block validation.
2022-07-02 17:30:31 +01:00
CalDescent
a3febdf00e Pass timestamp to OnlineAccountsManager.isMemoryPoWActive() so that block timestamp can be used. 2022-07-02 17:26:53 +01:00
CalDescent
4ca174fa0b Fixed bug in isMemoryPoWActive() which affected the ability to enable mempow via settings. 2022-07-02 13:45:39 +01:00
CalDescent
294582f136 Added mempow support in OnlineAccountsManager.
- This adds mempow requirements to online account importing (after activation timestamp), however doesn't yet add any requirements to block validation.
- It also causes the 'next' online accounts timestamp to be computed in addition to the 'current', so that the computed nonce value is ready when the next online accounts timestamp window begins.
2022-07-02 12:33:02 +01:00
CalDescent
d7e7c1f48c Fixed bugs from merge conflict, causing incorrect systray statuses in some cases. 2022-07-02 10:33:00 +01:00
CalDescent
215800fb67 Added optional "timeout" parameter to MemoryPoW.compute2().
This can be used to give up after the specified number of milliseconds.
2022-07-01 22:36:51 +01:00
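The optional timeout is essentially a time-bounded compute loop: keep searching for a nonce and give up once the deadline passes. The sketch below shows the general pattern only; it is not MemoryPoW.compute2()'s actual signature or algorithm, and the difficulty check is a placeholder:

public class TimeBoundedComputeSketch {
    // Returns a nonce, or null if the timeout (in milliseconds) elapsed first; a null timeout means no limit.
    static Integer computeWithTimeout(byte[] data, int difficulty, Long timeoutMillis) {
        long deadline = timeoutMillis == null ? Long.MAX_VALUE : System.currentTimeMillis() + timeoutMillis;
        for (int nonce = 0; nonce >= 0; nonce++) {
            if (System.currentTimeMillis() > deadline)
                return null; // give up after the requested number of milliseconds
            if (meetsDifficulty(data, nonce, difficulty))
                return nonce;
        }
        return null;
    }

    // Placeholder standing in for the real memory-hard verification.
    static boolean meetsDifficulty(byte[] data, int nonce, int difficulty) {
        return (data.length + nonce) % (1 << difficulty) == 0;
    }

    public static void main(String[] args) {
        System.out.println(computeWithTimeout(new byte[32], 4, 2_000L));
    }
}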
CalDescent
b05d428b2e Added onlineAccountsMemPoWEnabled setting (for beta testing) 2022-07-01 22:31:51 +01:00
CalDescent
d2adadb600 Added onlineAccountsMemoryPoWTimestamp to blockchain.json 2022-07-01 22:31:30 +01:00
CalDescent
8e8c0b3fc5 Added OnlineAccountsV3Message, along with optional nonce Integer in OnlineAccountData.
This could potentially be released ahead of the other mempow code, splitting the rollout into multiple smaller phases.
2022-07-01 22:29:05 +01:00
CalDescent
65d63487f3 Merge branch 'master' into increase-online-timestamp-modulus 2022-07-01 17:46:35 +01:00
CalDescent
7c5932a512 GET /admin/status now returns online account submission status for "isMintingPossible", instead of BlockMinter status.
Online account credit is a more useful definition of "minting" than block signing, from the user's perspective. Should bring UI minting/syncing status in line with the core's systray status.
2022-07-01 17:29:15 +01:00
CalDescent
610a3fcf83 Improved order in getNodeType() 2022-07-01 16:48:57 +01:00
CalDescent
b329dc41bc Updated incorrect ONLINE_ACCOUNTS_V3_PEER_VERSION to 3.4.0 2022-07-01 13:36:56 +01:00
CalDescent
ff78606153 Merge branch 'master' into increase-online-timestamp-modulus 2022-07-01 13:14:15 +01:00
CalDescent
ef249066cd Updated another reference of SimpleTransaction::getTimestamp 2022-07-01 13:13:55 +01:00
CalDescent
80188629df Don't aggregate signatures when running OnlineAccountsTests, as it's too difficult to check how many unique signatures exist over a given period of time. 2022-07-01 13:13:22 +01:00
CalDescent
f77093731c Merge branch 'master' into increase-online-timestamp-modulus
# Conflicts:
#	src/main/java/org/qortal/block/Block.java
#	src/main/java/org/qortal/controller/OnlineAccountsManager.java
2022-07-01 13:03:19 +01:00
CalDescent
ca7d58c272 SimpleTransaction.timestamp is now in milliseconds instead of seconds.
Should fix 1970 timestamp issue in UI for foreign transactions, and also maintains consistency with QORT wallet transactions.
2022-07-01 12:46:20 +01:00
CalDescent
08f3351a7a Reward share transaction modifications:
- Reduce concurrent reward share limit from 6 to 3 (or from 5 to 2 when including self share) - as per community vote.
- Founders remain at 6 (5 when including self share) - also decided in community vote.
- When all slots are being filled, require that at least one is a self share, so that not all can be used for sponsorship.
- Activates at future undecided timestamp.
2022-07-01 12:18:48 +01:00
QuickMythril
f499ada94c Merge pull request #91 from qortish/master
Update SysTray_sv.properties
2022-06-29 08:44:47 -04:00
qortish
f073040c06 Update SysTray_sv.properties
proper
2022-06-29 14:04:27 +02:00
CalDescent
49bfb43bd2 Updated AdvancedInstaller project for v3.4.0 2022-06-28 22:56:11 +01:00
CalDescent
425c70719c Bump version to 3.4.0 2022-06-28 19:29:09 +01:00
CalDescent
1420aea600 aggregateSignatureTimestamp set to 1656864000000 (Sun Jul 03 2022 16:00:00 UTC) 2022-06-28 19:26:00 +01:00
CalDescent
4543062700 Updated blockchain.json files in unit tests to include an already active "aggregateSignatureTimestamp" 2022-06-28 19:22:54 +01:00
CalDescent
722468a859 Restrict relays to v3.4.0 peers and above, in attempt to avoid bugs causing older peers to break relay chains. 2022-06-27 19:38:30 +01:00
CalDescent
492a9ed3cf Fixed more message rebroadcasts that were missing IDs. 2022-06-26 20:02:08 +01:00
CalDescent
420b577606 No longer adding inferior chain signatures in comparePeers() as it doesn't seem 100% reliable in some cases. It's better to re-check weights on each pass. 2022-06-26 18:24:33 +01:00
CalDescent
434038fd12 Reduced online accounts log spam 2022-06-26 16:34:04 +01:00
CalDescent
a9b154b783 Modified BlockMinter.higherWeightChainExists() so that it checks for invalid blocks before treating a chain as higher weight. Otherwise minting is slowed down when a higher weight but invalid chain exists on the network (e.g. after a hard fork). 2022-06-26 15:54:41 +01:00
CalDescent
a01652b816 Removed hasInvalidBlock filtering, as this was unnecessary risk now that the original bug in comparePeers() is fixed. 2022-06-26 10:09:24 +01:00
CalDescent
4440e82bb9 Fixed long term bug in comparePeers() causing peers with invalid blocks to prevent alternate valid but lower weight candidates from being chosen. 2022-06-25 16:34:42 +01:00
CalDescent
a2e1efab90 Synchronize hasInvalidBlock predicate, as it wasn't thread safe 2022-06-25 14:12:21 +01:00
CalDescent
7e1ce38f0a Fixed major bug in hasInvalidBlock predicate 2022-06-25 14:11:25 +01:00
CalDescent
a93bae616e Invalid signatures are now stored as ByteArray instead of String, to avoid regular Base58 encoding and decoding, which is very inefficient. 2022-06-25 13:29:53 +01:00
CalDescent
a2568936a0 Synchronizer: filter out peers reporting to hold invalid block signatures.
We already mark peers as misbehaved if they returned invalid signatures, but this wasn't sufficient when multiple copies of the same invalid block exist on the network (e.g. after a hard fork). In these cases, we need to be more proactive to avoid syncing with these peers, to increase the chances of preserving other candidate blocks.
2022-06-25 12:45:19 +01:00
CalDescent
23408827b3 Merge remote-tracking branch 'catbref/schnorr-agg-BlockMinter-fix' into schnorr-agg-BlockMinter-fix
# Conflicts:
#	src/main/java/org/qortal/block/BlockChain.java
#	src/main/java/org/qortal/controller/OnlineAccountsManager.java
#	src/main/java/org/qortal/network/message/BlockV2Message.java
#	src/main/resources/blockchain.json
#	src/test/resources/test-chain-v2.json
2022-06-24 11:47:58 +01:00
CalDescent
ae6e2fab6f Rewrite of isNotOldPeer predicate, to fix logic issue (second attempt - first had too many issues)
Previously, a peer would be continuously considered not 'old' if it had a connection attempt in the past day. This prevented some peers from being removed, causing nodes to hold a large repository of peers. On slower systems, this large number of known peers resulted in low numbers of outbound connections being made, presumably because of the time taken to iterate through the dataset, using up a lot of allKnownPeers lock time.

On devices that experienced the problem, it could be solved by deleting all known peers. This adds confidence that the old peers were the problem.
2022-06-24 10:36:06 +01:00
CalDescent
3af36644c0 Revert "Rewrite of isNotOldPeer predicate, to fix logic issue."
This reverts commit d81071f254.
2022-06-24 10:26:39 +01:00
CalDescent
db8f627f1a Default minPeerVersion set to 3.3.7 2022-06-24 10:13:55 +01:00
CalDescent
5db0fa080b Prune peers every 5 minutes instead of every cycle of the Controller thread.
This should reduce the amount of time the allKnownPeers lock is held.
2022-06-24 10:13:36 +01:00
CalDescent
d81071f254 Rewrite of isNotOldPeer predicate, to fix logic issue.
Previously, a peer would be continuously considered not 'old' if it had a connection attempt in the past day. This prevented some peers from being removed, causing nodes to hold a large repository of peers. On slower systems, this large number of known peers resulted in low numbers of outbound connections being made, presumably because of the time taken to iterate through the dataset, using up a lot of allKnownPeers lock time.

On devices that experienced the problem, it could be solved by deleting all known peers. This adds confidence that the old peers were the problem.
2022-06-24 10:11:46 +01:00
QuickMythril
ba148dfd88 Added Korean translations
credit: TL (Discord username)
2022-06-23 02:35:42 -04:00
QuickMythril
ff40b8f8ab Updated German translations 2022-06-23 01:43:33 -04:00
CalDescent
dbcb457a04 Merge branch 'master' of github.com:Qortal/qortal 2022-06-20 22:51:33 +01:00
CalDescent
b00e1c8f47 Allow online account submission in all cases when in recovery mode. 2022-06-20 22:50:41 +01:00
CalDescent
899a6eb104 Rework of systray statuses
- Show "Minting" as long as online accounts are submitted to the network (previously it related to block signing).
- Fixed bug causing it to regularly show "Synchronizing 100%".
- Only show "Synchronizing" if the chain falls more than 2 hours behind - anything less is unnecessary noise.
2022-06-20 22:48:32 +01:00
CalDescent
6e556c82a3 Updated AdvancedInstaller project for v3.3.7 2022-06-20 22:25:40 +01:00
CalDescent
35ce64cc3a Bump version to 3.3.7 2022-06-20 21:52:41 +01:00
CalDescent
09b218d16c Merge branch 'master' of github.com:Qortal/qortal 2022-06-20 21:51:39 +01:00
CalDescent
7ea451e027 Allow trades to be initiated, and QDN data to be published, as long as the latest block is within 60 minutes of the current time. Again this should remove negative effects of larger re-orgs from the UX. 2022-06-20 21:26:48 +01:00
CalDescent
ffb27c3946 Further relaxed min latest block timestamp age to be considered "up to date" in a few places, from 30 to 60 mins. This should help reduce the visible effects of larger re-orgs if they happen again. 2022-06-20 21:25:14 +01:00
QuickMythril
6e7d2b50a0 Added Romanian translations
credit: Ovidiu (Telegram username)
2022-06-20 01:27:28 -04:00
QuickMythril
bd025f30ff Updated ApiError German translations
credit: CD (Discord username)
2022-06-20 00:56:19 -04:00
CalDescent
c6cbd8e826 Revert "Sync behaviour changes:"
This reverts commit 8a76c6c0de.
2022-06-19 19:10:37 +01:00
CalDescent
b85afe3ca7 Revert "Keep existing findCommonBlocksWithPeers() and comparePeers() behaviour prior to consensus switchover, to reduce the number of variables."
This reverts commit fecfac5ad9.
2022-06-19 19:10:30 +01:00
CalDescent
5a4674c973 Revert "newConsensusTimestamp set to Sun Jun 19 2022 16:00:00 UTC"
This reverts commit 55a0c10855.
2022-06-19 19:09:57 +01:00
CalDescent
769418e5ae Revert "Fixed unit tests due to missing feature trigger"
This reverts commit 28f9df7178.
2022-06-19 19:09:52 +01:00
CalDescent
38faed5799 Don't submit online accounts if node is more than 2 hours out of sync. 2022-06-19 18:56:08 +01:00
catbref
10a578428b Improve intermittent auto-update failures, mostly under non-Windows environments.
Symptoms are:
* AutoUpdate trying to run new ApplyUpdate process, but nothing appears in log-apply-update.?.txt
* Main qortal.jar process continues to run without updating
* Last AutoUpdate line in log.txt.? is:
	2022-06-18 15:42:46 INFO  AutoUpdate:258 - Applying update with: /usr/local/openjdk11/bin/java -Djava.net.preferIPv4Stack=false -Xss256k -Xmx1024m -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=127.0.0.1:5005 -cp new-qortal.jar org.qortal.ApplyUpdate

Changes are:
* child process now inherits parent's stdout / stderr (was piped from parent)
* child process is given a fresh stdin, which is immediately closed
* AutoUpdate now converts -agentlib JVM arg to -DQORTAL_agentlib
* ApplyUpdate converts -DQORTAL_agentlib to -agentlib

The latter two changes are to prevent a conflict where two processes try to reuse the same JVM debugging port number.
2022-06-19 18:25:00 +01:00
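A rough sketch of the JVM argument rewriting described in the last two bullet points. The helper and the exact property format are assumptions, not the actual AutoUpdate/ApplyUpdate code.

```java
import java.util.List;
import java.util.stream.Collectors;

/** Illustrative-only: swap -agentlib for a plain system property so a child JVM can't clash on the debug port. */
public class JvmArgRewriteSketch {

    /** AutoUpdate direction: hide -agentlib behind a hypothetical -DQORTAL_agentlib property. */
    static List<String> maskAgentlib(List<String> jvmArgs) {
        return jvmArgs.stream()
                .map(arg -> arg.startsWith("-agentlib:")
                        ? "-DQORTAL_agentlib=" + arg.substring("-agentlib:".length())
                        : arg)
                .collect(Collectors.toList());
    }

    /** ApplyUpdate direction: restore the original -agentlib argument before relaunching qortal.jar. */
    static List<String> unmaskAgentlib(List<String> jvmArgs) {
        return jvmArgs.stream()
                .map(arg -> arg.startsWith("-DQORTAL_agentlib=")
                        ? "-agentlib:" + arg.substring("-DQORTAL_agentlib=".length())
                        : arg)
                .collect(Collectors.toList());
    }
}
```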
CalDescent
96cdf4a87e Updated AdvancedInstaller project for v3.3.6 2022-06-19 11:41:50 +01:00
CalDescent
c0b1580561 Bump version to 3.3.6 2022-06-17 20:42:51 +01:00
CalDescent
28f9df7178 Fixed unit tests due to missing feature trigger 2022-06-17 13:07:33 +01:00
CalDescent
55a0c10855 newConsensusTimestamp set to Sun Jun 19 2022 16:00:00 UTC 2022-06-17 13:04:04 +01:00
CalDescent
7c5165763d Merge branch 'sync-long-tip' 2022-06-17 13:02:08 +01:00
CalDescent
d2836ebcb9 Make sure to set the ID when rebroadcasting messages. Hopeful fix for QDN networking issues. 2022-06-16 22:39:51 +01:00
CalDescent
fecfac5ad9 Keep existing findCommonBlocksWithPeers() and comparePeers() behaviour prior to consensus switchover, to reduce the number of variables. 2022-06-16 18:38:27 +01:00
CalDescent
5ed1ec8809 Merge remote-tracking branch 'catbref/sync-long-tip' into sync-long-tip 2022-06-16 18:35:16 +01:00
catbref
431cbf01af BlockMinter will discard block candidates that turn out to be invalid just prior to adding transactions, to be potentially reminted in the next pass 2022-06-16 17:47:08 +01:00
CalDescent
af792dfc06 Updated AdvancedInstaller project for v3.3.5 2022-06-15 21:11:12 +01:00
CalDescent
d3b6c5f052 Bump version to 3.3.5 2022-06-14 23:18:46 +01:00
CalDescent
f48eb27f00 Revert "Temporarily limit block minter as first stage of multipart minting fix. This will be reverted almost immediately after release."
This reverts commit 0eebfe4a8c.
2022-06-14 22:42:44 +01:00
CalDescent
b02ac2561f Revert "Safety check - also to be removed shortly."
This reverts commit 8b3f9db497.
2022-06-14 22:42:38 +01:00
CalDescent
1b2f66b201 Bump version to 3.3.4 2022-06-13 23:40:39 +01:00
CalDescent
e992f6b683 Increase column size to allow for approx 10x as many online accounts in each block. 2022-06-13 22:51:48 +01:00
CalDescent
8b3f9db497 Safety check - also to be removed shortly. 2022-06-13 22:41:10 +01:00
CalDescent
0eebfe4a8c Temporarily limit block minter as first stage of multipart minting fix. This will be reverted almost immediately after release. 2022-06-13 22:40:54 +01:00
lexandr0s
c03344caae Update ApiError_ru.properties 2022-06-04 23:57:25 +04:00
lexandr0s
237b39a524 Update SysTray_ru.properties 2022-06-04 23:50:03 +04:00
CalDescent
12b3fc257b Updated AdvancedInstaller project for v3.3.3 2022-06-04 19:50:43 +01:00
CalDescent
66a3322ea6 Bump version to 3.3.3 2022-06-04 19:16:01 +01:00
CalDescent
4965cb7121 Set BlockV2Message min peer version to 3.3.3 2022-06-04 15:50:17 +01:00
CalDescent
b92b1fecb0 disableReferenceTimestamp set to 1655222400000 (Tuesday, 14 June 2022 16:00:00 UTC) 2022-06-04 14:51:37 +01:00
CalDescent
43a75420d0 Merge branch 'disable-reference' 2022-06-04 14:46:51 +01:00
catbref
e85026f866 Initial work on reducing network load for transferring blocks.
Reduced AT state info from per-AT address + state hash + fees to AT count + total AT fees + hash of all AT states.
Modified Block and Controller to support above. Controller needs more work regarding CachedBlockMessages.
Note that blocks fetched from archive are in old V1 format.
Changed Triple<BlockData, List<TransactionData>, List<ATStateData>> to BlockTransformation to support both V1 and V2 forms.

Set min peer version to 3.3.203 in BlockV2Message class.
2022-06-04 14:43:46 +01:00
CalDescent
ba7b9f3ad8 Added feature to allow repository to be kept intact after running certain tests 2022-06-04 14:17:01 +01:00
catbref
4eb58d3591 BlockTimestampTests to show results from changing blockTimingsByHeight 2022-06-04 12:36:36 +01:00
catbref
8d8e58a905 Network$NetworkProcessor now has its own LOGGER 2022-06-04 12:36:36 +01:00
catbref
8f58da4f52 OnlineAccountsManager:
Bump v3 min peer version from 3.2.203 to 3.3.203
No need for toOnlineAccountTimestamp(long) as we only ever use getCurrentOnlineAccountTimestamp().
Latter now returns Long and does the call to NTP.getTime() on behalf of caller, removing duplicated NTP.getTime() calls and null checks in multiple callers.
Add aggregate-signature feature-trigger timestamp threshold checks where needed, near sign() and verify() calls.
Improve logging - but some logging will need to be removed / reduced before merging.
2022-06-04 12:36:36 +01:00
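The timestamp helper described above might look something like this. It is a minimal sketch that takes the NTP time as a parameter to stay self-contained, and uses a hypothetical modulus value rather than the real chain setting.

```java
/** Illustrative-only: round an NTP timestamp down to the online-accounts timestamp boundary. */
public class OnlineAccountTimestampSketch {

    // Hypothetical modulus (5 minutes in ms); the real value comes from blockchain settings
    private static final long ONLINE_TIMESTAMP_MODULUS = 5 * 60 * 1000L;

    /** Returns the current online-accounts timestamp, or null if NTP time isn't available yet. */
    public static Long getCurrentOnlineAccountTimestamp(Long ntpTime) {
        if (ntpTime == null)
            return null; // single null check here, so callers don't need to repeat it

        return (ntpTime / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS;
    }
}
```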
catbref
a4e2aedde1 Remove debug-hindering "final" modifier from effectively final locals 2022-06-04 12:36:36 +01:00
catbref
24d04fe928 Block.mint() always uses latest timestamped online accounts 2022-06-04 12:36:35 +01:00
catbref
0cf32f6c5e BlockMinter now only acquires repository instance as needed to prevent long HSQLDB rollbacks 2022-06-04 12:35:54 +01:00
catbref
84d850ee0b WIP: use blockchain feature-trigger "aggregateSignatureTimestamp" to determine when online-accounts sigs and block sigs switch to aggregate sigs 2022-06-04 12:35:51 +01:00
catbref
51930d3ccf Move some private key methods to Crypto class 2022-06-04 12:35:15 +01:00
catbref
c5e5316f2e Schnorr public key and signature aggregation for 'online accounts'.
Aggregated signature should reduce block payload significantly,
as well as associated network, memory & CPU loads.

org.qortal.crypto.BouncyCastle25519 renamed to Qortal25519Extras.
Our class provides additional features such as DH-based shared secret,
aggregating public keys & signatures and sign/verify for aggregate use.

BouncyCastle's Ed25519 class copied in as BouncyCastleEd25519,
but with 'private' modifiers changed to 'protected',
to allow extension by our Qortal25519Extras class,
and to avoid lots of messy reflection-based calls.
2022-06-04 12:35:15 +01:00
catbref
829ab1eb37 Cherry-pick minor fixes from another branch to resolve "No online accounts - not even our own?" issues 2022-06-04 12:35:03 +01:00
catbref
d9b330b46a OnlineAccountData no longer uses signature in equals() or hashCode() because newer aggregate signatures use random nonces and OAD class doesn't care about / verify sigs 2022-06-04 10:49:59 +01:00
catbref
c032b92d0d Logging fix: size() was called on wrong collection, leading to confusing logging output 2022-06-04 10:49:59 +01:00
catbref
ae92a6eed4 OnlineAccountsV3: slightly rework Block.mint() so it doesn't need to filter so many online accounts
Slight optimization to BlockMinter by adding OnlineAccountsManager.hasOnlineAccounts():boolean instead of returning actual data, only to call isEmpty()!
2022-06-04 10:49:59 +01:00
catbref
712c4463f7 OnlineAccountsV3:
Move online account cache code from Block into OnlineAccountsManager, simplifying Block code and removing duplicated caches from Block also.
This tidies up those remaining set-based getters in OnlineAccountsManager.
No need for currentOnlineAccountsHashes's inner Map to be sorted, so addAccounts() creates a new ConcurrentHashMap instead of ConcurrentSkipListMap.

Changed GetOnlineAccountsV3Message to use a single byte for count of hashes as it can only be 1 to 256.
256 is represented by 0.

Comments tidy-up.
Change v3 broadcast interval from 10s to 15s.
2022-06-04 10:49:59 +01:00
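The single-byte count encoding mentioned above (1 to 256, with 256 sent as 0) can be illustrated as follows; a minimal sketch, not the actual GetOnlineAccountsV3Message code.

```java
/** Illustrative-only: encode a hash count of 1..256 in one unsigned byte, where 0 represents 256. */
public class HashCountByteSketch {

    static byte encodeCount(int count) {
        if (count < 1 || count > 256)
            throw new IllegalArgumentException("count must be between 1 and 256");
        return (byte) (count == 256 ? 0 : count);
    }

    static int decodeCount(byte encoded) {
        int value = encoded & 0xFF;
        return value == 0 ? 256 : value;
    }
}
```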
catbref
fbdc1e1cdb OnlineAccountsV3:
Adding support for GET_ONLINE_ACCOUNTS_V3 to Controller, which calls OnlineAccountsManager.

With OnlineAccountsV3, instead of nodes sending their list of known online accounts (public keys),
nodes now send a summary which contains hashes of known online accounts, one per timestamp + leading-byte combo.
Thus outgoing messages are much smaller and scale better with more users.
Remote peers compare the hashes and send back lists of online accounts (for that timestamp + leading-byte combo) where hashes do not match.

Massive rewrite of OnlineAccountsManager to maintain online accounts.
Now there are three caches:
1. all online accounts, but split into sets by timestamp
2. 'hashes' of all online accounts, one hash per timestamp+leading-byte combination
Mainly for efficient use by GetOnlineAccountsV3 message constructor.
3. online accounts for the highest blocks on our chain to speed up block processing
Note that highest blocks might be way older than 'current' blocks if we're somewhat behind in syncing.

Other OnlineAccountsManager changes:
* Use scheduling executor service to manage subtasks
* Switch from 'synchronized' to 'concurrent' collections
* Generally switch from Lists to Sets - requires improved OnlineAccountData.hashCode() - further work needed
* Only send V3 messages to peers with version >= 3.2.203 (for testing)
* More info on which online accounts lists are returned depending on use-cases

To test, change your peer's version (in pom.xml?) to v3.2.203.
2022-06-04 10:49:59 +01:00
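The per-timestamp, per-leading-byte summary described above could be built along these lines. This is only a sketch: the real implementation may combine the public keys differently (here they are simply sorted and fed into SHA-256), and the class and method names are assumptions.

```java
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.*;

/** Illustrative-only: build one digest per (timestamp, leading public-key byte) bucket of online accounts. */
public class OnlineAccountsSummarySketch {

    /** Map of timestamp -> (leading byte -> digest of that bucket's public keys). */
    static Map<Long, Map<Byte, byte[]>> buildSummary(Map<Long, List<byte[]>> publicKeysByTimestamp)
            throws NoSuchAlgorithmException {
        Map<Long, Map<Byte, byte[]>> summary = new HashMap<>();

        for (Map.Entry<Long, List<byte[]>> entry : publicKeysByTimestamp.entrySet()) {
            // Group this timestamp's public keys by their leading byte
            Map<Byte, List<byte[]>> buckets = new HashMap<>();
            for (byte[] publicKey : entry.getValue())
                buckets.computeIfAbsent(publicKey[0], k -> new ArrayList<>()).add(publicKey);

            // One hash per bucket; sort first so both peers hash the same byte ordering
            Map<Byte, byte[]> hashes = new HashMap<>();
            for (Map.Entry<Byte, List<byte[]>> bucket : buckets.entrySet()) {
                List<byte[]> keys = bucket.getValue();
                keys.sort(Arrays::compareUnsigned);
                MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
                for (byte[] key : keys)
                    sha256.update(key);
                hashes.put(bucket.getKey(), sha256.digest());
            }
            summary.put(entry.getKey(), hashes);
        }
        return summary;
    }
}
```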
catbref
f2060fe7a1 Initial work on online-accounts-v3 network messages to drastically reduce network load.
Lots of TODOs to action.
2022-06-04 10:49:59 +01:00
catbref
6950c6bf69 Initial work on reducing network load for transferring blocks.
Reduced AT state info from per-AT address + state hash + fees to AT count + total AT fees + hash of all AT states.
Modified Block and Controller to support above. Controller needs more work regarding CachedBlockMessages.
Note that blocks fetched from archive are in old V1 format.
Changed Triple<BlockData, List<TransactionData>, List<ATStateData>> to BlockTransformation to support both V1 and V2 forms.

Set min peer version to 3.3.203 in BlockV2Message class.
2022-06-04 10:49:11 +01:00
catbref
8a76c6c0de Sync behaviour changes:
* Prefer longer chains
* Compare chain tips
* Skip pre-sync all-peer sorting (for now)
2022-06-04 10:23:31 +01:00
CalDescent
ef51cf5702 Added defensiveness in getOnlineTimestampModulus(), just in case NTP.getTime() returns null 2022-06-02 12:46:11 +01:00
CalDescent
0c3988202e Merge branch 'master' into increase-online-timestamp-modulus 2022-06-02 12:38:05 +01:00
CalDescent
987446cf7f Updated AdvancedInstaller project for v3.3.2 2022-06-02 11:31:56 +01:00
CalDescent
6dd44317c4 Sync pirate wallet every 30 seconds instead of 60, to match behaviour of official wallet. 2022-05-31 10:09:37 +02:00
CalDescent
d2fc705846 Merge remote-tracking branch 'catbref/UnsupportedMessage' 2022-05-31 09:09:08 +02:00
CalDescent
e393150e9c Require references to be the correct length post feature-trigger 2022-05-30 22:40:39 +02:00
CalDescent
43bfd28bcd Revert "Discard unsupported messages instead of disconnecting the peer."
This reverts commit d086ade91f.
2022-05-30 21:46:16 +02:00
catbref
ca8f8a59f4 Better forwards compatibility with newer message types so we don't disconnect newer peers 2022-05-30 20:41:45 +01:00
CalDescent
85a26ae052 Another rework of null seed wallets, to allow them to be saved and loaded.
A full sync is unavoidable for P2SH redeem/refund, so we need to be able to save our progress. Creating a new null seed wallet each time isn't an option because it relies on having a recent checkpoint to avoid having to sync large amounts of blocks every time (sync is per wallet, not per node).
2022-05-30 19:31:55 +02:00
CalDescent
c30b1145a1 Improved ensureSynchronized() as it would often not notice an unsynced wallet. 2022-05-30 18:14:26 +02:00
CalDescent
d086ade91f Discard unsupported messages instead of disconnecting the peer. 2022-05-30 15:27:42 +02:00
CalDescent
64d4c458ec Fixed logging error 2022-05-30 15:16:44 +02:00
CalDescent
2478450694 Revert "Removed "consecutive blocks" limitation in block minter."
This reverts commit f41fbb3b3d.
2022-05-30 13:49:15 +02:00
CalDescent
9f19a042e6 Added test to ensure short (1 byte) references can be imported. 2022-05-30 13:46:00 +02:00
CalDescent
922ffcc0be Modified post-trigger last reference checking, to now require a non-null value
This allows for compatibility with TRANSFER_PRIVS validation in commit 8950bb7, which treats any account with a non-null reference as "existing". It also avoids possible unknown side effects from trying to process and store transactions with a null reference - something that wouldn't have been possible until the validation was removed.
2022-05-30 13:22:15 +02:00
CalDescent
f887fcafe3 Disable last reference validation after feature trigger timestamp (not yet set).
This should prevent the failed transactions that are encountered when issuing two or more in a short space of time. Using a feature trigger (hard fork) to release this, to avoid potential consensus confusion around the time of the update (older versions could consider the main chain invalid until updating).
2022-05-30 13:06:55 +02:00
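Taken together with the follow-up commit above that still requires a non-null reference, the feature-trigger gate could be pictured like this. The trigger constant and method are placeholders, not the real BlockChain/Transaction code.

```java
import java.util.Arrays;

/** Illustrative-only: last-reference validation gated by a feature-trigger timestamp. */
public class ReferenceValidationSketch {

    // Placeholder: the real trigger timestamp was "not yet set" at the time of the commit above
    private static final long DISABLE_REFERENCE_TIMESTAMP = Long.MAX_VALUE;

    static boolean isReferenceValid(long transactionTimestamp, byte[] lastReference, byte[] accountReference) {
        if (transactionTimestamp >= DISABLE_REFERENCE_TIMESTAMP)
            // Post-trigger: only require that some non-null reference is present
            return lastReference != null;

        // Pre-trigger: the reference must match the account's current last reference
        return lastReference != null && Arrays.equals(lastReference, accountReference);
    }
}
```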
CalDescent
48b562f71b Auto update check interval slowed from 10s to 30s, to hopefully reduce the chance of encountering "repository in use by another process?" error. 2022-05-28 14:33:37 +02:00
CalDescent
5203742b05 Started work on architecture-specific lite wallet library loading. Paths are not yet correct. 2022-05-28 14:31:38 +02:00
CalDescent
f14b494bfc "Disposable" wallets renamed to "null seed" wallets, as this is a better description of what they are. 2022-05-28 14:28:42 +02:00
CalDescent
9a4ce57001 Increase blockchain lock wait time from 30 to 60 seconds in /transactions/process.
This will hopefully reduce the number of failed tradeoffer listings that result in a nonfunctional tradebot (and subsequent PENDING status shown in the UI)
2022-05-28 13:33:09 +02:00
CalDescent
10af961fdf Consider a node with a block in the last 30 mins to be "up to date" when trading. 2022-05-27 22:31:21 +02:00
CalDescent
33cffe45fd Bump version to 3.3.2 2022-05-27 17:23:53 +02:00
CalDescent
a0ce75a978 Minimum BTC order amount set to 0.001 BTC. Anything lower than that will result in greater than 10% fees. 2022-05-27 16:59:34 +02:00
QuickMythril
8d168f6ad4 Override default Bitcoin trade fee
Reduced to 20 sats/byte.
2022-05-27 06:57:38 -04:00
CalDescent
0875c5bf3b Fix ConcurrentModificationException in getCachedSigValidTransactions() 2022-05-27 10:34:26 +02:00
CalDescent
b17b28d9d6 Catch NoSuchMethodError in ElectrumX, and log it, just in case we ever re-encounter a dependency issue. 2022-05-27 10:33:37 +01:00
CalDescent
e95249dc1b Reduced bouncycastle version to 1.69, as 1.70 was having compatibility issues with the ElectrumX code. 2022-05-27 10:30:28 +02:00
CalDescent
bb4bdfede5 Added concept of a "disposable" pirate chain wallet.
This is needed to allow redeem/refund of P2SH without having an actively synced and initialized wallet. It also ultimately avoids us having to retain the wallet entropy in the trade bot states. Various safety checks have been introduced to make sure that a disposable wallet is never used for anything other than P2SH redeem/refund.
2022-05-27 10:29:24 +02:00
CalDescent
e2b241d416 Fix ConcurrentModificationException in getCachedSigValidTransactions() 2022-05-27 10:15:41 +02:00
CalDescent
aeb94fb879 Merge branch 'master' into pirate-chain 2022-05-27 09:16:38 +02:00
QuickMythril
8e71cbd822 Reduce static Bitcoin trade fee 2022-05-26 16:25:07 -04:00
QuickMythril
9896ec2ba6 Fix typo 2022-05-26 15:37:38 -04:00
QuickMythril
9f9a74809e Add Bitcoin ACCTv3
This provides support for restoring BTC in the Trade Portal.
2022-05-26 15:34:51 -04:00
QuickMythril
acce81cdcd Add tray menu item to show Build Version
Shows the core build version in a message dialog, for operating systems which cannot display the entire tooltip.
2022-05-26 15:10:04 -04:00
CalDescent
d72953ae78 Drop expired transactions from the import queue before they are considered "sig valid".
This should prevent expired transactions from being kept alive, adding unnecessary load to the import queue.
2022-05-25 19:06:08 +01:00
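A minimal sketch of the expiry pass described above; the queue shape and deadline field are assumptions rather than the actual transaction importer structures.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Illustrative-only: drop expired transactions from an import queue before signature validation. */
public class ImportQueueExpirySketch {

    /** Hypothetical queue: Base58 transaction signature -> transaction deadline (ms since epoch). */
    private final Map<String, Long> importQueue = new ConcurrentHashMap<>();

    /** Removes anything whose deadline has passed, so it never reaches the "sig valid" stage. */
    void dropExpired(long nowMillis) {
        importQueue.entrySet().removeIf(entry -> entry.getValue() <= nowMillis);
    }
}
```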
CalDescent
32213b1236 Catch all exceptions in PirateLightClient 2022-05-24 20:48:57 +01:00
CalDescent
761d461bad Bump bouncycastle version to 1.70, necessary for ALPN support in some JDKs. 2022-05-24 20:35:19 +01:00
CalDescent
774a3b3dcd Catch RuntimeException, so that the gRPC client is shutdown. 2022-05-24 19:52:23 +01:00
CalDescent
30567d0e87 Correctly handle shutdown of gRPC managed channel on error. 2022-05-24 19:32:41 +01:00
CalDescent
6b53eb5384 Pirate Chain uses the 'b' prefix for P2SH addresses, but the light wallet library is configured to use 't3' (from Zcash), so it's easiest to just derive a different prefix for each destination.
This could be simplified by configuring the light wallet library to use the correct 'b' prefix, but this didn't work when first attempted.
2022-05-23 23:14:54 +01:00
CalDescent
767ef62b64 Added PirateChain.isValidWalletKey() 2022-05-23 22:11:23 +01:00
CalDescent
f7e6d1e5c8 Merge branch 'master' into pirate-chain 2022-05-23 21:55:49 +01:00
CalDescent
551686c2de Updated AdvancedInstaller project for v3.3.1 2022-05-23 21:54:25 +01:00
CalDescent
b73c041cc3 Bump version to 3.3.1 2022-05-23 20:31:36 +01:00
CalDescent
9e8d85285f Removed extra unnecessary digest after writing new data. 2022-05-22 16:13:08 +01:00
CalDescent
f41fbb3b3d Removed "consecutive blocks" limitation in block minter. 2022-05-22 16:13:04 +01:00
CalDescent
3f5240157e Return more detailed errors in trade portal APIs. 2022-05-22 16:11:20 +01:00
CalDescent
7c807f754e First draft of Pirate Chain trade bot. Not tested yet.
This has been modified to a) use full public keys instead of PKH, and b) hand off all transaction building, signing, and broadcasting to the (heavily customized) Pirate light wallet library.
2022-05-22 16:07:48 +01:00
CalDescent
9e1b23caf6 Removed unused constants. 2022-05-22 16:01:12 +01:00
CalDescent
c2bad62d36 Updated status text when not initialized. 2022-05-22 16:00:37 +01:00
CalDescent
4516d44cc0 UnspentOutput additions to support latest PirateChainHTLC methods 2022-05-22 15:55:40 +01:00
CalDescent
9c02b01318 Added PirateChainHTLC.getUnspentFundingTxid(), allowing a minimum amount to be specified.
This will ensure that the correct fundingTxid can be redeemed or refunded by the trade bot.
2022-05-22 15:55:01 +01:00
CalDescent
08fab451d2 Added PirateChainHTLC.getFundingTxid(), to lookup the txid that funded a P2SH. 2022-05-22 15:49:14 +01:00
CalDescent
d47570c642 First draft of Pirate Chain ACCT, with modifications to allow for 33 byte public keys instead of 20 byte PKH. Pirate Chain HTLCs are required to use full public keys rather than hashes. 2022-05-22 12:23:01 +01:00
CalDescent
4547386b1f Increase receiving_account_info column size from 32 to 128 bytes, to allow for Pirate Chain sapling shielded addresses, which are much longer. 2022-05-21 15:24:37 +01:00
CalDescent
ab01dc5e54 Implemented address validation for Pirate Chain 2022-05-21 09:22:49 +01:00
CalDescent
380c742aad Pirate chain minimum order size temporarily decreased to 0.0001, for testing only. 2022-05-21 09:11:24 +01:00
CalDescent
368359917b Use LegacyZcashAddress (copied and modified from bitcoinj) to derive and encode Pirate Chain P2SH addresses.
It wasn't possible to use bitcoinj directly because Zcash-style P2SH addresses have 2 prefix characters (t3), which isn't supported by bitcoinj.
2022-05-20 21:27:38 +01:00
CalDescent
1c1b570cb3 Added Pirate HTLC and raw transaction tests 2022-05-20 21:22:18 +01:00
CalDescent
3fe43372a7 Added PirateChainHTLC - uses a different HTLC format suitable for Pirate Chain.
Also removes transaction building code, since this is handled by the light wallet library.
2022-05-20 21:21:51 +01:00
CalDescent
c7bc1d7dcd Hardcode fee to MAINNET_FEE when sending coins. 2022-05-20 21:16:12 +01:00
CalDescent
a7ea6ec80d Added wrapper methods for Pirate LiteWalletJni P2SH funding/redeeming/refunding.
These require a heavily customized version of both piratewallet-light-cli and librustzcash.
2022-05-20 21:14:50 +01:00
CalDescent
9cf574b9e5 Added support for Pirate Chain transaction lookups and deserialization, necessary for HTLC status checks. 2022-05-20 21:07:54 +01:00
CalDescent
20e63a1190 Removed "consecutive blocks" limitation in block minter. 2022-05-20 13:38:56 +01:00
CalDescent
f6fc5de520 Removed extra unnecessary digest after writing new data. 2022-05-20 13:36:56 +01:00
CalDescent
0b89118cd1 Fixed bug in Pirate wallet transaction parsing. 2022-05-20 13:35:52 +01:00
CalDescent
fa3a81575a Reduce wasted time that could otherwise be spent validating queued transaction signatures. 2022-05-14 12:53:36 +01:00
CalDescent
6990766f75 Speed up unconfirmed transaction propagation.
Currently, new transactions take a very long time to be included in each block (or reach the intended recipient), because each node has to obtain a repository lock and import the transaction before it notifies its peers. This can take a long time due to the lock being held by the block minter or synchronizer, and this compounds with every peer that the transaction is routed through.

Validating signatures doesn't require a lock, and so can take place very soon after receipt of a new transaction. This change causes each node to broadcast a new transaction to its peers as soon as its signature is validated, rather than waiting until after the import.

When a notified peer then makes a request for the transaction data itself, this can now be loaded from the sig-valid import queue as an alternative to the repository (since they won't be in the repository until after the import, which likely won't have happened yet).

One small downside to this approach is that each unconfirmed transaction is now notified twice - once after the signature is deemed valid, and again in Controller.onNewTransaction(), but this should be an acceptable trade off given the speed improvements it should achieve. Another downside is that it could cause invalid transactions (with valid signatures) to propagate, but these would quickly be added to each peer's invalidUnconfirmedTransactions list after the import failure, and therefore be ignored.
2022-05-14 12:43:54 +01:00
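The flow described above can be condensed into the outline below. Class, method and lock names are placeholders, and the real code splits these steps across the importer and Controller rather than a single method.

```java
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.locks.ReentrantLock;

/** Illustrative-only outline of early unconfirmed transaction propagation. */
public class EarlyPropagationSketch {

    private final Queue<byte[]> sigValidQueue = new ConcurrentLinkedQueue<>();
    private final ReentrantLock repositoryLock = new ReentrantLock();

    /** Called on receipt of a new unconfirmed transaction. */
    void onNewTransaction(byte[] rawTransaction, List<Object> peers) {
        if (!isSignatureValid(rawTransaction))
            return;

        // 1. No repository lock needed for signature validation, so notify peers right away;
        //    peers requesting the data can be served from the sig-valid queue
        sigValidQueue.add(rawTransaction);
        broadcastToPeers(peers, rawTransaction);

        // 2. Import later, single-threaded, under the repository lock
        repositoryLock.lock();
        try {
            importTransaction(rawTransaction);
        } finally {
            repositoryLock.unlock();
        }
    }

    // Placeholders for the real signature check, broadcast and import steps
    boolean isSignatureValid(byte[] rawTransaction) { return true; }
    void broadcastToPeers(List<Object> peers, byte[] rawTransaction) { }
    void importTransaction(byte[] rawTransaction) { }
}
```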
CalDescent
e1e1a66a0b Improvements and fixes to PirateWallet 2022-05-14 12:10:35 +01:00
CalDescent
e552994f68 Merge branch 'master' into pirate-chain 2022-05-13 13:23:26 +01:00
CalDescent
b33afd99a5 Bump invalid transaction import logging from trace to debug. 2022-05-13 13:22:58 +01:00
CalDescent
3c2ba4a0ea Improved logging when importing transactions. 2022-05-13 12:31:48 +01:00
CalDescent
ab0fc07ee9 Refactored transaction importer, to separate signature validation from importing.
Importing has to be single threaded since it requires the database lock, but there's nothing to stop us from validating signatures on multiple threads, as no lock is required. So it makes sense to separate these two functions to allow for possible multi threaded signature validation in the future, to speed up the process.

Everything remains single threaded in this commit. It should be functionally the same as before, to reduce risk.
2022-05-13 12:31:19 +01:00
CalDescent
001650d48e Fixed typo in currently unused getSignaturesInvolvingAddress() method. 2022-05-13 11:28:55 +01:00
CalDescent
659431ebfd Added "txTypes" parameter to GET /transactions/unconfirmed, to allow optional filtering of unconfirmed transactions by one or more types 2022-05-13 11:28:21 +01:00
CalDescent
0a419cb105 Added "creator" parameter to GET /transactions/unconfirmed, to allow filtering unconfirmed transactions by creator's public key 2022-05-13 11:13:39 +01:00
CalDescent
a4d4d17b82 Added /wallets to .gitignore, in preparation for pirate chain support. 2022-05-12 19:34:53 +01:00
CalDescent
0829ff6908 Fixed bugs in tooltip, introduced in lite node branch 2022-05-12 19:33:12 +01:00
CalDescent
2d1b0fd6d0 Merge branch 'lite-node' 2022-05-12 08:51:05 +01:00
CalDescent
122539596d Merge branch 'qdn-direct-connections' 2022-05-12 08:45:45 +01:00
CalDescent
86015e59a1 Updated AdvancedInstaller project for v3.3.0 2022-05-10 08:27:17 +01:00
CalDescent
107a23f1ec Switch to PirateChainMainNetParams 2022-05-10 08:05:32 +01:00
CalDescent
abce068b97 Catch UnsatisfiedLinkError when initializing Pirate Chain library 2022-05-10 08:05:06 +01:00
CalDescent
28fd9241d4 Fixed issues with merge 2022-05-10 08:01:07 +01:00
CalDescent
3fc4746a52 Merge branch 'master' into pirate-chain
# Conflicts:
#	src/test/java/org/qortal/test/crosschain/DigibyteTests.java
#	src/test/java/org/qortal/test/crosschain/RavencoinTests.java
2022-05-10 07:54:47 +01:00
CalDescent
1ea1e00344 Bump version to 3.3.0 2022-05-09 18:47:19 +01:00
CalDescent
598f219105 Don't require a full synchronization in order to return the ARRR wallet address. 2022-05-08 08:26:00 +01:00
CalDescent
bbf7193c51 Fixed bugs in wallet initialization 2022-05-08 08:24:54 +01:00
CalDescent
adecb21ada Added mainnet xpub addresses for DGB and RVN tests, as testnet support isn't fully implemented yet. 2022-05-07 17:40:36 +01:00
CalDescent
fa4679dcc4 Updated DGB and RVN tests due to conflict. 2022-05-07 17:05:10 +01:00
CalDescent
58917eeeb4 "walletsPath" is now configurable in the settings. 2022-05-07 17:01:19 +01:00
CalDescent
f36e193650 Merge branch 'master' into pirate-chain
# Conflicts:
#	src/main/java/org/qortal/settings/Settings.java
2022-05-07 16:50:32 +01:00
CalDescent
dac484136f Fixed bug in name rebuilding. 2022-05-07 16:46:10 +01:00
CalDescent
999ad857ae Fixed typo 2022-05-07 16:33:10 +01:00
CalDescent
d073b9da65 Added support for Pirate Chain wallets.
Note: this relies on (a modified version of) liblitewallet-jni which is not included, but will ultimately be compiled for each supported architecture and hosted on QDN.

LiteWalletJni code is based on https://github.com/PirateNetwork/cordova-plugin-litewallet - thanks to @CryptoForge for the help in getting this up and running.
2022-05-07 16:32:04 +01:00
CalDescent
aaa0b25106 Make sure to set Peer.isDataPeer() to false as well as true, to prevent bugs due to object reuse.
Also designate a peer as a "data peer" when making an outbound connection to request data from it.
2022-05-02 10:20:23 +01:00
CalDescent
f7dabcaeb0 Increase ONLINE_ACCOUNTS_MODULUS from 5 to 30 mins at a future undecided timestamp.
Note: it's important that this timestamp is set on a 1-hour boundary (such as 16:00:00) to ensure a clean switchover.

# Conflicts:
#	src/main/java/org/qortal/block/BlockChain.java
2022-05-02 08:50:08 +01:00
CalDescent
3409086978 Merge branch 'master' into lite-node 2022-05-02 08:31:43 +01:00
CalDescent
6c201db3dd Merge branch 'EPC-fixes' 2022-05-02 08:28:14 +01:00
CalDescent
da47df0a25 Fixed merge issue. 2022-05-01 16:41:56 +01:00
CalDescent
eea215dacf Merge branch 'ravencoin' into new-coins
# Conflicts:
#	pom.xml
#	src/main/java/org/qortal/controller/tradebot/TradeBot.java
#	src/main/java/org/qortal/crosschain/SupportedBlockchain.java
#	src/main/java/org/qortal/settings/Settings.java
2022-05-01 16:28:22 +01:00
CalDescent
0949271dda Merge branch 'digibyte' into new-coins 2022-05-01 16:25:40 +01:00
CalDescent
6bb9227159 Removed RavencoinACCTv1
Also removed CrossChainRavencoinACCTv1Resource - same as Digibyte.
2022-05-01 16:17:00 +01:00
CalDescent
a95a37277c Removed DigibyteACCTv1 and v2
Also removed CrossChainDigibyteACCTv1Resource, since this is unused, and it seems excessive to maintain support of this for every coin (and potentially every ACCT version).
2022-05-01 16:12:12 +01:00
CalDescent
48b9aa5c18 Allow images to be displayed in QDN websites via data: and blob: 2022-05-01 14:37:57 +01:00
CalDescent
1d7203a6fb Bug fixes found when testing previous commits. 2022-05-01 14:29:24 +01:00
CalDescent
1030b00f0a Keep track of peers requesting data for which we have at least one chunk. Then allow subsequent incoming connections from that peer through, up to a maximum of maxDataPeers.
Direct connections for arbitrary data are currently unlikely to succeed, because those allowing incoming connections generally have their slots maxed out and have reached maxPeers. The idea here is that some connections remain reserved for dedicated arbitrary data transfers, therefore temporarily circumventing the limit (up to a defined maximum number of reserved connections).

Arbitrary data connections will auto disconnect after 2 minutes (we might be able to reduce this at a later date), and it also probably makes sense for the requesting node to disconnect as soon as it has all the chunks that it needs (this part isn't implemented yet).

One downside of this feature is that the listen socket is now going to be accepting connections most of the time, since it is unlikely that we will regularly have 4 data peers connected. This could be improved by modifying the OP_ACCEPT behaviour based on whether we are expecting any data peers to connect. In most cases, this would allow it to remain closed. But for the sake of simplicity I will leave that optimization for a future commit.
2022-05-01 14:02:44 +01:00
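A small sketch of the reserved-slot decision described above; the setting values mirror the defaults mentioned in the nearby commits (maxPeers 36, maxDataPeers 4), but the method and its inputs are hypothetical.

```java
/** Illustrative-only: let a known data peer connect even when normal peer slots are full. */
public class DataPeerSlotSketch {

    // Defaults taken from the nearby commits; field names are assumptions
    private static final int MAX_PEERS = 36;
    private static final int MAX_DATA_PEERS = 4;

    static boolean shouldAcceptIncoming(int connectedPeers, int connectedDataPeers, boolean isKnownDataPeer) {
        if (connectedPeers < MAX_PEERS)
            return true; // a normal slot is free

        // At capacity: only accept peers that previously requested data we hold at least one chunk of,
        // and only while reserved data-peer slots remain
        return isKnownDataPeer && connectedDataPeers < MAX_DATA_PEERS;
    }
}
```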
CalDescent
0c16d1fc11 Added "maxDataPeerConnectionTime" setting (default 2 mins).
This is used to force a quick disconnect for peers that are only connecting for the purposes of requesting data for a specific arbitrary transaction signature.
2022-05-01 14:02:44 +01:00
CalDescent
ed04375385 Increased default maxPeers from 32 to 36 to compensate - otherwise the network will lose a considerable amount of inbound capacity. 2022-05-01 14:02:44 +01:00
CalDescent
6e49d20383 Added "maxDataPeers" setting to reserve 4 connections by default for direct QDN data requests. 2022-05-01 14:02:44 +01:00
CalDescent
dc34eed203 Include our address when requesting QDN data 2022-05-01 14:02:44 +01:00
CalDescent
fbe4f3fad8 Fixed incorrect minOutboundPeers conditional 2022-05-01 14:01:58 +01:00
CalDescent
e7ee3a06c7 Merge branch 'EPC-fixes' into lite-node 2022-04-30 15:33:07 +01:00
CalDescent
599877195b Merge branch 'master' into EPC-fixes 2022-04-30 15:32:44 +01:00
CalDescent
7f9d267992 Improved lite node response error logging. 2022-04-30 15:32:23 +01:00
CalDescent
52904db413 Migrated new lite node message types to new format. 2022-04-30 15:22:50 +01:00
CalDescent
5e0bde226a Merge branch 'EPC-fixes' into lite-node
# Conflicts:
#	src/main/java/org/qortal/network/message/Message.java
2022-04-30 13:25:02 +01:00
CalDescent
0695039ee3 Fixed long term bug causing last line to be missed out. 2022-04-30 12:08:10 +01:00
CalDescent
a4bcd4451c Added "tail" parameter to GET /admin/logs to allow returning the last X (limit) lines.
This should make it easy to display core logs in the UI.
2022-04-30 12:07:47 +01:00
CalDescent
e5b4b61832 Fixed bugs causing "Hash ... does not match file digest ..." errors 2022-04-30 11:26:05 +01:00
CalDescent
dd55dc277b Updated AdvancedInstaller project for v3.2.5 2022-04-27 08:50:40 +01:00
CalDescent
81ef1ae964 Bump version to 3.2.5 2022-04-26 20:17:57 +01:00
CalDescent
46701e4de7 Revert "Remove peers with unknown height, lower height or same height and same block signature (unless we don't have their block signature)"
This reverts commit 895f02f178.
2022-04-26 19:52:08 +01:00
QuickMythril
0f52ccb433 add Ravencoin ACCTs 2022-04-26 13:51:19 -04:00
QuickMythril
8aed84e6af add Digibyte ACCTs 2022-04-26 11:40:42 -04:00
CalDescent
568497e1c5 Updated AdvancedInstaller project for v3.2.4 2022-04-25 09:03:57 +01:00
CalDescent
f3f8e0013d Bump version to 3.2.4 2022-04-24 17:48:22 +01:00
CalDescent
d03c145189 Added to testRegisterNameFeeIncrease() test to catch the recently detected bug. 2022-04-24 17:41:24 +01:00
CalDescent
682a5fde94 Revert "Attempt to fix core startup problems on some systems (GNOME Desktop?) by adding defensiveness to GUI elements."
This reverts commit 311f41c610.
2022-04-24 15:54:20 +01:00
CalDescent
cca5bac30a Fixed logic bug in name registration fee calculation. 2022-04-24 15:36:36 +01:00
CalDescent
64e102a8c6 Name registration fee reduction to 1.25 QORT set to Sun, 01 May 2022 16:00:00 GMT 2022-04-24 15:27:21 +01:00
CalDescent
f9972f50e0 Updated altcoinj 2022-04-24 15:08:43 +01:00
QuickMythril
05d9a7e820 Switched to Qortal fork of altcoinj, using RavencoinMainNetParams 2022-04-23 08:28:12 -04:00
CalDescent
df290950ea Reduce log spam in BlockMinter 2022-04-23 12:32:06 +01:00
CalDescent
ae64be4802 Retry scheduled repository maintenance up to 5 times, as it's common for it to timeout waiting for the repository. Subsequent retries normally succeed. 2022-04-23 12:31:15 +01:00
CalDescent
348f3c382e Merge branch 'master' into lite-node 2022-04-22 20:40:47 +01:00
CalDescent
d98678fc5f Renamed SECRET_LENGTH to SECRET_SIZE_LENGTH. Thanks to catbref for finding this. 2022-04-22 20:40:13 +01:00
CalDescent
1da157d33f Added separate AT serialization tests, based on generic transaction serialization tests. This allows for testing both MESSAGE-type and PAYMENT-type AT transactions. 2022-04-22 20:36:22 +01:00
CalDescent
de4f004a08 Bump to transaction version 6 at a future undecided timestamp. 2022-04-22 20:35:17 +01:00
CalDescent
522ef282c8 Added support for deserialization of MESSAGE-type AT transactions (requires transaction version 6) 2022-04-22 20:34:42 +01:00
CalDescent
b5522ea260 Added support for PAYMENT-type AT transactions in serialization tests 2022-04-22 20:06:37 +01:00
CalDescent
b1f184c493 Use DigibyteMainNetParams 2022-04-22 16:31:44 +01:00
CalDescent
d66dd51bf6 Switched to Qortal fork of altcoinj, with Digibyte support 2022-04-22 16:31:32 +01:00
QuickMythril
0baed55a44 add DGB wallet 2022-04-21 11:40:17 -04:00
QuickMythril
390b359761 add RVN wallet 2022-04-21 11:38:49 -04:00
CalDescent
311f41c610 Attempt to fix core startup problems on some systems (GNOME Desktop?) by adding defensiveness to GUI elements. 2022-04-20 08:41:37 +01:00
CalDescent
0a156c76a2 Fix for NPE observed on the EPC-fixes branch (but putting the fix on master in case unrelated) 2022-04-20 08:38:59 +01:00
CalDescent
70eaaa9e3b Merge remote-tracking branch 'catbref/EPC-fixes' into EPC-fixes 2022-04-19 20:50:32 +01:00
catbref
3e622f7185 EPC-fixes: catch CancelledKeyExceptions thrown in short window between nextSelectionKey.isValid() and nextSelectionKey.isXXXable() calls 2022-04-18 14:33:05 +01:00
CalDescent
3f12be50ac Merge remote-tracking branch 'catbref/EPC-fixes' into EPC-fixes 2022-04-18 09:20:55 +01:00
catbref
68412b49a1 EPC-fixes: use bindAddress from Settings for outgoing peer connections, not just listen socket 2022-04-17 19:38:50 +01:00
catbref
c9b2620461 EPC-fixes: fix constructing GET_ONLINE_ACCOUNTS_V2 message for case where onlineAccount args is empty list 2022-04-17 19:37:28 +01:00
CalDescent
337b03aa68 Catch java.util.ServiceConfigurationError in Gui.loadImage() 2022-04-17 17:59:29 +01:00
catbref
df3f16ccf1 EPC-fixes: Improve Network shutdown by exiting fast during broadcast and skipping callbacks during peer disconnect. 2022-04-17 14:50:15 +01:00
catbref
22aa5c41b5 WIP: EPC-fixes
BlockMessage was broken because the repository 'connection' associated with the message's Block object was closed between message queuing and message sending.

The fix was to serialize Message subclasses on construction, thus freeing reliance on objects passed into constructor.
The serialized byte[] is held by the message between queuing and sending.
This forces messages into one of two 'modes': outgoing or incoming.
Outgoing messages contain serialized byte[] whereas incoming messages unpack a ByteBuffer into Message subclass fields.
As a result, all network message types have been refactored in this way.
More details in Message's class comment.

A knock-on effect is that incoming messages cannot then be sent out - a new message needs to be constructed.
Some changes needed to Arbitrary controller package classes in this respect.

Bonus: Network no longer needs broadcast threads because 'broadcasting' is now simply the act of queuing a message for many peers.
2022-04-17 14:50:15 +01:00
catbref
8e09567221 EPC-fixed: avoiding some CancelledKeyExceptions 2022-04-17 14:50:15 +01:00
catbref
3505788d42 Another chunk of improvements to networking / EPC.
Instead of synchronizing/blocking in Peer.sendMessage(),
we queue messages in a concurrent blocking TransferQueue, with timeout.

In EPC, ChannelWriteTasks consume from TransferQueue, unblocking callers to Peer.sendMessage().

If a new message is to be sent, or socket output buffer is full,
then OP_WRITE is used to wait for socket to become writable again.

Only one ChannelWriteTask per peer can be active/pending at a time.
Each ChannelWriteTask tries to send as much as it can in one go.

Other minor tidy-ups.
2022-04-17 14:50:15 +01:00
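The queueing approach described above can be sketched with a TransferQueue; this is only an outline of the idea, not the actual Peer/ChannelWriteTask code.

```java
import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TransferQueue;

/** Illustrative-only: queue outgoing messages per peer instead of blocking in sendMessage(). */
public class PeerSendQueueSketch {

    // Hypothetical per-peer outgoing queue; a write task would drain this when the socket is writable
    private final TransferQueue<byte[]> sendQueue = new LinkedTransferQueue<>();

    /** Returns false if the message couldn't be handed to the write task within the timeout. */
    boolean sendMessage(byte[] serializedMessage, long timeout, TimeUnit unit) throws InterruptedException {
        // tryTransfer blocks until a consumer takes the element, or the timeout expires
        return sendQueue.tryTransfer(serializedMessage, timeout, unit);
    }

    /** Called from the (hypothetical) write task when the channel is ready for writing. */
    byte[] nextMessageToWrite() throws InterruptedException {
        return sendQueue.poll(100, TimeUnit.MILLISECONDS);
    }
}
```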
catbref
91e0c9b940 More improvements to networking:
As per work done by szisti in PR#45:
Extracted network 'Tasks' to their own classes.
Network.NetworkProcessor reduced to only producing Tasks.

Improved usage of SocketChannel interest-ops.
Eventually this might lead to reducing task-producing synchronization lock into more granular locks.
Work still needed to convert sending messages to a queue and to make use of OP_WRITE instead of sleeping to wait for socket buffer to empty.

Disabled PeerConnectTask producer from checking against connected peers via DNS as it's too slow.

Swapped Peer's replyQueues from SynchronizedMap(wrapped HashMap) to ConcurrentHashMap.

Other minor changes within networking.
2022-04-17 14:50:15 +01:00
catbref
00996b047f Networking work-in-progress:
As per work done by szisti in PR#45:
Extracted MessageException from inside Message into its own class.
Extracted MessageType from inside Message into its own class.

Converted reflection-based Message.fromByteBuffer method call to non-reflection, functional interface, method-reference.
This should have minor performance improvement but stronger method signature and type enforcement, as well as better IDE integration.

Message.fromByteBuffer method 'contract' tightened up to:
1. throw BufferUnderflowException if there are not enough bytes to deserialize message
2. throw MessageException if bytes contain invalid data
3. should not return null

Message.toData method 'contract' tightened up to:
1. return null if the message has no payload to serialize
2. throw IOException directly - no need to try-catch in each subclass

Several Message-subclass fields now marked 'final' as per IDE suggestion.
Several Message-subclass fromByteBuffer() method signatures have changed 'throws' list.
Several bytes.remaining() != some-value changed to bytes.remaining() < some-value as per new contract.

Some bytes.remaining() checks removed for fixed-length messages because we can rely on ByteBuffer throwing BufferUnderflowException.
Some bytes.remaining() checks retained for variable-length messages, or messages that read a large amount of data, to prevent wasted memory allocations.

Other minor tidying up
2022-04-17 14:50:15 +01:00
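The reflection-free construction described above (a per-type method reference with a tightened fromByteBuffer contract) might be pictured as follows; the types here are simplified stand-ins, not the real Message/MessageType classes.

```java
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

/** Illustrative-only: per-type method references replacing reflection-based message construction. */
public class MessageFactorySketch {

    static class MessageException extends Exception {
        MessageException(String message) { super(message); }
    }

    /** Functional interface matching a static fromByteBuffer(ByteBuffer) on each message class. */
    @FunctionalInterface
    interface FromByteBuffer {
        Object fromByteBuffer(ByteBuffer bytes) throws BufferUnderflowException, MessageException;
    }

    // Hypothetical registry: message type id -> constructor method reference
    private static final Map<Integer, FromByteBuffer> PRODUCERS = new HashMap<>();
    static {
        PRODUCERS.put(1, MessageFactorySketch::pingFromByteBuffer);
    }

    static Object fromByteBuffer(int typeId, ByteBuffer bytes) throws MessageException {
        FromByteBuffer producer = PRODUCERS.get(typeId);
        if (producer == null)
            throw new MessageException("unsupported message type " + typeId);
        return producer.fromByteBuffer(bytes);
    }

    private static Object pingFromByteBuffer(ByteBuffer bytes) {
        return new Object(); // placeholder for a payload-free message
    }
}
```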
catbref
44fc0f367d Networking work-in-progress:
Temporarily increase sleep from 1ms to 100ms when waiting for outgoing socket buffer to empty.

Real fix is to rewrite using an outgoing message queue and OP_WRITE interest op.
2022-04-17 14:50:15 +01:00
catbref
b0e6259073 Networking work-in-progress:
De-register a peer's socket channel OP_READ interest op when producing a ChannelTask for that peer.
This should prevent duplicate ChannelTasks for the same peer.

Re-register OP_READ once node has read from peer's channel.
2022-04-17 14:50:15 +01:00
catbref
6255b2a907 Networking work-in-progress:
When node has reached max connections, Network will ignore pending incoming connections by:
1. not calling accept()
2. de-registering OP_ACCEPT 'interest op' on the listen socket's channel

When a peer disconnects, Network might re-register OP_ACCEPT interest op on listen socket.
2022-04-17 14:50:15 +01:00
catbref
a5fb0be274 Fix Network.disconnectPeer(PeerAddress) to prevent removeIf() on UnmodifiableList throwing UnsupportedOperationException 2022-04-17 14:50:15 +01:00
catbref
e835f6d998 ExecuteProduceConsume:
Slight reworking of EPC to simplify when producer can block
and generally make some of the conditional code more readable.

Improved logging with task class names and logging level editable during runtime!
Use /peer/enginestats?newLoggingLevel=DEBUG (or TRACE or back to INFO) to change.
2022-04-17 14:50:15 +01:00
CalDescent
54ff564bb1 Set name for transaction importer thread 2022-04-16 23:32:42 +01:00
CalDescent
f8a5ded0ba Fix for bug introduced in commit cfe9252 2022-04-16 23:20:49 +01:00
CalDescent
a1be66f02b Temporarily ease the filtering of lite node peers, in order to make development easier. 2022-04-16 20:52:31 +01:00
CalDescent
0815ad2cf0 Added AT transaction deserialization, to all them to be sent in messages for lite nodes.
Note that it is currently not easy to distinguish between MESSAGE-type and PAYMENT-type AT transactions, so PAYMENT-type is currently the only one supported (and used). A hard fork will likely be needed in order to specify the type within each message.
2022-04-16 20:52:31 +01:00
CalDescent
3484047ad4 Added GET /transactions/address/{address} API endpoint
This is a more standardized alternative to using GET /transactions/search?address=xyz. This avoids the need to build full transaction search ability into the lite node protocols right away.
2022-04-16 20:52:31 +01:00
CalDescent
a63fa1cce5 Added GET_ACCOUNT_TRANSACTIONS message, as well as a generic TRANSACTIONS message for responses. 2022-04-16 20:52:31 +01:00
CalDescent
59119ebc3b Added GET_NAME message to allow lookups from name to owner (or any other name data). 2022-04-16 20:52:31 +01:00
CalDescent
276f1b7e68 Fixed small errors in earlier commits. 2022-04-16 20:52:31 +01:00
CalDescent
c482e5b5ca Added GET_ACCOUNT_NAMES message to request names for an address, and a generic NAMES message to return a list of NameData objects. The generic NAMES message can be reused for many other responses, such as requesting the various lists of names that the API supports. 2022-04-16 20:52:31 +01:00
CalDescent
8c3e0adf35 Added message types to fetch account details and account balances, and use these in various APIs.
This should bring in enough data for very basic chat and wallet functionality (using addresses rather than registered names).

Data currently comes from a single random peer, however this can be expanded to request from multiple peers to gain confidence in the accuracy of the data. If bad data is returned from a peer, it's not the end of the world since the transaction would just be considered invalid by full nodes and would be thrown out. But this should be mostly avoidable by taking data from multiple sources to improve confidence in its accuracy.
2022-04-16 20:52:30 +01:00
CalDescent
64ff3ac672 Improved comment 2022-04-16 20:52:30 +01:00
CalDescent
cfe92525ed Disable various core functions when running as a lite node.
Lite nodes can't sync or mint blocks, and they also have a very limited ability to verify unconfirmed transactions due to a lack of contextual information (i.e. the blockchain). For now, most validation is skipped and they simply act as relays to help get transactions around the network. Full and topOnly nodes will disregard any invalid transactions upon receipt as usual, and since the lite nodes aren't signing any blocks, there is little risk to the reduced validation, other than the experience of the lite node itself. This can be tightened up considerably as the lite nodes become more powerful, but the current approach works as a PoC.
2022-04-16 20:52:30 +01:00
CalDescent
0e3a9ee2b2 Return the node "type" (full / topOnly / lite) in GET /admin/info endpoint.
This can used by the UI to hide features that aren't supported on lite nodes.
2022-04-16 20:50:27 +01:00
CalDescent
a921db2cc6 Added "lite" setting to designate the core as a lite node. 2022-04-16 20:50:27 +01:00
CalDescent
3d99f86630 Improved logging 2022-04-16 20:50:00 +01:00
CalDescent
8d1a58ec06 POW_DIFFICULTY_NO_QORT reduced from 14 to 12 (around 4x faster) 2022-04-16 12:36:32 +01:00
CalDescent
2e5a7cb5a1 Adapted Blockchain.java to use lookup table for name registration fees, to more easily support fee adjustments.
This is currently for name registration transactions only, but can be adapted (or duplicated) for other transaction types when needed.

Note: this switches from a greater-than (>) to a greater-than-or-equal (>=) timestamp comparison, as it makes more sense this way. It shouldn't affect the previous transition since there were no REGISTER_NAME transactions at that exact timestamp.
2022-04-16 12:20:03 +01:00
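A minimal sketch of a timestamp-keyed fee lookup of the kind described above, using floorEntry() for the greater-than-or-equal comparison; the table contents other than the 1.25 QORT entry (Sun, 01 May 2022 16:00:00 GMT, per the fee-reduction commit elsewhere in this log) are placeholders.

```java
import java.math.BigDecimal;
import java.util.NavigableMap;
import java.util.TreeMap;

/** Illustrative-only: look up the name registration fee in force at a given timestamp. */
public class NameFeeLookupSketch {

    // Hypothetical table: effective-from timestamp (ms) -> fee in QORT
    private static final NavigableMap<Long, BigDecimal> FEES = new TreeMap<>();
    static {
        FEES.put(0L, new BigDecimal("5"));                // placeholder for the original fee
        FEES.put(1651420800000L, new BigDecimal("1.25")); // Sun, 01 May 2022 16:00:00 GMT
    }

    static BigDecimal feeAtTimestamp(long timestamp) {
        // floorEntry returns the greatest key <= timestamp, i.e. the ">=" switchover semantics noted above
        return FEES.floorEntry(timestamp).getValue();
    }
}
```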
CalDescent
895f02f178 Remove peers with unknown height, lower height or same height and same block signature (unless we don't have their block signature)
Adapted from code originally written by catbref from before genesis, and essentially prevents syncing backwards. This needs significant testing on testnet.
2022-04-16 11:30:07 +01:00
CalDescent
c59869982b Fix for system-wide QDN issues occurring when the metadata file has an empty chunks array.
It is quite likely that existing resources with both metadata and an empty chunks array will need to be republished, because this bug may have led to incorrect file deletions.
2022-04-16 11:25:44 +01:00
CalDescent
3b3368f950 Merge pull request #85 from QuickMythril/member-count
Add member count to each group returned by GET /member/{address}
2022-04-16 11:00:35 +01:00
QuickMythril
3f02c760c2 Add member count to each group returned by GET /member/{address} 2022-04-15 06:23:10 -04:00
CalDescent
fee603e500 Add member count to each group returned by GET /groups (expanded on code written by QuickMythril) 2022-04-15 10:19:43 +01:00
QuickMythril
ad31d8014d get memberCount with Group Data
works for lookup by groupId
2022-04-14 22:08:52 -04:00
CalDescent
58a0ac74d2 Merge pull request #84 from catbref/ByteArray
Improvements to ByteArray to leverage Java 11 'native' Arrays methods
2022-04-14 21:30:59 +01:00
CalDescent
184984c16f Added ElectrumX equivalent for Pirate Chain, to communicate with a remote lightwalletd
This will most likely be used for by the trade bot, rather than for any wallet functionality.
2022-04-14 20:18:58 +01:00
CalDescent
2cf7a5e114 Generated java gRPC protocol buffers for Pirate Chain network comms.
The command used was:

./protoc --plugin=protoc-gen-grpc-java=/Users/user/Downloads/protoc-gen-grpc-java-1.45.1-osx-x86_64.exe -I=src/main/resources/proto/zcash/ --java_out=src/main/java/ --grpc-java_out=src/main/java/ src/main/resources/proto/zcash/service.proto

Then repeat, replacing service.proto with compact_formats.proto and darkside.proto

Darkside isn't needed for mainnet functionality, but included for completeness, and might be useful for testing.
2022-04-14 20:07:42 +01:00
QuickMythril
8388aa9c23 update Russian translation
credit: Alexander45 & malina
2022-04-10 15:50:29 -04:00
catbref
c1894d8c00 Improvements to ByteArray to leverage Java 11 'native' Arrays.hashCode and Arrays.compareUnsigned for speed.
Also modified ambiguous ByteArray::new and ByteArray::of to ByteArray::wrap and ByteArray::copyOf.
Modifications to other classes that use ByteArray.
2022-04-10 16:38:02 +01:00
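A compact sketch of a wrapper in the spirit of the change described above; not the actual org.qortal ByteArray class, just an illustration of wrap/copyOf plus the Java 11 Arrays helpers.

```java
import java.util.Arrays;

/** Illustrative-only: byte[] wrapper usable as a map key, backed by Java 11 Arrays helpers. */
public final class ByteArraySketch implements Comparable<ByteArraySketch> {

    private final byte[] value;

    private ByteArraySketch(byte[] value) { this.value = value; }

    /** Wraps the caller's array without copying. */
    public static ByteArraySketch wrap(byte[] value) { return new ByteArraySketch(value); }

    /** Defensive copy for callers that may mutate their array afterwards. */
    public static ByteArraySketch copyOf(byte[] value) { return new ByteArraySketch(Arrays.copyOf(value, value.length)); }

    @Override
    public int hashCode() { return Arrays.hashCode(value); }

    @Override
    public boolean equals(Object other) {
        return other instanceof ByteArraySketch && Arrays.equals(value, ((ByteArraySketch) other).value);
    }

    @Override
    public int compareTo(ByteArraySketch other) { return Arrays.compareUnsigned(value, other.value); }
}
```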
QuickMythril
f7f9cdc518 Merge pull request #83 from aldum/feature/hungarian_translation
fixup grammar; add missing translations
2022-04-09 00:10:37 -04:00
QuickMythril
850d7f8220 add/update translations
credit: johnnyfg (sv), schizo (it), IsBe (nl), Eduardo9999 (es)
2022-04-08 23:57:54 -04:00
aldum
051043283c fixup grammar; add missing translations 2022-04-06 23:21:49 +02:00
QuickMythril
15bc69de01 Merge pull request #82 from JaymenChou/patch-8
Update SysTray_zh_CN.properties
2022-04-05 13:38:13 -04:00
QuickMythril
ee3cfa4d6d fix typo 2022-04-05 13:26:02 -04:00
QuickMythril
df1f3079a5 Merge pull request #81 from JaymenChou/patch-7
Update SysTray_zh_TW.properties
2022-04-05 13:25:06 -04:00
QuickMythril
d9ae8a5552 Merge branch 'master' into patch-8 2022-04-05 13:23:19 -04:00
QuickMythril
2326c31ee7 Merge branch 'master' into patch-7 2022-04-05 13:11:14 -04:00
QuickMythril
91cb0f30dd Updated TransactionValidity translations
added some missing entries, and sorted alphabetically.
2022-04-05 12:51:49 -04:00
QuickMythril
c0307c352c Updated ApiError translations
removed some duplicate entries, and standardized the order
2022-04-05 11:46:32 -04:00
QuickMythril
8fd7c1b313 formatting fix 2022-04-05 11:09:30 -04:00
QuickMythril
b8147659b1 Updated SysTray translations
added some missing entries, and sorted alphabetically.
2022-04-05 10:48:43 -04:00
JaymenChou
7a1bac682f Update SysTray_zh_TW.properties
Add the missing term "PERFORMING_DB_MAINTENANCE" and translate it to Traditional Chinese
2022-04-04 20:36:48 +08:00
JaymenChou
9fdb7c977f Update SysTray_zh_CN.properties
Translate remaining terms to Simplified Chinese
2022-04-04 20:33:59 +08:00
JaymenChou
4f3948323b Update SysTray_zh_TW.properties
Translate the remaining terms to Traditional Chinese
2022-04-04 20:31:19 +08:00
QuickMythril
70fcc1f712 Merge pull request #78 from JaymenChou/patch-4
Create ApiError_zh_CN.properties
2022-04-04 02:49:00 -04:00
JaymenChou
f20fe9199f Update ApiError_zh_CN.properties 2022-04-04 14:36:55 +08:00
QuickMythril
91dee4a3b8 Merge pull request #80 from JaymenChou/patch-6
Create TransactionValidity_zh_CN.properties
2022-04-04 02:17:35 -04:00
QuickMythril
0b89b8084e Merge pull request #79 from JaymenChou/patch-5
Create TransactionValidity_zh_TW.properties
2022-04-04 02:17:24 -04:00
QuickMythril
a5a80302b2 Merge pull request #77 from JaymenChou/patch-3
Create ApiError_zh_TW.properties
2022-04-04 02:17:02 -04:00
QuickMythril
e61a24ee7b removed electrum-ltc.bysh.me
this server often gives a false positive for phishing by some antivirus software.
2022-04-03 22:32:57 -04:00
JaymenChou
55ed342b59 Create TransactionValidity_zh_CN.properties
Add Simplified Chinese for better understanding of logs
2022-04-03 13:27:52 +08:00
JaymenChou
3c6f79eec0 Create TransactionValidity_zh_TW.properties
Add Traditional Chinese For TransactionValidity Logs.
2022-04-03 13:25:32 +08:00
JaymenChou
590800ac1d Create ApiError_zh_CN.properties
Add Simplified Chinese support for API error messages
Hope it helps in understanding the API!
2022-04-03 12:43:18 +08:00
JaymenChou
95c412b946 Create ApiError_zh_TW.properties
Add Traditional Chinese support to API Responses
2022-04-03 12:40:27 +08:00
CalDescent
a232395750 Merge branch 'master' of github.com:Qortal/qortal 2022-04-01 11:24:56 +01:00
QuickMythril
6edbc8b6a5 add decimal precision to download progress 2022-03-31 13:46:40 -04:00
QuickMythril
f8ffeed302 updated BTC electrumx servers
added new servers; removed TCP-only servers, closed servers, and versions older than 1.16.0
2022-03-31 11:32:55 -04:00
QuickMythril
e2ee68427c removed TCP electrumx servers 2022-03-31 11:29:54 -04:00
QuickMythril
74ff23239d removed TCP electrumx servers 2022-03-31 11:27:56 -04:00
QuickMythril
f1fa2ba2f6 added SSL electrumx servers 2022-03-31 10:02:31 -04:00
QuickMythril
e1522cec94 updated LTC electrumx servers 2022-03-31 09:58:53 -04:00
QuickMythril
8841b3cbb1 add spanish translations 2022-03-31 08:44:33 -04:00
CalDescent
94260bd93f Decreased the number of retries for missing metadata, to reduce broadcast spam. 2022-03-30 08:23:22 +01:00
CalDescent
15ff8af7ac Don't process trade bots or broadcast presence timestamps if our chain is more than 30 minutes old 2022-03-30 08:11:02 +01:00
CalDescent
d420033b36 Revert "Revert "Add Qortal AT FunctionCodes for getting account level / blocks minted + tests""
This reverts commit 59025b8f47.
2022-03-30 08:07:07 +01:00
CalDescent
bda63f0310 Removed hardcoded "qortal-backup/TradeBotStates.json" from POST /admin/repository/data API, as it's no longer needed now that API keys are required. 2022-03-30 08:06:09 +01:00
QuickMythril
54add26ccb fixed typo 2022-03-25 23:39:41 -04:00
CalDescent
089b068362 Updated AdvancedInstaller project for v3.2.3 2022-03-19 22:38:58 +00:00
CalDescent
fe474b4507 Bump version to 3.2.3 2022-03-19 20:44:41 +00:00
CalDescent
bbe15b563c Added unit test to simulate recent issue.
This fails with the 3.2.2 code but now passes when using the latest fixes.
2022-03-19 20:41:38 +00:00
CalDescent
59025b8f47 Revert "Add Qortal AT FunctionCodes for getting account level / blocks minted + tests"
This reverts commit eb9b94b9c6.
2022-03-19 19:52:14 +00:00
443 changed files with 68973 additions and 7015 deletions


@@ -8,16 +8,16 @@ jobs:
   mavenTesting:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Cache local Maven repository
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
           restore-keys: |
             ${{ runner.os }}-maven-
       - name: Set up the Java JDK
-        uses: actions/setup-java@v2
+        uses: actions/setup-java@v3
         with:
           java-version: '11'
           distribution: 'adopt'
2
.gitignore vendored
View File

@@ -14,7 +14,6 @@
/.mvn.classpath
/notes*
/settings.json
/testnet*
/settings*.json
/testchain*.json
/run-testnet*.sh
@@ -28,6 +27,7 @@
/WindowsInstaller/Install Files/qortal.jar
/*.7z
/tmp
/wallets
/data*
/src/test/resources/arbitrary/*/.qortal/cache
apikey.txt

911
Q-Apps.md Normal file
View File

@@ -0,0 +1,911 @@
# Qortal Project - Q-Apps Documentation
## Introduction
Q-Apps are static web apps built from JavaScript, HTML, CSS, and other static assets. The key difference between a Q-App and a fully static site is its ability to interact with both the logged-in user and on-chain data. This is achieved using the API described in this document.
# Section 0: Basic QDN concepts
## Introduction to QDN resources
Each published item on QDN (Qortal Data Network) is referred to as a "resource". A resource could contain anything from a few characters of text, to a multi-layered directory structure containing thousands of files.
Resources are registered on-chain; however, the data payload is generally stored off-chain and verified using an on-chain SHA-256 hash.
To publish a resource, a user must first register a name, and then include that name when publishing the data. Accounts without a registered name are unable to publish to QDN from a Q-App at this time.
Owning the name grants update privileges to the data. If that name is later sold or transferred, the permission to update that resource is moved to the new owner.
## Name, service & identifier
Each QDN resource has 3 important fields:
- `name` - the registered name of the account that is publishing the data (which will hold update/edit privileges going forwards).<br /><br />
- `service` - the type of content (e.g. IMAGE or JSON). Different services have different validation rules. See [list of available services](#services).<br /><br />
- `identifier` - an optional string to allow more than one resource to exist for a given name/service combination. For example, the name `QortalDemo` may wish to publish multiple images. This can be achieved by using a different identifier string for each. The identifier is only unique to the name in question, and so it doesn't matter if another name is using the same service and identifier string.
## Shared identifiers
Since the same identifier can be used by multiple names, Q-App developers can take advantage of this to store data in a deterministic location.
An example of this is the user's avatar. This will always be published with service `THUMBNAIL` and identifier `qortal_avatar`, along with the user's name. So, an app can display the avatar of a user just by specifying their name when requesting the data. The same applies when publishing data.
## "Default" resources
A "default" resource refers to one without an identifier. For example, when a website is published via the UI, it will use the user's name and the service `WEBSITE`. These do not have an identifier, and are therefore the "default" website for this name. When requesting or publishing data without an identifier, apps can either omit the `identifier` key entirely, or include `"identifier": "default"` to indicate that the resource(s) being queried or published do not have an identifier.
<a name="services"></a>
## Available service types
Here is a list of currently available services that can be used in Q-Apps:
### Public services ###
The services below are intended to be used for publicly accessible data.
IMAGE,
THUMBNAIL,
VIDEO,
AUDIO,
PODCAST,
VOICE,
ARBITRARY_DATA,
JSON,
DOCUMENT,
LIST,
PLAYLIST,
METADATA,
BLOG,
BLOG_POST,
BLOG_COMMENT,
GIF_REPOSITORY,
ATTACHMENT,
FILE,
FILES,
CHAIN_DATA,
STORE,
PRODUCT,
OFFER,
COUPON,
CODE,
PLUGIN,
EXTENSION,
GAME,
ITEM,
NFT,
DATABASE,
SNAPSHOT,
COMMENT,
CHAIN_COMMENT,
WEBSITE,
APP,
QCHAT_ATTACHMENT,
QCHAT_IMAGE,
QCHAT_AUDIO,
QCHAT_VOICE
### Private services ###
For the services below, data is encrypted for a single recipient, and can only be decrypted using the private key of the recipient's wallet.
QCHAT_ATTACHMENT_PRIVATE,
ATTACHMENT_PRIVATE,
FILE_PRIVATE,
IMAGE_PRIVATE,
VIDEO_PRIVATE,
AUDIO_PRIVATE,
VOICE_PRIVATE,
DOCUMENT_PRIVATE,
MAIL_PRIVATE,
MESSAGE_PRIVATE
## Single vs multi-file resources
Some resources, such as those published with the `IMAGE` or `JSON` service, consist of a single file or piece of data (the image or the JSON string). This is the most common type of QDN resource, especially in the context of Q-Apps. These can be published by supplying a base64-encoded string containing the data.
Other resources, such as those published with the `WEBSITE`, `APP`, or `GIF_REPOSITORY` service, can contain multiple files and directories. Publishing these kinds of resources from a Q-App is not yet available; however, it is possible to retrieve multi-file resources that are already published. When retrieving this data (via FETCH_QDN_RESOURCE), a `filepath` must be included to indicate which file to retrieve. There is no need to specify a filepath for single-file resources, as these automatically return the contents of the single file.
## App-specific data
Some apps may want to make all QDN data for a particular service available. However, others may prefer to deal only with data that has been published by their own app (for instance, if a specific format/schema is being used).
Identifiers can be used to allow app developers to locate data that has been published by their app. The recommended approach is to use the app name as a prefix on all identifiers when publishing data.
For instance, an app called `MyApp` could allow users to publish JSON data. The app could prefix all identifiers with the string `myapp_` and then append a random string for each published resource (resulting in identifiers such as `myapp_qR5ndZ8v`). To locate data that has potentially been published by users of MyApp, the app can later search the QDN database for items with `"service": "JSON"` and `"identifier": "myapp_"`. The SEARCH_QDN_RESOURCES action has a `prefix` option to match identifiers beginning with the supplied string, as shown in the sketch below.
Note that QDN is a permissionless system, and therefore it's not possible to verify that a resource was actually published by the app. It is recommended that apps validate the contents of the resource to ensure it is formatted correctly, instead of making assumptions.
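As a sketch (reusing the hypothetical `MyApp` example above), such an app could locate its own JSON resources like this:
```
let res = await qortalRequest({
    action: "SEARCH_QDN_RESOURCES",
    service: "JSON",
    identifier: "myapp_", // Hypothetical app-specific prefix
    prefix: true,         // Match identifiers beginning with "myapp_"
    limit: 100,
    offset: 0,
    reverse: true
});
```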
## Updating a resource
To update a resource, publish again using the same `name`, `service`, and `identifier` combination; the new data overwrites the old. Note that the authenticated account must currently own the name in order to publish an update.
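For instance, re-publishing with an identical combination (a sketch using the PUBLISH_QDN_RESOURCE action documented in Section 3, with a hypothetical identifier) replaces the previously published data:
```
let res = await qortalRequest({
    action: "PUBLISH_QDN_RESOURCE",
    name: "Demo",                  // Must be owned by the authenticated account
    service: "IMAGE",
    identifier: "myapp-image1234", // Same name/service/identifier as the original publish
    data64: "new_base64_encoded_data"
});
```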
## Routing
If a non-existent `filepath` is accessed, the default behaviour of QDN is to return a `404: File not found` error. This includes anything published using the `WEBSITE` service.
However, routing is handled differently for anything published using the `APP` service.
For apps, QDN automatically sends all unhandled requests to the index file (generally index.html). This allows the app to use custom routing, as it is able to listen on any path. If a file exists at a path, the file itself will be served, and so the request won't be sent to the index file.
It's recommended that all apps return a 404 page if a request can't be routed, as in the sketch below.
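As a rough sketch of this pattern (the render functions below are hypothetical placeholders), an app published with the `APP` service might route on the client side and fall back to a 404 view:
```
// Minimal client-side routing sketch for an APP resource.
// QDN serves index.html for any unhandled path, so the app decides what to render.
const routes = {
    "/": renderHomePage,       // hypothetical render functions defined elsewhere in the app
    "/about": renderAboutPage
};

function handleRoute() {
    const render = routes[window.location.pathname];
    if (render) {
        render();
    } else {
        renderNotFoundPage(); // recommended: show a 404 page for requests that can't be routed
    }
}

window.addEventListener("load", handleRoute);
```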
# Section 1: Simple links and image loading via HTML
## Section 1a: Linking to other QDN websites / resources
The `qortal://` protocol can be used to access QDN data from within Qortal websites and apps. The basic format is as follows:
```
<a href="qortal://{service}/{name}/{identifier}/{path}">link text</a>
```
However, the system supports omitting the `identifier` and/or `path` components to allow for simpler URL formats.
A simple link to another website can be achieved with this HTML code:
```
<a href="qortal://WEBSITE/QortalDemo">link text</a>
```
To link to a specific page of another website:
```
<a href="qortal://WEBSITE/QortalDemo/minting-leveling/index.html">link text</a>
```
To link to a standalone resource, such as an avatar:
```
<a href="qortal://THUMBNAIL/QortalDemo/qortal_avatar">avatar</a>
```
For cases where you would prefer to include an identifier explicitly (to remove ambiguity), you can use the keyword `default` in place of the identifier to access a resource that doesn't have one. For instance:
```
<a href="qortal://WEBSITE/QortalDemo/default">link to root of website</a>
<a href="qortal://WEBSITE/QortalDemo/default/minting-leveling/index.html">link to subpage of website</a>
```
## Section 1b: Linking to other QDN images
The same applies for images, such as displaying an avatar:
```
<img src="qortal://THUMBNAIL/QortalDemo/qortal_avatar" />
```
...or even an image from an entirely different website:
```
<img src="qortal://WEBSITE/AlphaX/assets/img/logo.png" />
```
# Section 2: Integrating a Javascript app
Javascript apps allow for much more complex integrations with Qortal's blockchain data.
## Section 2a: Direct API calls
The standard [Qortal Core API](http://localhost:12391/api-documentation) is available to websites and apps, and can be called directly using a standard AJAX request, such as:
```
async function getNameInfo(name) {
const response = await fetch("/names/" + name);
const nameData = await response.json();
console.log("nameData: " + JSON.stringify(nameData));
}
getNameInfo("QortalDemo");
```
However, this only works for read-only data, such as looking up transactions, names, balances, etc. Also, since the address of the logged in account can't be retrieved from the core, apps can't show personalized data with this approach.
## Section 2b: User interaction via qortalRequest()
To take things a step further, the qortalRequest() function can be used to interact with the user, in order to:
- Request address and public key of the logged in account
- Publish data to QDN
- Send chat messages
- Join groups
- Deploy ATs (smart contracts)
- Send QORT or any supported foreign coin
- Add/remove items from lists
In addition to the above, qortalRequest() also supports many read-only functions that are also available via direct core API calls. Using qortalRequest() helps with futureproofing, as the core APIs can be modified without breaking functionality of existing Q-Apps.
### Making a request
The Qortal core automatically injects the `qortalRequest()` JavaScript function, which returns a Promise, into all websites/apps. This can be used to fetch data from, or publish data to, the Qortal blockchain. The function supports async/await, as well as try/catch error handling.
```
async function myfunction() {
try {
let res = await qortalRequest({
action: "GET_ACCOUNT_DATA",
address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
console.log(JSON.stringify(res)); // Log the response to the console
} catch(e) {
console.log("Error: " + JSON.stringify(e));
}
}
myfunction();
```
### Timeouts
Request timeouts are handled automatically when using qortalRequest(). The timeout value will differ based on the action being used - see `getDefaultTimeout()` in [q-apps.js](src/main/resources/q-apps/q-apps.js) for the current values.
If a request times out it will throw an error - `The request timed out` - which can be handled by the Q-App.
It is also possible to specify a custom timeout using `qortalRequestWithTimeout(request, timeout)`; however, this is discouraged. It's more reliable and futureproof to let the core handle the timeout values.
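If a custom timeout is genuinely needed, a minimal sketch (reusing the avatar request from the sample app in Section 4) might look like this:
```
async function fetchAvatarWithTimeout() {
    try {
        let avatar = await qortalRequestWithTimeout({
            action: "FETCH_QDN_RESOURCE",
            name: "QortalDemo",
            service: "THUMBNAIL",
            identifier: "qortal_avatar",
            encoding: "base64"
        }, 60000); // Custom timeout of 60 seconds, in milliseconds
        console.log("Avatar size: " + avatar.length + " bytes");
    } catch(e) {
        console.log("Error: " + JSON.stringify(e)); // "The request timed out" if the timeout is exceeded
    }
}
fetchAvatarWithTimeout();
```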
# Section 3: qortalRequest Documentation
## Supported actions
Here is a list of currently supported actions:
- GET_USER_ACCOUNT
- GET_ACCOUNT_DATA
- GET_ACCOUNT_NAMES
- SEARCH_NAMES
- GET_NAME_DATA
- LIST_QDN_RESOURCES
- SEARCH_QDN_RESOURCES
- GET_QDN_RESOURCE_STATUS
- GET_QDN_RESOURCE_PROPERTIES
- GET_QDN_RESOURCE_METADATA
- GET_QDN_RESOURCE_URL
- LINK_TO_QDN_RESOURCE
- FETCH_QDN_RESOURCE
- PUBLISH_QDN_RESOURCE
- PUBLISH_MULTIPLE_QDN_RESOURCES
- DECRYPT_DATA
- SAVE_FILE
- GET_WALLET_BALANCE
- GET_BALANCE
- SEND_COIN
- SEARCH_CHAT_MESSAGES
- SEND_CHAT_MESSAGE
- LIST_GROUPS
- JOIN_GROUP
- DEPLOY_AT
- GET_AT
- GET_AT_DATA
- LIST_ATS
- FETCH_BLOCK
- FETCH_BLOCK_RANGE
- SEARCH_TRANSACTIONS
- GET_PRICE
- GET_LIST_ITEMS
- ADD_LIST_ITEMS
- DELETE_LIST_ITEM
More functionality will be added in the future.
## Example Requests
Here are some example requests for each of the above:
### Get address of logged in account
_Will likely require user approval_
```
let account = await qortalRequest({
action: "GET_USER_ACCOUNT"
});
let address = account.address;
```
### Get public key of logged in account
_Will likely require user approval_
```
let account = await qortalRequest({
action: "GET_USER_ACCOUNT"
});
let publicKey = account.publicKey;
```
### Get account data
```
let res = await qortalRequest({
action: "GET_ACCOUNT_DATA",
address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```
### Get names owned by account
```
let res = await qortalRequest({
action: "GET_ACCOUNT_NAMES",
address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```
### Search names
```
let res = await qortalRequest({
action: "SEARCH_NAMES",
query: "search query goes here",
prefix: false, // Optional - if true, only the beginning of the name is matched
limit: 100,
offset: 0,
reverse: false
});
```
### Get name data
```
let res = await qortalRequest({
action: "GET_NAME_DATA",
name: "QortalDemo"
});
```
### List QDN resources
```
let res = await qortalRequest({
action: "LIST_QDN_RESOURCES",
service: "THUMBNAIL",
name: "QortalDemo", // Optional (exact match)
identifier: "qortal_avatar", // Optional (exact match)
default: true, // Optional
includeStatus: false, // Optional - will take time to respond, so only request if necessary
includeMetadata: false, // Optional - will take time to respond, so only request if necessary
followedOnly: false, // Optional - include followed names only
excludeBlocked: false, // Optional - exclude blocked content
limit: 100,
offset: 0,
reverse: true
});
```
### Search QDN resources
```
let res = await qortalRequest({
action: "SEARCH_QDN_RESOURCES",
service: "THUMBNAIL",
query: "search query goes here", // Optional - searches both "identifier" and "name" fields
identifier: "search query goes here", // Optional - searches only the "identifier" field
name: "search query goes here", // Optional - searches only the "name" field
prefix: false, // Optional - if true, only the beginning of fields are matched in all of the above filters
exactMatchNames: true, // Optional - if true, partial name matches are excluded
default: false, // Optional - if true, only resources without identifiers are returned
includeStatus: false, // Optional - will take time to respond, so only request if necessary
includeMetadata: false, // Optional - will take time to respond, so only request if necessary
nameListFilter: "QApp1234Subscriptions", // Optional - will only return results if they are from a name included in supplied list
followedOnly: false, // Optional - include followed names only
excludeBlocked: false, // Optional - exclude blocked content
limit: 100,
offset: 0,
reverse: true
});
```
### Search QDN resources (multiple names)
```
let res = await qortalRequest({
action: "SEARCH_QDN_RESOURCES",
service: "THUMBNAIL",
query: "search query goes here", // Optional - searches both "identifier" and "name" fields
identifier: "search query goes here", // Optional - searches only the "identifier" field
names: ["QortalDemo", "crowetic", "AlphaX"], // Optional - searches only the "name" field for any of the supplied names
prefix: false, // Optional - if true, only the beginning of fields are matched in all of the above filters
default: false, // Optional - if true, only resources without identifiers are returned
includeStatus: false, // Optional - will take time to respond, so only request if necessary
includeMetadata: false, // Optional - will take time to respond, so only request if necessary
nameListFilter: "QApp1234Subscriptions", // Optional - will only return results if they are from a name included in supplied list
followedOnly: false, // Optional - include followed names only
excludeBlocked: false, // Optional - exclude blocked content
limit: 100,
offset: 0,
reverse: true
});
```
### Fetch QDN single file resource
```
let res = await qortalRequest({
action: "FETCH_QDN_RESOURCE",
name: "QortalDemo",
service: "THUMBNAIL",
identifier: "qortal_avatar", // Optional. If omitted, the default resource is returned, or you can alternatively use the keyword "default"
encoding: "base64", // Optional. If omitted, data is returned in raw form
rebuild: false
});
```
### Fetch file from multi file QDN resource
Data is returned in the base64 format
```
let res = await qortalRequest({
action: "FETCH_QDN_RESOURCE",
name: "QortalDemo",
service: "WEBSITE",
identifier: "default", // Optional. If omitted, the default resource is returned, or you can alternatively request that using the keyword "default", as shown here
filepath: "index.html", // Required only for resources containing more than one file
rebuild: false
});
```
### Get QDN resource status
```
let res = await qortalRequest({
action: "GET_QDN_RESOURCE_STATUS",
name: "QortalDemo",
service: "THUMBNAIL",
identifier: "qortal_avatar", // Optional
build: true // Optional - request that the resource is fetched & built in the background
});
```
### Get QDN resource properties
```
let res = await qortalRequest({
action: "GET_QDN_RESOURCE_PROPERTIES",
name: "QortalDemo",
service: "THUMBNAIL",
identifier: "qortal_avatar" // Optional
});
// Returns: filename, size, mimeType (where available)
```
### Get QDN resource metadata
```
let res = await qortalRequest({
action: "GET_QDN_RESOURCE_METADATA",
name: "QortalDemo",
service: "THUMBNAIL",
identifier: "qortal_avatar" // Optional
});
```
### Publish a single file to QDN
_Requires user approval_.<br />
Note: this publishes a single, base64-encoded file. Multi-file resource publishing (such as a WEBSITE or GIF_REPOSITORY) is not yet supported via a Q-App. It will be added in a future update.
```
let res = await qortalRequest({
action: "PUBLISH_QDN_RESOURCE",
name: "Demo", // Publisher must own the registered name - use GET_ACCOUNT_NAMES for a list
service: "IMAGE",
identifier: "myapp-image1234" // Optional
data64: "base64_encoded_data",
// filename: "image.jpg", // Optional - to help apps determine the file's type
// title: "Title", // Optional
// description: "Description", // Optional
// category: "TECHNOLOGY", // Optional
// tag1: "any", // Optional
// tag2: "strings", // Optional
// tag3: "can", // Optional
// tag4: "go", // Optional
// tag5: "here", // Optional
// encrypt: true, // Optional - to be used with a private service
// recipientPublicKey: "publickeygoeshere" // Only required if `encrypt` is set to true
});
```
### Publish multiple resources at once to QDN
_Requires user approval_.<br />
Note: each resource being published consists of a single, base64-encoded file, each in its own transaction. Useful for publishing two or more related things, such as a video and a video thumbnail.
```
let res = await qortalRequest({
action: "PUBLISH_MULTIPLE_QDN_RESOURCES",
resources: [
    {
        name: "Demo", // Publisher must own the registered name - use GET_ACCOUNT_NAMES for a list
        service: "IMAGE",
        identifier: "myapp-image1234", // Optional
        data64: "base64_encoded_data",
        // filename: "image.jpg", // Optional - to help apps determine the file's type
        // title: "Title", // Optional
        // description: "Description", // Optional
        // category: "TECHNOLOGY", // Optional
        // tag1: "any", // Optional
        // tag2: "strings", // Optional
        // tag3: "can", // Optional
        // tag4: "go", // Optional
        // tag5: "here", // Optional
        // encrypt: true, // Optional - to be used with a private service
        // recipientPublicKey: "publickeygoeshere" // Only required if `encrypt` is set to true
    },
    {
        // ... more resources here if needed ...
    }
]
});
```
### Decrypt encrypted/private data
```
let res = await qortalRequest({
action: "DECRYPT_DATA",
encryptedData: 'qortalEncryptedDatabMx4fELNTV+ifJxmv4+GcuOIJOTo+3qAvbWKNY2L1r',
publicKey: 'publickeygoeshere'
});
// Returns base64 encoded string of plaintext data
```
### Prompt user to save a file to disk
Note: mimeType is not required but is recommended. If it is not specified, saving will fail when the mimeType can't be derived from the Blob. A sketch for building the Blob from base64 data follows the example below.
```
let res = await qortalRequest({
action: "SAVE_FILE",
blob: dataBlob,
filename: "myfile.pdf",
mimeType: "application/pdf" // Optional but recommended
});
```
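The `dataBlob` above must be a Blob object. One way to build it, assuming the data was fetched as a base64 string (e.g. via FETCH_QDN_RESOURCE with `encoding: "base64"`), is a small helper like this hypothetical sketch:
```
// Hypothetical helper: convert a base64 string into a Blob suitable for SAVE_FILE
function base64ToBlob(base64, mimeType) {
    const byteString = atob(base64);
    const bytes = new Uint8Array(byteString.length);
    for (let i = 0; i < byteString.length; i++) {
        bytes[i] = byteString.charCodeAt(i);
    }
    return new Blob([bytes], { type: mimeType });
}

let dataBlob = base64ToBlob(base64EncodedPdf, "application/pdf"); // base64EncodedPdf obtained elsewhere, e.g. from FETCH_QDN_RESOURCE
```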
### Get wallet balance (QORT)
_Requires user approval_
```
let res = await qortalRequest({
action: "GET_WALLET_BALANCE",
coin: "QORT"
});
```
### Get address or asset balance
```
let res = await qortalRequest({
action: "GET_BALANCE",
address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```
```
let res = await qortalRequest({
action: "GET_BALANCE",
assetId: 1,
address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```
### Send QORT to address
_Requires user approval_
```
let res = await qortalRequest({
action: "SEND_COIN",
coin: "QORT",
destinationAddress: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2",
amount: 1.00000000 // 1 QORT
});
```
### Send foreign coin to address
_Requires user approval_<br />
Note: default fees can be found [here](https://github.com/Qortal/qortal-ui/blob/master/plugins/plugins/core/qdn/browser/browser.src.js#L205-L209).
```
let res = await qortalRequest({
action: "SEND_COIN",
coin: "LTC",
destinationAddress: "LSdTvMHRm8sScqwCi6x9wzYQae8JeZhx6y",
amount: 1.00000000, // 1 LTC
fee: 0.00000020 // Optional fee per byte (default fee used if omitted, recommended) - not used for QORT or ARRR
});
```
### Search or list chat messages
```
let res = await qortalRequest({
action: "SEARCH_CHAT_MESSAGES",
before: 999999999999999,
after: 0,
txGroupId: 0, // Optional (must specify either txGroupId or two involving addresses)
// involving: ["QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2", "QSefrppsDCsZebcwrqiM1gNbWq7YMDXtG2"], // Optional (must specify either txGroupId or two involving addresses)
// reference: "reference", // Optional
// chatReference: "chatreference", // Optional
// hasChatReference: true, // Optional
encoding: "BASE64", // Optional (defaults to BASE58 if omitted)
limit: 100,
offset: 0,
reverse: true
});
```
### Send a group chat message
_Requires user approval_
```
let res = await qortalRequest({
action: "SEND_CHAT_MESSAGE",
groupId: 0,
message: "Test"
});
```
### Send a private chat message
_Requires user approval_
```
let res = await qortalRequest({
action: "SEND_CHAT_MESSAGE",
destinationAddress: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2",
message: "Test"
});
```
### List groups
```
let res = await qortalRequest({
action: "LIST_GROUPS",
limit: 100,
offset: 0,
reverse: true
});
```
### Join a group
_Requires user approval_
```
let res = await qortalRequest({
action: "JOIN_GROUP",
groupId: 100
});
```
### Deploy an AT
_Requires user approval_
```
let res = await qortalRequest({
action: "DEPLOY_AT",
creationBytes: "12345", // Must be Base58 encoded
name: "test name",
description: "test description",
type: "test type",
tags: "test tags",
amount: 1.00000000, // 1 QORT
assetId: 0,
// fee: 0.002 // optional - will use default fee if excluded
});
```
### Get AT info
```
let res = await qortalRequest({
action: "GET_AT",
atAddress: "ASRUsCjk6fa5bujv3oWYmWaVqNtvxydpPH"
});
```
### Get AT data bytes (base58 encoded)
```
let res = await qortalRequest({
action: "GET_AT_DATA",
atAddress: "ASRUsCjk6fa5bujv3oWYmWaVqNtvxydpPH"
});
```
### List ATs by functionality
```
let res = await qortalRequest({
action: "LIST_ATS",
codeHash58: "4KdJETRAdymE7dodDmJbf5d9L1bp4g5Nxky8m47TBkvA",
isExecutable: true,
limit: 100,
offset: 0,
reverse: true
});
```
### Fetch block by signature
```
let res = await qortalRequest({
action: "FETCH_BLOCK",
signature: "875yGFUy1zHV2hmxNWzrhtn9S1zkeD7SQppwdXFysvTXrankCHCz4iyAUgCBM3GjvibbnyRQpriuy1cyu953U1u5uQdzuH3QjQivi9UVwz86z1Akn17MGd5Z5STjpDT7248K6vzMamuqDei57Znonr8GGgn8yyyABn35CbZUCeAuXju"
});
```
### Fetch block by height
```
let res = await qortalRequest({
action: "FETCH_BLOCK",
height: "1139850"
});
```
### Fetch a range of blocks
```
let res = await qortalRequest({
action: "FETCH_BLOCK_RANGE",
height: "1139800",
count: 20,
reverse: false
});
```
### Search transactions
```
let res = await qortalRequest({
action: "SEARCH_TRANSACTIONS",
// startBlock: 1139000,
// blockLimit: 1000,
txGroupId: 0,
txType: [
"PAYMENT",
"REWARD_SHARE"
],
confirmationStatus: "CONFIRMED",
limit: 10,
offset: 0,
reverse: false
});
```
### Get an estimate of the QORT price
```
let res = await qortalRequest({
action: "GET_PRICE",
blockchain: "LITECOIN",
// maxtrades: 10,
inverse: true
});
```
### Get URL to load a QDN resource
Note: this returns a "Resource does not exist" error if a non-existent resource is requested.
```
let url = await qortalRequest({
action: "GET_QDN_RESOURCE_URL",
service: "THUMBNAIL",
name: "QortalDemo",
identifier: "qortal_avatar"
// path: "filename.jpg" // optional - not needed if resource contains only one file
});
```
### Get URL to load a QDN website
Note: this returns a "Resource does not exist" error if a non-existent resource is requested.
```
let url = await qortalRequest({
action: "GET_QDN_RESOURCE_URL",
service: "WEBSITE",
name: "QortalDemo",
});
```
### Get URL to load a specific file from a QDN website
Note: this returns a "Resource does not exist" error if a non-existent resource is requested.
```
let url = await qortalRequest({
action: "GET_QDN_RESOURCE_URL",
service: "WEBSITE",
name: "AlphaX",
path: "/assets/img/logo.png"
});
```
### Link/redirect to another QDN website
Note: an alternate method is to include `<a href="qortal://WEBSITE/QortalDemo">link text</a>` within your HTML code.
```
let res = await qortalRequest({
action: "LINK_TO_QDN_RESOURCE",
service: "WEBSITE",
name: "QortalDemo",
});
```
### Link/redirect to a specific path of another QDN website
Note: an alternate method is to include `<a href="qortal://WEBSITE/QortalDemo/minting-leveling/index.html">link text</a>` within your HTML code.
```
let res = await qortalRequest({
action: "LINK_TO_QDN_RESOURCE",
service: "WEBSITE",
name: "QortalDemo",
path: "/minting-leveling/index.html"
});
```
### Get the contents of a list
_Requires user approval_
```
let res = await qortalRequest({
action: "GET_LIST_ITEMS",
list_name: "followedNames"
});
```
### Add one or more items to a list
_Requires user approval_
```
let res = await qortalRequest({
action: "ADD_LIST_ITEMS",
list_name: "blockedNames",
items: ["QortalDemo"]
});
```
### Delete a single item from a list
_Requires user approval_.
Items must be deleted one at a time; see the loop sketch after the example below.
```
let res = await qortalRequest({
action: "DELETE_LIST_ITEM",
list_name: "blockedNames",
item: "QortalDemo"
});
```
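To remove several items, a simple sketch is to issue one DELETE_LIST_ITEM request per item (note that each request prompts the user for approval):
```
async function removeItems(listName, items) {
    for (const item of items) {
        // Items must be deleted one at a time
        await qortalRequest({
            action: "DELETE_LIST_ITEM",
            list_name: listName,
            item: item
        });
    }
}
removeItems("blockedNames", ["QortalDemo", "SomeOtherName"]); // "SomeOtherName" is a hypothetical example
```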
# Section 4: Examples
Some example projects can be found [here](https://github.com/Qortal/Q-Apps). These can be cloned and modified, or used as a reference when creating a new app.
## Sample App
Here is a sample application to display the logged-in user's avatar:
```
<html>
<head>
<script>
async function showAvatar() {
try {
// Get QORT address of logged in account
let account = await qortalRequest({
action: "GET_USER_ACCOUNT"
});
let address = account.address;
console.log("address: " + address);
// Get names owned by this account
let names = await qortalRequest({
action: "GET_ACCOUNT_NAMES",
address: address
});
console.log("names: " + JSON.stringify(names));
if (names.length == 0) {
console.log("User has no registered names");
return;
}
// Download base64-encoded avatar of the first registered name
let avatar = await qortalRequest({
action: "FETCH_QDN_RESOURCE",
name: names[0].name,
service: "THUMBNAIL",
identifier: "qortal_avatar",
encoding: "base64"
});
console.log("Avatar size: " + avatar.length + " bytes");
// Display the avatar image on the screen
document.getElementById("avatar").src = "data:image/png;base64," + avatar;
} catch(e) {
console.log("Error: " + JSON.stringify(e));
}
}
</script>
</head>
<body onload="showAvatar()">
<img width="500" id="avatar" />
</body>
</html>
```
# Section 5: Testing and Development
Publishing an in-development app to mainnet isn't recommended. There are several options for developing and testing a Q-App before publishing to mainnet:
### Preview mode
Select "Preview" in the UI after choosing the zip. This allows for full Q-App testing without the need to publish any data.
### Testnets
For an end-to-end test of Q-App publishing, you can use the official testnet, or set up a single node testnet of your own (often referred to as devnet) on your local machine. See [Single Node Testnet Quick Start Guide](testnet/README.md#quick-start).
### Debugging
It is recommended that you develop and test in a web browser, to allow access to the JavaScript console. To do this:
1. Open the UI app, then minimise it.
2. In a Chromium-based web browser, visit: http://localhost:12388/
3. Log in to your account and then preview your app/website.
4. Go to `View > Developer > JavaScript Console`. Here you can monitor console logs, errors, and network requests from your app, in the same way as any other web-app.

View File

@@ -17,10 +17,10 @@
<ROW Property="Manufacturer" Value="Qortal"/>
<ROW Property="MsiLogging" MultiBuildValue="DefaultBuild:vp"/>
<ROW Property="NTP_GOOD" Value="false"/>
<ROW Property="ProductCode" Value="1033:{FA630CAE-BC27-4ED9-8331-D6DB024F9EAD} 1049:{248A0A43-83A9-44AB-8E4A-74D32A1343CF} 2052:{89EF02B5-73A2-4B73-A18F-BF94F986AD32} 2057:{08C62724-5123-4F13-ACF4-F9358D7BFE98} " Type="16"/>
<ROW Property="ProductCode" Value="1033:{CB85115E-ECCE-4B3D-BB7F-6251A2764922} 1049:{09AC1C62-4E33-4312-826A-38F597ED1B17} 2052:{3CF701B3-E118-4A31-A4B7-156CEA19FBCC} 2057:{468F337D-0EF8-41D1-B5DE-4EEE66BA2AF6} " Type="16"/>
<ROW Property="ProductLanguage" Value="2057"/>
<ROW Property="ProductName" Value="Qortal"/>
<ROW Property="ProductVersion" Value="3.2.2" Type="32"/>
<ROW Property="ProductVersion" Value="3.8.5" Type="32"/>
<ROW Property="RECONFIG_NTP" Value="true"/>
<ROW Property="REMOVE_BLOCKCHAIN" Value="YES" Type="4"/>
<ROW Property="REPAIR_BLOCKCHAIN" Value="YES" Type="4"/>
@@ -212,7 +212,7 @@
<ROW Component="ADDITIONAL_LICENSE_INFO_71" ComponentId="{12A3ADBE-BB7A-496C-8869-410681E6232F}" Directory_="jdk.zipfs_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_71" Type="0"/>
<ROW Component="ADDITIONAL_LICENSE_INFO_8" ComponentId="{D53AD95E-CF96-4999-80FC-5812277A7456}" Directory_="java.naming_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_8" Type="0"/>
<ROW Component="ADDITIONAL_LICENSE_INFO_9" ComponentId="{6B7EA9B0-5D17-47A8-B78C-FACE86D15E01}" Directory_="java.net.http_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_9" Type="0"/>
<ROW Component="AI_CustomARPName" ComponentId="{E8A0872C-FDE4-49FD-9A83-8584F8D487D1}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
<ROW Component="AI_CustomARPName" ComponentId="{094B5D07-2258-4A39-9917-2E2F7F6E210B}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
<ROW Component="AI_ExePath" ComponentId="{3644948D-AE0B-41BB-9FAF-A79E70490A08}" Directory_="APPDIR" Attributes="260" KeyPath="AI_ExePath"/>
<ROW Component="APPDIR" ComponentId="{680DFDDE-3FB4-47A5-8FF5-934F576C6F91}" Directory_="APPDIR" Attributes="0"/>
<ROW Component="AccessBridgeCallbacks.h" ComponentId="{288055D1-1062-47A3-AA44-5601B4E38AED}" Directory_="bridge_Dir" Attributes="0" KeyPath="AccessBridgeCallbacks.h" Type="0"/>
@@ -1173,7 +1173,7 @@
<ROW Action="AI_STORE_LOCATION" Type="51" Source="ARPINSTALLLOCATION" Target="[APPDIR]"/>
<ROW Action="AI_SetPermissions" Type="11265" Source="userAccounts.dll" Target="OnSetPermissions" WithoutSeq="true"/>
<ROW Action="CustomizeLog4j2PropertiesScript" Type="3109" Target="Script Text" TargetUnformatted="var actionData = Session.Property(&quot;CustomActionData&quot;);&#13;&#10;var actionDataArray = actionData.split(&quot;|&quot;);&#13;&#10;var appDir = actionDataArray[0];&#13;&#10;var dataFolder = actionDataArray[1] + actionDataArray[2] + &quot;\\&quot;;&#13;&#10;&#13;&#10;var ForReading = 1, ForWriting = 2, ForAppending = 8;&#13;&#10;var fso = new ActiveXObject(&quot;Scripting.FileSystemObject&quot;);&#13;&#10;&#13;&#10;// Make copy&#13;&#10;fso.CopyFile(appDir + &quot;log4j2.properties&quot;, appDir + &quot;log4j2-orig.properties&quot;, true); // overwrite&#13;&#10;&#13;&#10;// Rewrite %AppDir%\log4j2.properties to update logfile storage path&#13;&#10;var fin = fso.OpenTextFile(appDir + &quot;log4j2-orig.properties&quot;, ForReading, false); // no create&#13;&#10;var fout = fso.OpenTextFile(appDir + &quot;log4j2.properties&quot;, ForWriting, true); // can create&#13;&#10;&#13;&#10;// Copy lines with rewriting where necessary&#13;&#10;while( !fin.AtEndOfStream ) {&#13;&#10;&#9;var line = fin.ReadLine();&#13;&#10;&#13;&#10;&#9;var start = line.indexOf(&quot;property.dirname&quot;);&#13;&#10;&#9;if (start &gt; 0) {&#13;&#10;&#9;&#9;// line: # property.dirname = ...appdata...&#13;&#10;&#9;&#9;// uncomment/replace this line for Windows&#13;&#10;&#9;&#9;fout.WriteLine( &quot;property.dirname = &quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) );&#13;&#10;&#9;} else {&#13;&#10;&#9;&#9;// not found - output verbatim&#13;&#10;&#9;&#9;fout.WriteLine( line );&#13;&#10;&#9;}&#13;&#10;}&#13;&#10;&#13;&#10;fin.Close();&#13;&#10;fout.Close();&#13;&#10;" AdditionalSeq="AI_DATA_SETTER_4"/>
<ROW Action="CustomizeSettingsJsonScript" Type="3109" Target="Script Text" TargetUnformatted="var actionData = Session.Property(&quot;CustomActionData&quot;);&#13;&#10;var actionDataArray = actionData.split(&quot;|&quot;);&#13;&#10;var appDir = actionDataArray[0];&#13;&#10;var dataFolder = actionDataArray[1] + actionDataArray[2] + &quot;\\&quot;;&#13;&#10;&#13;&#10;var ForReading = 1, ForWriting = 2, ForAppending = 8;&#13;&#10;var fso = new ActiveXObject(&quot;Scripting.FileSystemObject&quot;);&#13;&#10;&#13;&#10;// Create basic %APPDIR%\settings.json with path to real settings.json in dataFolder&#13;&#10;var fts = fso.OpenTextFile(appDir + &quot;settings.json&quot;, ForWriting, true);&#13;&#10;&#13;&#10;fts.WriteLine( &quot;{&quot; );&#13;&#10;// We need to escape Windows path backslashes to keep JSON valid&#13;&#10;fts.WriteLine( &quot; \&quot;userPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;\&quot;&quot; );&#13;&#10;fts.WriteLine( &quot;}&quot; );&#13;&#10;&#13;&#10;fts.Close();&#13;&#10;&#13;&#10;// Make copy&#13;&#10;fso.CopyFile(dataFolder + &quot;settings.json&quot;, dataFolder + &quot;settings-orig.json&quot;, true); // overwrite&#13;&#10;&#13;&#10;// Rewrite settings.json to update repository path&#13;&#10;var fin = fso.OpenTextFile(dataFolder + &quot;settings-orig.json&quot;, ForReading, false);&#13;&#10;var fout = fso.OpenTextFile(dataFolder + &quot;settings.json&quot;, ForWriting, true);&#13;&#10;&#13;&#10;// First line should contain opening brace&#13;&#10;fout.WriteLine( fin.ReadLine() );&#13;&#10;&#13;&#10;// Append our entries&#13;&#10;fout.WriteLine( &quot; \&quot;repositoryPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;db\&quot;,&quot; );&#13;&#10;fout.WriteLine( &quot; \&quot;dataPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;data\&quot;,&quot; );&#13;&#10;&#13;&#10;// copy rest of settings&#13;&#10;while( !fin.AtEndOfStream ) {&#13;&#10;&#9;fout.WriteLine( fin.ReadLine() );&#13;&#10;}&#13;&#10;&#13;&#10;fin.Close();&#13;&#10;fout.Close();&#13;&#10;" AdditionalSeq="AI_DATA_SETTER_3"/>
<ROW Action="CustomizeSettingsJsonScript" Type="3109" Target="Script Text" TargetUnformatted="var actionData = Session.Property(&quot;CustomActionData&quot;);&#13;&#10;var actionDataArray = actionData.split(&quot;|&quot;);&#13;&#10;var appDir = actionDataArray[0];&#13;&#10;var dataFolder = actionDataArray[1] + actionDataArray[2] + &quot;\\&quot;;&#13;&#10;&#13;&#10;var ForReading = 1, ForWriting = 2, ForAppending = 8;&#13;&#10;var fso = new ActiveXObject(&quot;Scripting.FileSystemObject&quot;);&#13;&#10;&#13;&#10;// Create basic %APPDIR%\settings.json with path to real settings.json in dataFolder&#13;&#10;var fts = fso.OpenTextFile(appDir + &quot;settings.json&quot;, ForWriting, true);&#13;&#10;&#13;&#10;fts.WriteLine( &quot;{&quot; );&#13;&#10;// We need to escape Windows path backslashes to keep JSON valid&#13;&#10;fts.WriteLine( &quot; \&quot;userPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;\&quot;&quot; );&#13;&#10;fts.WriteLine( &quot;}&quot; );&#13;&#10;&#13;&#10;fts.Close();&#13;&#10;&#13;&#10;// Make copy&#13;&#10;fso.CopyFile(dataFolder + &quot;settings.json&quot;, dataFolder + &quot;settings-orig.json&quot;, true); // overwrite&#13;&#10;&#13;&#10;// Rewrite settings.json to update repository path&#13;&#10;var fin = fso.OpenTextFile(dataFolder + &quot;settings-orig.json&quot;, ForReading, false);&#13;&#10;var fout = fso.OpenTextFile(dataFolder + &quot;settings.json&quot;, ForWriting, true);&#13;&#10;&#13;&#10;// First line should contain opening brace&#13;&#10;fout.WriteLine( fin.ReadLine() );&#13;&#10;&#13;&#10;// Append our entries&#13;&#10;fout.WriteLine( &quot; \&quot;repositoryPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;db\&quot;,&quot; );&#13;&#10;fout.WriteLine( &quot; \&quot;dataPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;data\&quot;,&quot; );&#13;&#10;fout.WriteLine( &quot; \&quot;walletsPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;wallets\&quot;,&quot; );&#13;&#10;fout.WriteLine( &quot; \&quot;listsPath\&quot;: \&quot;&quot; + dataFolder.split(&apos;\\&apos;).join(&apos;\\\\&apos;) + &quot;lists\&quot;,&quot; );&#13;&#10;&#13;&#10;// copy rest of settings&#13;&#10;while( !fin.AtEndOfStream ) {&#13;&#10;&#9;fout.WriteLine( fin.ReadLine() );&#13;&#10;}&#13;&#10;&#13;&#10;fin.Close();&#13;&#10;fout.Close();&#13;&#10;" AdditionalSeq="AI_DATA_SETTER_3"/>
<ROW Action="DetectRunningProcess" Type="1" Source="aicustact.dll" Target="DetectProcess" Options="3" AdditionalSeq="AI_DATA_SETTER_8"/>
<ROW Action="DetectW32Time" Type="1" Source="aicustact.dll" Target="DetectService" Options="3" AdditionalSeq="AI_DATA_SETTER_11"/>
<ROW Action="NTP_config" Type="3090" Source="ntpcfg.bat"/>

Binary file not shown.

View File

@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<version>1.4.0</version>
<description>POM was created from install:install-file</description>
</project>

View File

@@ -3,14 +3,15 @@
<groupId>org.ciyam</groupId>
<artifactId>AT</artifactId>
<versioning>
<release>1.3.8</release>
<release>1.4.0</release>
<versions>
<version>1.3.4</version>
<version>1.3.5</version>
<version>1.3.6</version>
<version>1.3.7</version>
<version>1.3.8</version>
<version>1.4.0</version>
</versions>
<lastUpdated>20200925114415</lastUpdated>
<lastUpdated>20221105114346</lastUpdated>
</versioning>
</metadata>

40
pom.xml
View File

@@ -3,15 +3,15 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.qortal</groupId>
<artifactId>qortal</artifactId>
<version>3.2.2</version>
<version>4.2.1</version>
<packaging>jar</packaging>
<properties>
<skipTests>true</skipTests>
<altcoinj.version>bf9fb80</altcoinj.version>
<altcoinj.version>7dc8c6f</altcoinj.version>
<bitcoinj.version>0.15.10</bitcoinj.version>
<bouncycastle.version>1.64</bouncycastle.version>
<bouncycastle.version>1.69</bouncycastle.version>
<build.timestamp>${maven.build.timestamp}</build.timestamp>
<ciyam-at.version>1.3.8</ciyam-at.version>
<ciyam-at.version>1.4.0</ciyam-at.version>
<commons-net.version>3.6</commons-net.version>
<commons-text.version>1.8</commons-text.version>
<commons-io.version>2.6</commons-io.version>
@@ -34,6 +34,9 @@
<package-info-maven-plugin.version>1.1.0</package-info-maven-plugin.version>
<jsoup.version>1.13.1</jsoup.version>
<java-diff-utils.version>4.10</java-diff-utils.version>
<grpc.version>1.45.1</grpc.version>
<protobuf.version>3.19.4</protobuf.version>
<simplemagic.version>1.17</simplemagic.version>
</properties>
<build>
<sourceDirectory>src/main/java</sourceDirectory>
@@ -145,6 +148,7 @@
tagsSorter: "alpha",
operationsSorter:
"alpha",
validatorUrl: false,
</value>
</replacement>
</replacements>
@@ -302,6 +306,7 @@
implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass>org.qortal.controller.Controller</mainClass>
<manifestEntries>
<Multi-Release>true</Multi-Release>
<Class-Path>. ..</Class-Path>
</manifestEntries>
</transformer>
@@ -444,7 +449,7 @@
</dependency>
<!-- For Litecoin, etc. support, requires bitcoinj -->
<dependency>
<groupId>com.github.jjos2372</groupId>
<groupId>com.github.qortal</groupId>
<artifactId>altcoinj</artifactId>
<version>${altcoinj.version}</version>
</dependency>
@@ -705,5 +710,30 @@
<artifactId>java-diff-utils</artifactId>
<version>${java-diff-utils.version}</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-netty</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-protobuf</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-stub</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>${protobuf.version}</version>
</dependency>
<dependency>
<groupId>com.j256.simplemagic</groupId>
<artifactId>simplemagic</artifactId>
<version>${simplemagic.version}</version>
</dependency>
</dependencies>
</project>

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,100 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* LiteWalletJni code based on https://github.com/PirateNetwork/cordova-plugin-litewallet
*
* MIT License
*
* Copyright (c) 2020 Zero Currency Coin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.rust.litewalletjni;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.PirateChainWalletController;
import java.nio.file.Path;
import java.nio.file.Paths;
public class LiteWalletJni {
protected static final Logger LOGGER = LogManager.getLogger(LiteWalletJni.class);
public static native String initlogging();
public static native String initnew(final String serveruri, final String params, final String saplingOutputb64, final String saplingSpendb64);
public static native String initfromseed(final String serveruri, final String params, final String seed, final String birthday, final String saplingOutputb64, final String saplingSpendb64);
public static native String initfromb64(final String serveruri, final String params, final String datab64, final String saplingOutputb64, final String saplingSpendb64);
public static native String save();
public static native String execute(final String cmd, final String args);
public static native String getseedphrase();
public static native String getseedphrasefromentropyb64(final String entropy64);
public static native String checkseedphrase(final String input);
private static boolean loaded = false;
public static void loadLibrary() {
if (loaded) {
return;
}
String osName = System.getProperty("os.name");
String osArchitecture = System.getProperty("os.arch");
LOGGER.info("OS Name: {}", osName);
LOGGER.info("OS Architecture: {}", osArchitecture);
try {
String libFileName = PirateChainWalletController.getRustLibFilename();
if (libFileName == null) {
LOGGER.info("Library not found for OS: {}, arch: {}", osName, osArchitecture);
return;
}
Path libPath = Paths.get(PirateChainWalletController.getRustLibOuterDirectory().toString(), libFileName);
System.load(libPath.toAbsolutePath().toString());
loaded = true;
}
catch (UnsatisfiedLinkError e) {
LOGGER.info("Unable to load library");
}
}
public static boolean isLoaded() {
return loaded;
}
}

View File

@@ -8,6 +8,7 @@ import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.security.Security;
import java.util.*;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -18,6 +19,8 @@ import org.qortal.api.ApiRequest;
import org.qortal.controller.AutoUpdate;
import org.qortal.settings.Settings;
import static org.qortal.controller.AutoUpdate.AGENTLIB_JVM_HOLDER_ARG;
public class ApplyUpdate {
static {
@@ -37,7 +40,7 @@ public class ApplyUpdate {
private static final String JAVA_TOOL_OPTIONS_NAME = "JAVA_TOOL_OPTIONS";
private static final String JAVA_TOOL_OPTIONS_VALUE = "-XX:MaxRAMFraction=4";
private static final long CHECK_INTERVAL = 10 * 1000L; // ms
private static final long CHECK_INTERVAL = 30 * 1000L; // ms
private static final int MAX_ATTEMPTS = 12;
public static void main(String[] args) {
@@ -197,6 +200,11 @@ public class ApplyUpdate {
// JVM arguments
javaCmd.addAll(ManagementFactory.getRuntimeMXBean().getInputArguments());
// Reapply any retained, but disabled, -agentlib JVM arg
javaCmd = javaCmd.stream()
.map(arg -> arg.replace(AGENTLIB_JVM_HOLDER_ARG, "-agentlib"))
.collect(Collectors.toList());
// Call mainClass in JAR
javaCmd.addAll(Arrays.asList("-jar", JAR_FILENAME));
@@ -205,7 +213,7 @@ public class ApplyUpdate {
}
try {
LOGGER.info(() -> String.format("Restarting node with: %s", String.join(" ", javaCmd)));
LOGGER.info(String.format("Restarting node with: %s", String.join(" ", javaCmd)));
ProcessBuilder processBuilder = new ProcessBuilder(javaCmd);
@@ -214,8 +222,15 @@ public class ApplyUpdate {
processBuilder.environment().put(JAVA_TOOL_OPTIONS_NAME, JAVA_TOOL_OPTIONS_VALUE);
}
processBuilder.start();
} catch (IOException e) {
// New process will inherit our stdout and stderr
processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
Process process = processBuilder.start();
// Nothing to pipe to new process, so close output stream (process's stdin)
process.getOutputStream().close();
} catch (Exception e) {
LOGGER.error(String.format("Failed to restart node (BAD): %s", e.getMessage()));
}
}

View File

@@ -8,11 +8,13 @@ import javax.xml.bind.annotation.XmlAccessorType;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.block.BlockChain;
import org.qortal.controller.LiteNode;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
@XmlAccessorType(XmlAccessType.NONE) // Stops JAX-RS errors when unmarshalling blockchain config
@@ -59,7 +61,17 @@ public class Account {
// Balance manipulations - assetId is 0 for QORT
public long getConfirmedBalance(long assetId) throws DataException {
AccountBalanceData accountBalanceData = this.repository.getAccountRepository().getBalance(this.address, assetId);
AccountBalanceData accountBalanceData;
if (Settings.getInstance().isLite()) {
// Lite nodes request data from peers instead of the local db
accountBalanceData = LiteNode.getInstance().fetchAccountBalance(this.address, assetId);
}
else {
// All other node types fetch from the local db
accountBalanceData = this.repository.getAccountRepository().getBalance(this.address, assetId);
}
if (accountBalanceData == null)
return 0;
@@ -199,7 +211,8 @@ public class Account {
if (level != null && level >= BlockChain.getInstance().getMinAccountLevelToMint())
return true;
if (Account.isFounder(accountData.getFlags()))
// Founders can always mint, unless they have a penalty
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
return true;
return false;
@@ -210,6 +223,11 @@ public class Account {
return this.repository.getAccountRepository().getMintedBlockCount(this.address);
}
/** Returns account's blockMintedPenalty or null if account not found in repository. */
public Integer getBlocksMintedPenalty() throws DataException {
return this.repository.getAccountRepository().getBlocksMintedPenaltyCount(this.address);
}
/** Returns whether account can build reward-shares.
* <p>
@@ -231,7 +249,7 @@ public class Account {
if (level != null && level >= BlockChain.getInstance().getMinAccountLevelToRewardShare())
return true;
if (Account.isFounder(accountData.getFlags()))
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
return true;
return false;
@@ -259,7 +277,7 @@ public class Account {
/**
* Returns 'effective' minting level, or zero if account does not exist/cannot mint.
* <p>
* For founder accounts, this returns "founderEffectiveMintingLevel" from blockchain config.
* For founder accounts with no penalty, this returns "founderEffectiveMintingLevel" from blockchain config.
*
* @return 0+
* @throws DataException
@@ -269,7 +287,8 @@ public class Account {
if (accountData == null)
return 0;
if (Account.isFounder(accountData.getFlags()))
// Founders are assigned a different effective minting level, as long as they have no penalty
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
return BlockChain.getInstance().getFounderEffectiveMintingLevel();
return accountData.getLevel();
@@ -277,8 +296,6 @@ public class Account {
/**
* Returns 'effective' minting level, or zero if reward-share does not exist.
* <p>
* this is being used on src/main/java/org/qortal/api/resource/AddressesResource.java to fulfil the online accounts api call
*
* @param repository
* @param rewardSharePublicKey
@@ -297,7 +314,7 @@ public class Account {
/**
* Returns 'effective' minting level, with a fix for the zero level.
* <p>
* For founder accounts, this returns "founderEffectiveMintingLevel" from blockchain config.
* For founder accounts with no penalty, this returns "founderEffectiveMintingLevel" from blockchain config.
*
* @param repository
* @param rewardSharePublicKey
@@ -310,7 +327,7 @@ public class Account {
if (rewardShareData == null)
return 0;
else if(!rewardShareData.getMinter().equals(rewardShareData.getRecipient()))//the minter is different than the recipient this means sponsorship
else if (!rewardShareData.getMinter().equals(rewardShareData.getRecipient())) // Sponsorship reward share
return 0;
Account rewardShareMinter = new Account(repository, rewardShareData.getMinter());

View File

@@ -11,15 +11,15 @@ public class PrivateKeyAccount extends PublicKeyAccount {
private final Ed25519PrivateKeyParameters edPrivateKeyParams;
/**
* Create PrivateKeyAccount using byte[32] seed.
* Create PrivateKeyAccount using byte[32] private key.
*
* @param seed
* @param privateKey
* byte[32] used to create private/public key pair
* @throws IllegalArgumentException
* if passed invalid seed
* if passed invalid privateKey
*/
public PrivateKeyAccount(Repository repository, byte[] seed) {
this(repository, new Ed25519PrivateKeyParameters(seed, 0));
public PrivateKeyAccount(Repository repository, byte[] privateKey) {
this(repository, new Ed25519PrivateKeyParameters(privateKey, 0));
}
private PrivateKeyAccount(Repository repository, Ed25519PrivateKeyParameters edPrivateKeyParams) {
@@ -37,10 +37,6 @@ public class PrivateKeyAccount extends PublicKeyAccount {
return this.privateKey;
}
public static byte[] toPublicKey(byte[] seed) {
return new Ed25519PrivateKeyParameters(seed, 0).generatePublicKey().getEncoded();
}
public byte[] sign(byte[] message) {
return Crypto.sign(this.edPrivateKeyParams, message);
}

View File

@@ -0,0 +1,367 @@
package org.qortal.account;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.asset.Asset;
import org.qortal.data.account.AccountData;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.transaction.Transaction.TransactionType;
import java.util.*;
import java.util.stream.Collectors;
public class SelfSponsorshipAlgoV1 {
private final Repository repository;
private final String address;
private final AccountData accountData;
private final long snapshotTimestamp;
private final boolean override;
private int registeredNameCount = 0;
private int suspiciousCount = 0;
private int suspiciousPercent = 0;
private int consolidationCount = 0;
private int bulkIssuanceCount = 0;
private int recentSponsorshipCount = 0;
private List<RewardShareTransactionData> sponsorshipRewardShares = new ArrayList<>();
private final Map<String, List<TransactionData>> paymentsByAddress = new HashMap<>();
private final Set<String> sponsees = new LinkedHashSet<>();
private Set<String> consolidatedAddresses = new LinkedHashSet<>();
private final Set<String> zeroTransactionAddreses = new LinkedHashSet<>();
private final Set<String> penaltyAddresses = new LinkedHashSet<>();
public SelfSponsorshipAlgoV1(Repository repository, String address, long snapshotTimestamp, boolean override) throws DataException {
this.repository = repository;
this.address = address;
this.accountData = this.repository.getAccountRepository().getAccount(this.address);
this.snapshotTimestamp = snapshotTimestamp;
this.override = override;
}
public String getAddress() {
return this.address;
}
public Set<String> getPenaltyAddresses() {
return this.penaltyAddresses;
}
public void run() throws DataException {
if (this.accountData == null) {
// Nothing to do
return;
}
this.fetchSponsorshipRewardShares();
if (this.sponsorshipRewardShares.isEmpty()) {
// Nothing to do
return;
}
this.findConsolidatedRewards();
this.findBulkIssuance();
this.findRegisteredNameCount();
this.findRecentSponsorshipCount();
int score = this.calculateScore();
if (score <= 0 && !override) {
return;
}
String newAddress = this.getDestinationAccount(this.address);
while (newAddress != null) {
// Found destination account
this.penaltyAddresses.add(newAddress);
// Run algo for this address, but in "override" mode because it has already been flagged
SelfSponsorshipAlgoV1 algoV1 = new SelfSponsorshipAlgoV1(this.repository, newAddress, this.snapshotTimestamp, true);
algoV1.run();
this.penaltyAddresses.addAll(algoV1.getPenaltyAddresses());
newAddress = this.getDestinationAccount(newAddress);
}
this.penaltyAddresses.add(this.address);
if (this.override || this.recentSponsorshipCount < 20) {
this.penaltyAddresses.addAll(this.consolidatedAddresses);
this.penaltyAddresses.addAll(this.zeroTransactionAddreses);
}
else {
this.penaltyAddresses.addAll(this.sponsees);
}
}
private String getDestinationAccount(String address) throws DataException {
List<TransactionData> transferPrivsTransactions = fetchTransferPrivsForAddress(address);
if (transferPrivsTransactions.isEmpty()) {
// No TRANSFER_PRIVS transactions for this address
return null;
}
AccountData accountData = this.repository.getAccountRepository().getAccount(address);
if (accountData == null) {
return null;
}
for (TransactionData transactionData : transferPrivsTransactions) {
TransferPrivsTransactionData transferPrivsTransactionData = (TransferPrivsTransactionData) transactionData;
if (Arrays.equals(transferPrivsTransactionData.getSenderPublicKey(), accountData.getPublicKey())) {
return transferPrivsTransactionData.getRecipient();
}
}
return null;
}
private void findConsolidatedRewards() throws DataException {
List<String> sponseesThatSentRewards = new ArrayList<>();
Map<String, Integer> paymentRecipients = new HashMap<>();
// Collect outgoing payments of each sponsee
for (String sponseeAddress : this.sponsees) {
// Firstly fetch all payments for address, since the functions below depend on this data
this.fetchPaymentsForAddress(sponseeAddress);
// Check if the address has zero relevant transactions
if (this.hasZeroTransactions(sponseeAddress)) {
this.zeroTransactionAddreses.add(sponseeAddress);
}
// Get payment recipients
List<String> allPaymentRecipients = this.fetchOutgoingPaymentRecipientsForAddress(sponseeAddress);
if (allPaymentRecipients.isEmpty()) {
continue;
}
sponseesThatSentRewards.add(sponseeAddress);
List<String> addressesPaidByThisSponsee = new ArrayList<>();
for (String paymentRecipient : allPaymentRecipients) {
if (addressesPaidByThisSponsee.contains(paymentRecipient)) {
// We already tracked this association - don't allow multiple to stack up
continue;
}
addressesPaidByThisSponsee.add(paymentRecipient);
// Increment count for this recipient, or initialize to 1 if not present
if (paymentRecipients.computeIfPresent(paymentRecipient, (k, v) -> v + 1) == null) {
paymentRecipients.put(paymentRecipient, 1);
}
}
}
// Exclude addresses with a low number of payments
Map<String, Integer> filteredPaymentRecipients = paymentRecipients.entrySet().stream()
.filter(p -> p.getValue() != null && p.getValue() >= 10)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
// Now check how many sponsees have sent to this subset of addresses
Map<String, Integer> sponseesThatConsolidatedRewards = new HashMap<>();
for (String sponseeAddress : sponseesThatSentRewards) {
List<String> allPaymentRecipients = this.fetchOutgoingPaymentRecipientsForAddress(sponseeAddress);
// Remove any that aren't to one of the flagged recipients (i.e. consolidation)
allPaymentRecipients.removeIf(r -> !filteredPaymentRecipients.containsKey(r));
int count = allPaymentRecipients.size();
if (count == 0) {
continue;
}
if (sponseesThatConsolidatedRewards.computeIfPresent(sponseeAddress, (k, v) -> v + count) == null) {
sponseesThatConsolidatedRewards.put(sponseeAddress, count);
}
}
// Remove sponsees that have only sent a low number of payments to the filtered addresses
Map<String, Integer> filteredSponseesThatConsolidatedRewards = sponseesThatConsolidatedRewards.entrySet().stream()
.filter(p -> p.getValue() != null && p.getValue() >= 2)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
this.consolidationCount = sponseesThatConsolidatedRewards.size();
this.consolidatedAddresses = new LinkedHashSet<>(filteredSponseesThatConsolidatedRewards.keySet());
this.suspiciousCount = this.consolidationCount + this.zeroTransactionAddreses.size();
this.suspiciousPercent = (int)(this.suspiciousCount / (float) this.sponsees.size() * 100);
}
private void findBulkIssuance() {
Long lastTimestamp = null;
for (RewardShareTransactionData rewardShareTransactionData : sponsorshipRewardShares) {
long timestamp = rewardShareTransactionData.getTimestamp();
if (timestamp >= this.snapshotTimestamp) {
continue;
}
if (lastTimestamp != null) {
if (timestamp - lastTimestamp < 3*60*1000L) {
this.bulkIssuanceCount++;
}
}
lastTimestamp = timestamp;
}
}
private void findRegisteredNameCount() throws DataException {
int registeredNameCount = 0;
for (String sponseeAddress : sponsees) {
List<NameData> names = repository.getNameRepository().getNamesByOwner(sponseeAddress);
for (NameData name : names) {
if (name.getRegistered() < this.snapshotTimestamp) {
registeredNameCount++;
break;
}
}
}
this.registeredNameCount = registeredNameCount;
}
private void findRecentSponsorshipCount() {
final long referenceTimestamp = this.snapshotTimestamp - (365 * 24 * 60 * 60 * 1000L);
int recentSponsorshipCount = 0;
for (RewardShareTransactionData rewardShare : sponsorshipRewardShares) {
if (rewardShare.getTimestamp() >= referenceTimestamp) {
recentSponsorshipCount++;
}
}
this.recentSponsorshipCount = recentSponsorshipCount;
}
private int calculateScore() {
final int suspiciousMultiplier = (this.suspiciousCount >= 100) ? this.suspiciousPercent : 1;
final int nameMultiplier = (this.sponsees.size() >= 50 && this.registeredNameCount == 0) ? 2 : 1;
final int consolidationMultiplier = Math.max(this.consolidationCount, 1);
final int bulkIssuanceMultiplier = Math.max(this.bulkIssuanceCount / 2, 1);
final int offset = 9;
return suspiciousMultiplier * nameMultiplier * consolidationMultiplier * bulkIssuanceMultiplier - offset;
}
private void fetchSponsorshipRewardShares() throws DataException {
List<RewardShareTransactionData> sponsorshipRewardShares = new ArrayList<>();
// Define relevant transactions
List<TransactionType> txTypes = List.of(TransactionType.REWARD_SHARE);
List<TransactionData> transactionDataList = fetchTransactions(repository, txTypes, this.address, false);
for (TransactionData transactionData : transactionDataList) {
if (transactionData.getType() != TransactionType.REWARD_SHARE) {
continue;
}
RewardShareTransactionData rewardShareTransactionData = (RewardShareTransactionData) transactionData;
// Skip removals
if (rewardShareTransactionData.getSharePercent() < 0) {
continue;
}
// Skip if not sponsored by this account
if (!Arrays.equals(rewardShareTransactionData.getCreatorPublicKey(), accountData.getPublicKey())) {
continue;
}
// Skip self shares
if (Objects.equals(rewardShareTransactionData.getRecipient(), this.address)) {
continue;
}
boolean duplicateFound = false;
for (RewardShareTransactionData existingRewardShare : sponsorshipRewardShares) {
if (Objects.equals(existingRewardShare.getRecipient(), rewardShareTransactionData.getRecipient())) {
// Duplicate
duplicateFound = true;
break;
}
}
if (!duplicateFound) {
sponsorshipRewardShares.add(rewardShareTransactionData);
this.sponsees.add(rewardShareTransactionData.getRecipient());
}
}
this.sponsorshipRewardShares = sponsorshipRewardShares;
}
private List<TransactionData> fetchTransferPrivsForAddress(String address) throws DataException {
return fetchTransactions(repository,
List.of(TransactionType.TRANSFER_PRIVS),
address, true);
}
private void fetchPaymentsForAddress(String address) throws DataException {
List<TransactionData> payments = fetchTransactions(repository,
Arrays.asList(TransactionType.PAYMENT, TransactionType.TRANSFER_ASSET),
address, false);
this.paymentsByAddress.put(address, payments);
}
private List<String> fetchOutgoingPaymentRecipientsForAddress(String address) {
List<String> outgoingPaymentRecipients = new ArrayList<>();
List<TransactionData> transactionDataList = this.paymentsByAddress.get(address);
if (transactionDataList == null) transactionDataList = new ArrayList<>();
transactionDataList.removeIf(t -> t.getTimestamp() >= this.snapshotTimestamp);
for (TransactionData transactionData : transactionDataList) {
switch (transactionData.getType()) {
case PAYMENT:
PaymentTransactionData paymentTransactionData = (PaymentTransactionData) transactionData;
if (!Objects.equals(paymentTransactionData.getRecipient(), address)) {
// Outgoing payment from this account
outgoingPaymentRecipients.add(paymentTransactionData.getRecipient());
}
break;
case TRANSFER_ASSET:
TransferAssetTransactionData transferAssetTransactionData = (TransferAssetTransactionData) transactionData;
if (transferAssetTransactionData.getAssetId() == Asset.QORT) {
if (!Objects.equals(transferAssetTransactionData.getRecipient(), address)) {
// Outgoing payment from this account
outgoingPaymentRecipients.add(transferAssetTransactionData.getRecipient());
}
}
break;
default:
break;
}
}
return outgoingPaymentRecipients;
}
private boolean hasZeroTransactions(String address) {
List<TransactionData> transactionDataList = this.paymentsByAddress.get(address);
if (transactionDataList == null) {
return true;
}
transactionDataList.removeIf(t -> t.getTimestamp() >= this.snapshotTimestamp);
return transactionDataList.size() == 0;
}
private static List<TransactionData> fetchTransactions(Repository repository, List<TransactionType> txTypes, String address, boolean reverse) throws DataException {
// Fetch all relevant transactions for this account
List<byte[]> signatures = repository.getTransactionRepository()
.getSignaturesMatchingCriteria(null, null, null, txTypes,
null, null, address, TransactionsResource.ConfirmationStatus.CONFIRMED,
null, null, reverse);
List<TransactionData> transactionDataList = new ArrayList<>();
for (byte[] signature : signatures) {
// Fetch transaction data
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (transactionData == null) {
continue;
}
transactionDataList.add(transactionData);
}
return transactionDataList;
}
}
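
To make the scoring above easier to follow, here is a minimal standalone sketch that reproduces the arithmetic of calculateScore() with hypothetical counts (the sample figures are illustrative, not taken from chain data):

public class ScoreSketch {
    public static void main(String[] args) {
        // Hypothetical inputs for one sponsor account (illustrative only)
        int suspiciousCount = 120;     // sponsees flagged as consolidating rewards or having zero transactions
        int suspiciousPercent = 80;    // suspiciousCount as a percentage of all sponsees
        int sponseeCount = 150;
        int registeredNameCount = 0;   // none of the sponsees registered a name before the snapshot
        int consolidationCount = 40;
        int bulkIssuanceCount = 30;    // reward-shares created within 3 minutes of the previous one

        // Same formula as SelfSponsorshipAlgoV1.calculateScore()
        final int suspiciousMultiplier = (suspiciousCount >= 100) ? suspiciousPercent : 1;
        final int nameMultiplier = (sponseeCount >= 50 && registeredNameCount == 0) ? 2 : 1;
        final int consolidationMultiplier = Math.max(consolidationCount, 1);
        final int bulkIssuanceMultiplier = Math.max(bulkIssuanceCount / 2, 1);
        final int offset = 9;

        int score = suspiciousMultiplier * nameMultiplier * consolidationMultiplier * bulkIssuanceMultiplier - offset;
        System.out.println(score); // 80 * 2 * 40 * 15 - 9 = 95991
    }
}

A score of zero or below (e.g. all multipliers at 1, giving 1 - 9 = -8) results in no penalty unless run() was entered in override mode via a TRANSFER_PRIVS chain.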

View File

@@ -79,7 +79,7 @@ public enum ApiError {
// BUYER_ALREADY_OWNER(411, 422),
// POLLS
// POLL_NO_EXISTS(501, 404),
POLL_NO_EXISTS(501, 404),
// POLL_ALREADY_EXISTS(502, 422),
// DUPLICATE_OPTION(503, 422),
// POLL_OPTION_NO_EXISTS(504, 404),

View File

@@ -3,6 +3,7 @@ package org.qortal.api;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.Socket;
@@ -20,14 +21,12 @@ import javax.net.ssl.SNIHostName;
import javax.net.ssl.SNIServerName;
import javax.net.ssl.SSLParameters;
import javax.net.ssl.SSLSocket;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.UnmarshalException;
import javax.xml.bind.Unmarshaller;
import javax.xml.bind.*;
import javax.xml.transform.stream.StreamSource;
import org.eclipse.persistence.exceptions.XMLMarshalException;
import org.eclipse.persistence.jaxb.JAXBContextFactory;
import org.eclipse.persistence.jaxb.MarshallerProperties;
import org.eclipse.persistence.jaxb.UnmarshallerProperties;
public class ApiRequest {
@@ -107,6 +106,36 @@ public class ApiRequest {
}
}
private static Marshaller createMarshaller(Class<?> objectClass) {
try {
// Create JAXB context aware of object's class
JAXBContext jc = JAXBContextFactory.createContext(new Class[] { objectClass }, null);
// Create marshaller
Marshaller marshaller = jc.createMarshaller();
// Set the marshaller media type to JSON
marshaller.setProperty(MarshallerProperties.MEDIA_TYPE, "application/json");
// Tell marshaller not to include JSON root element in the output
marshaller.setProperty(MarshallerProperties.JSON_INCLUDE_ROOT, false);
return marshaller;
} catch (JAXBException e) {
throw new RuntimeException("Unable to create API marshaller", e);
}
}
public static void marshall(Writer writer, Object object) throws IOException {
Marshaller marshaller = createMarshaller(object.getClass());
try {
marshaller.marshal(object, writer);
} catch (JAXBException e) {
throw new IOException("Unable to create marshall object for API", e);
}
}
public static String getParamsString(Map<String, String> params) {
StringBuilder result = new StringBuilder();

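For reference, the new marshall() helper can be exercised with any JAXB-friendly object; a minimal sketch, using the NodeInfo DTO purely as an example of such an object:

import java.io.IOException;
import java.io.StringWriter;

import org.qortal.api.ApiRequest;
import org.qortal.api.model.NodeInfo;

public class MarshallSketch {
    public static void main(String[] args) throws IOException {
        NodeInfo nodeInfo = new NodeInfo(); // any JAXB-friendly object will do

        StringWriter writer = new StringWriter();
        // Serialises to JSON without a root element, as configured in createMarshaller()
        ApiRequest.marshall(writer, nodeInfo);

        System.out.println(writer.toString());
    }
}
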
View File

@@ -13,8 +13,8 @@ import java.security.SecureRandom;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.servlet.http.HttpServletRequest;
import org.checkerframework.checker.units.qual.A;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.rewrite.handler.RedirectPatternRule;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
@@ -41,6 +41,7 @@ import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.api.websocket.*;
import org.qortal.network.Network;
import org.qortal.settings.Settings;
public class ApiService {
@@ -51,9 +52,11 @@ public class ApiService {
private Server server;
private ApiKey apiKey;
public static final String API_VERSION_HEADER = "X-API-VERSION";
private ApiService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.resource");
this.config.packages("org.qortal.api.resource", "org.qortal.api.restricted.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
@@ -93,7 +96,7 @@ public class ApiService {
throw new RuntimeException("Failed to start SSL API due to broken keystore");
// BouncyCastle-specific SSLContext build
SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE");
SSLContext sslContext = SSLContext.getInstance("TLSv1.3", "BCJSSE");
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC");
@@ -123,13 +126,13 @@ public class ApiService {
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
portUnifiedConnector.setHost(Network.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getApiPort());
this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
InetAddress bindAddr = InetAddress.getByName(Network.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getApiPort());
this.server = new Server(endpoint);
}
@@ -230,4 +233,19 @@ public class ApiService {
this.server = null;
}
public static int getApiVersion(HttpServletRequest request) {
// Get API version
String apiVersionString = request.getHeader(API_VERSION_HEADER);
if (apiVersionString == null) {
// Try query string - this is needed to avoid a CORS preflight. See: https://stackoverflow.com/a/43881141
apiVersionString = request.getParameter("apiVersion");
}
int apiVersion = 1;
if (apiVersionString != null) {
apiVersion = Integer.parseInt(apiVersionString);
}
return apiVersion;
}
}
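
Callers can select an API version either via the new X-API-VERSION header or via an apiVersion query parameter; the query-string form exists to avoid a CORS preflight. A minimal client-side sketch, assuming a local node on the default API port 12391 and using /admin/status purely as an illustrative endpoint:

import java.net.HttpURLConnection;
import java.net.URL;

public class ApiVersionSketch {
    public static void main(String[] args) throws Exception {
        // Header form
        HttpURLConnection con = (HttpURLConnection) new URL("http://localhost:12391/admin/status").openConnection();
        con.setRequestProperty("X-API-VERSION", "2");
        System.out.println(con.getResponseCode());

        // Query-string form - equivalent, but avoids a CORS preflight from browsers
        HttpURLConnection con2 = (HttpURLConnection) new URL("http://localhost:12391/admin/status?apiVersion=2").openConnection();
        System.out.println(con2.getResponseCode());
    }
}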

View File

@@ -0,0 +1,173 @@
package org.qortal.api;
import io.swagger.v3.jaxrs2.integration.resources.OpenApiResource;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.ErrorHandler;
import org.eclipse.jetty.server.handler.InetAccessHandler;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.servlets.CrossOriginFilter;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.network.Network;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.security.SecureRandom;
public class DevProxyService {
private static DevProxyService instance;
private final ResourceConfig config;
private Server server;
private DevProxyService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.proxy.resource", "org.qortal.api.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
}
public static DevProxyService getInstance() {
if (instance == null)
instance = new DevProxyService();
return instance;
}
public Iterable<Class<?>> getResources() {
return this.config.getClasses();
}
public void start() throws DataException {
try {
// Create API server
// SSL support if requested
String keystorePathname = Settings.getInstance().getSslKeystorePathname();
String keystorePassword = Settings.getInstance().getSslKeystorePassword();
if (keystorePathname != null && keystorePassword != null) {
// SSL version
if (!Files.isReadable(Path.of(keystorePathname)))
throw new RuntimeException("Failed to start SSL API due to broken keystore");
// BouncyCastle-specific SSLContext build
SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE");
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC");
try (InputStream keystoreStream = Files.newInputStream(Paths.get(keystorePathname))) {
keyStore.load(keystoreStream, keystorePassword.toCharArray());
}
keyManagerFactory.init(keyStore, keystorePassword.toCharArray());
sslContext.init(keyManagerFactory.getKeyManagers(), null, new SecureRandom());
SslContextFactory.Server sslContextFactory = new SslContextFactory.Server();
sslContextFactory.setSslContext(sslContext);
this.server = new Server();
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSecureScheme("https");
httpConfig.setSecurePort(Settings.getInstance().getDevProxyPort());
SecureRequestCustomizer src = new SecureRequestCustomizer();
httpConfig.addCustomizer(src);
HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfig);
SslConnectionFactory sslConnectionFactory = new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString());
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Network.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getDevProxyPort());
this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Network.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getDevProxyPort());
this.server = new Server(endpoint);
}
// Error handler
ErrorHandler errorHandler = new ApiErrorHandler();
this.server.setErrorHandler(errorHandler);
// Request logging
if (Settings.getInstance().isDevProxyLoggingEnabled()) {
RequestLogWriter logWriter = new RequestLogWriter("devproxy-requests.log");
logWriter.setAppend(true);
logWriter.setTimeZone("UTC");
RequestLog requestLog = new CustomRequestLog(logWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT);
this.server.setRequestLog(requestLog);
}
// Access handler (currently no whitelist is used)
InetAccessHandler accessHandler = new InetAccessHandler();
this.server.setHandler(accessHandler);
// URL rewriting
RewriteHandler rewriteHandler = new RewriteHandler();
accessHandler.setHandler(rewriteHandler);
// Context
ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SESSIONS);
context.setContextPath("/");
rewriteHandler.setHandler(context);
// Cross-origin resource sharing
FilterHolder corsFilterHolder = new FilterHolder(CrossOriginFilter.class);
corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, "*");
corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, "GET, POST, DELETE");
corsFilterHolder.setInitParameter(CrossOriginFilter.CHAIN_PREFLIGHT_PARAM, "false");
context.addFilter(corsFilterHolder, "/*", null);
// API servlet
ServletContainer container = new ServletContainer(this.config);
ServletHolder apiServlet = new ServletHolder(container);
apiServlet.setInitOrder(1);
context.addServlet(apiServlet, "/*");
// Start server
this.server.start();
} catch (Exception e) {
// Failed to start
throw new DataException("Failed to start developer proxy", e);
}
}
public void stop() {
try {
// Stop server
this.server.stop();
} catch (Exception e) {
// Failed to stop
}
this.server = null;
instance = null;
}
}
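
A minimal sketch of driving the new service directly; in practice the node manages the proxy lifecycle itself (see DevProxyManager elsewhere in this changeset), so this standalone form is illustrative only:

import org.qortal.api.DevProxyService;
import org.qortal.repository.DataException;

public class DevProxySketch {
    public static void main(String[] args) {
        try {
            // Binds to the configured dev proxy port, with optional SSL if a keystore is set up in settings
            DevProxyService devProxyService = DevProxyService.getInstance();
            devProxyService.start();

            // ... requests are now proxied to the configured source webserver (e.g. a local Vite dev server) ...

            devProxyService.stop();
        } catch (DataException e) {
            e.printStackTrace();
        }
    }
}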

View File

@@ -3,7 +3,6 @@ package org.qortal.api;
import io.swagger.v3.jaxrs2.integration.resources.OpenApiResource;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
import org.eclipse.jetty.rewrite.handler.RewritePatternRule;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.ErrorHandler;
import org.eclipse.jetty.server.handler.InetAccessHandler;
@@ -16,6 +15,7 @@ import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.network.Network;
import org.qortal.settings.Settings;
import javax.net.ssl.KeyManagerFactory;
@@ -38,7 +38,7 @@ public class DomainMapService {
private DomainMapService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.domainmap.resource");
this.config.packages("org.qortal.api.resource", "org.qortal.api.domainmap.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
@@ -69,7 +69,7 @@ public class DomainMapService {
throw new RuntimeException("Failed to start SSL API due to broken keystore");
// BouncyCastle-specific SSLContext build
SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE");
SSLContext sslContext = SSLContext.getInstance("TLSv1.3", "BCJSSE");
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC");
@@ -99,13 +99,13 @@ public class DomainMapService {
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
portUnifiedConnector.setHost(Network.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getDomainMapPort());
this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
InetAddress bindAddr = InetAddress.getByName(Network.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getDomainMapPort());
this.server = new Server(endpoint);
}

View File

@@ -15,6 +15,7 @@ import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.network.Network;
import org.qortal.settings.Settings;
import javax.net.ssl.KeyManagerFactory;
@@ -37,7 +38,7 @@ public class GatewayService {
private GatewayService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.gateway.resource");
this.config.packages("org.qortal.api.resource", "org.qortal.api.gateway.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
@@ -68,7 +69,7 @@ public class GatewayService {
throw new RuntimeException("Failed to start SSL API due to broken keystore");
// BouncyCastle-specific SSLContext build
SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE");
SSLContext sslContext = SSLContext.getInstance("TLSv1.3", "BCJSSE");
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC");
@@ -98,13 +99,13 @@ public class GatewayService {
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
portUnifiedConnector.setHost(Network.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getGatewayPort());
this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
InetAddress bindAddr = InetAddress.getByName(Network.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getGatewayPort());
this.server = new Server(endpoint);
}

View File

@@ -5,28 +5,71 @@ import org.apache.logging.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.qortal.arbitrary.misc.Service;
import java.util.Objects;
public class HTMLParser {
private static final Logger LOGGER = LogManager.getLogger(HTMLParser.class);
private String linkPrefix;
private String qdnBase;
private String qdnBaseWithPath;
private byte[] data;
private String qdnContext;
private String resourceId;
private Service service;
private String identifier;
private String path;
private String theme;
private boolean usingCustomRouting;
public HTMLParser(String resourceId, String inPath, String prefix, boolean usePrefix, byte[] data) {
String inPathWithoutFilename = inPath.substring(0, inPath.lastIndexOf('/'));
this.linkPrefix = usePrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : "";
public HTMLParser(String resourceId, String inPath, String prefix, boolean includeResourceIdInPrefix, byte[] data,
String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting) {
String inPathWithoutFilename = inPath.contains("/") ? inPath.substring(0, inPath.lastIndexOf('/')) : String.format("/%s",inPath);
this.qdnBase = includeResourceIdInPrefix ? String.format("%s/%s", prefix, resourceId) : prefix;
this.qdnBaseWithPath = includeResourceIdInPrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : String.format("%s%s", prefix, inPathWithoutFilename);
this.data = data;
this.qdnContext = qdnContext;
this.resourceId = resourceId;
this.service = service;
this.identifier = identifier;
this.path = inPath;
this.theme = theme;
this.usingCustomRouting = usingCustomRouting;
}
public void addAdditionalHeaderTags() {
String fileContents = new String(data);
Document document = Jsoup.parse(fileContents);
String baseUrl = this.linkPrefix + "/";
Elements head = document.getElementsByTag("head");
if (!head.isEmpty()) {
// Add q-apps script tag
String qAppsScriptElement = String.format("<script src=\"/apps/q-apps.js?time=%d\">", System.currentTimeMillis());
head.get(0).prepend(qAppsScriptElement);
// Add q-apps gateway script tag if in gateway mode
if (Objects.equals(this.qdnContext, "gateway")) {
String qAppsGatewayScriptElement = String.format("<script src=\"/apps/q-apps-gateway.js?time=%d\">", System.currentTimeMillis());
head.get(0).prepend(qAppsGatewayScriptElement);
}
// Escape and add vars
String qdnContext = this.qdnContext != null ? this.qdnContext.replace("\\", "").replace("\"","\\\"") : "";
String service = this.service.toString().replace("\\", "").replace("\"","\\\"");
String name = this.resourceId != null ? this.resourceId.replace("\\", "").replace("\"","\\\"") : "";
String identifier = this.identifier != null ? this.identifier.replace("\\", "").replace("\"","\\\"") : "";
String path = this.path != null ? this.path.replace("\\", "").replace("\"","\\\"") : "";
String theme = this.theme != null ? this.theme.replace("\\", "").replace("\"","\\\"") : "";
String qdnBase = this.qdnBase != null ? this.qdnBase.replace("\\", "").replace("\"","\\\"") : "";
String qdnBaseWithPath = this.qdnBaseWithPath != null ? this.qdnBaseWithPath.replace("\\", "").replace("\"","\\\"") : "";
String qdnContextVar = String.format("<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>", qdnContext, theme, service, name, identifier, path, qdnBase, qdnBaseWithPath);
head.get(0).prepend(qdnContextVar);
// Add base href tag
String baseElement = String.format("<base href=\"%s\">", baseUrl);
// Exclude the path if this request was routed back to the index automatically
String baseHref = this.usingCustomRouting ? this.qdnBase : this.qdnBaseWithPath;
String baseElement = String.format("<base href=\"%s/\">", baseHref);
head.get(0).prepend(baseElement);
// Add meta charset tag
@@ -39,7 +82,7 @@ public class HTMLParser {
}
public static boolean isHtmlFile(String path) {
if (path.endsWith(".html") || path.endsWith(".htm")) {
if (path.endsWith(".html") || path.endsWith(".htm") || path.equals("")) {
return true;
}
return false;

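A minimal sketch of the reworked constructor and header injection; the resource name, path and HTML payload below are placeholders:

import java.nio.charset.StandardCharsets;

import org.qortal.api.HTMLParser;
import org.qortal.arbitrary.misc.Service;

public class HtmlParserSketch {
    public static void main(String[] args) {
        byte[] html = "<html><head></head><body>Hello</body></html>".getBytes(StandardCharsets.UTF_8);

        // Placeholder details: "gateway" context, WEBSITE service, no identifier, light theme, no custom routing
        HTMLParser parser = new HTMLParser("ExampleName", "/index.html", "", true, html,
                "gateway", Service.WEBSITE, null, "light", false);

        // Injects the q-apps script tags, the _qdn* variables and the <base href> tag into <head>
        parser.addAdditionalHeaderTags();

        System.out.println(new String(parser.getData(), StandardCharsets.UTF_8));
    }
}
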
View File

@@ -15,7 +15,21 @@ public abstract class Security {
public static final String API_KEY_HEADER = "X-API-KEY";
/**
* Check API call is allowed, retrieving the API key from the request header or GET/POST parameters where required
* @param request
*/
public static void checkApiCallAllowed(HttpServletRequest request) {
checkApiCallAllowed(request, null);
}
/**
* Check API call is allowed, retrieving the API key first from the passedApiKey parameter, with a fallback
* to the request header or GET/POST parameters when null.
* @param request
* @param passedApiKey - the API key to test, or null if it should be retrieved from the request headers.
*/
public static void checkApiCallAllowed(HttpServletRequest request, String passedApiKey) {
// We may want to allow automatic authentication for local requests, if enabled in settings
boolean localAuthBypassEnabled = Settings.getInstance().isLocalAuthBypassEnabled();
if (localAuthBypassEnabled) {
@@ -38,7 +52,10 @@ public abstract class Security {
}
// We require an API key to be passed
String passedApiKey = request.getHeader(API_KEY_HEADER);
if (passedApiKey == null) {
// API key not passed as a parameter, so try the header
passedApiKey = request.getHeader(API_KEY_HEADER);
}
if (passedApiKey == null) {
// Try query string - this is needed to avoid a CORS preflight. See: https://stackoverflow.com/a/43881141
passedApiKey = request.getParameter("apiKey");
@@ -56,7 +73,7 @@ public abstract class Security {
public static void disallowLoopbackRequests(HttpServletRequest request) {
try {
InetAddress remoteAddr = InetAddress.getByName(request.getRemoteAddr());
if (remoteAddr.isLoopbackAddress()) {
if (remoteAddr.isLoopbackAddress() && !Settings.getInstance().isGatewayLoopbackEnabled()) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Local requests not allowed");
}
} catch (UnknownHostException e) {
@@ -84,9 +101,9 @@ public abstract class Security {
}
}
public static void requirePriorAuthorizationOrApiKey(HttpServletRequest request, String resourceId, Service service, String identifier) {
public static void requirePriorAuthorizationOrApiKey(HttpServletRequest request, String resourceId, Service service, String identifier, String apiKey) {
try {
Security.checkApiCallAllowed(request);
Security.checkApiCallAllowed(request, apiKey);
} catch (ApiException e) {
// API call wasn't allowed, but maybe it was pre-authorized

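A minimal sketch of a resource method using the new two-argument form; the endpoint path and query parameter name are illustrative, and request is injected by JAX-RS:

import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;

import org.qortal.api.Security;

@Path("/example")
public class ExampleResource {
    @Context
    HttpServletRequest request;

    @GET
    public String get(@QueryParam("apiKey") String apiKey) {
        // Checks the passed key first, then falls back to the X-API-KEY header or the apiKey parameter
        Security.checkApiCallAllowed(request, apiKey);
        return "ok";
    }
}
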
View File

@@ -42,16 +42,16 @@ public class DomainMapResource {
// Build synchronously, so that we don't need to make the summary API endpoints available over
// the domain map server. This means that there will be no loading screen, but this is potentially
// preferred in this situation anyway (e.g. to avoid confusing search engine robots).
return this.get(domainMap.get(request.getServerName()), ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", false, false);
return this.get(domainMap.get(request.getServerName()), ResourceIdType.NAME, Service.WEBSITE, null, inPath, null, "", false, false);
}
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async) {
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async) {
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, identifier, inPath,
secret58, prefix, includeResourceIdInPrefix, async, "domainMap", request, response, context);
return renderer.render();
}

View File

@@ -2,6 +2,7 @@ package org.qortal.api.gateway.resource;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.apache.commons.lang3.StringUtils;
import org.qortal.api.Security;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
@@ -16,6 +17,10 @@ import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
@Path("/")
@@ -76,50 +81,83 @@ public class GatewayResource {
@GET
@Path("{name}/{path:.*}")
@Path("{path:.*}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathByName(@PathParam("name") String name,
@PathParam("path") String inPath) {
public HttpServletResponse getPath(@PathParam("path") String inPath) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", true, true);
}
@GET
@Path("{name}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByName(@PathParam("name") String name) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "", true, true);
}
// Optional /site alternative for backwards compatibility
@GET
@Path("/site/{name}/{path:.*}")
public HttpServletResponse getSitePathByName(@PathParam("name") String name,
@PathParam("path") String inPath) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "/site", true, true);
}
@GET
@Path("/site/{name}")
public HttpServletResponse getSiteIndexByName(@PathParam("name") String name) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "/site", true, true);
return this.parsePath(inPath, "gateway", null, true, true);
}
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async) {
private HttpServletResponse parsePath(String inPath, String qdnContext, String secret58, boolean includeResourceIdInPrefix, boolean async) {
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
if (inPath == null || inPath.equals("")) {
// Assume not a real file
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}
// Default service is WEBSITE
Service service = Service.WEBSITE;
String name = null;
String identifier = null;
String outPath = "";
List<String> prefixParts = new ArrayList<>();
if (!inPath.contains("/")) {
// Assume entire inPath is a registered name
name = inPath;
}
else {
// Parse the path to determine what we need to load
List<String> parts = new LinkedList<>(Arrays.asList(inPath.split("/")));
// Check if the first element is a service
try {
Service parsedService = Service.valueOf(parts.get(0).toUpperCase());
if (parsedService != null) {
// First element matches a service, so we can assume it is one
service = parsedService;
parts.remove(0);
prefixParts.add(service.name());
}
} catch (IllegalArgumentException e) {
// Not a service
}
if (parts.isEmpty()) {
// We need more than just a service
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}
// Service is removed, so assume first element is now a registered name
name = parts.get(0);
parts.remove(0);
if (!parts.isEmpty()) {
// Name is removed, so check if the first element is now an identifier
ArbitraryResourceStatus status = this.getStatus(service, name, parts.get(0), false);
if (status.getTotalChunkCount() > 0) {
// Matched service, name and identifier combination - so assume this is an identifier and can be removed
identifier = parts.get(0);
parts.remove(0);
prefixParts.add(identifier);
}
}
if (!parts.isEmpty()) {
// outPath can be built by combining any remaining parts
outPath = String.join("/", parts);
}
}
String prefix = StringUtils.join(prefixParts, "/");
if (prefix != null && prefix.length() > 0) {
prefix = "/" + prefix;
}
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(name, ResourceIdType.NAME, service, identifier, outPath,
secret58, prefix, includeResourceIdInPrefix, async, qdnContext, request, response, context);
return renderer.render();
}
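
To illustrate how parsePath() above interprets incoming gateway paths, here is a small sketch mirroring just the service-detection step (the sample paths are made up; the real method additionally resolves identifiers via getStatus()):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

import org.qortal.arbitrary.misc.Service;

public class GatewayPathSketch {
    public static void main(String[] args) {
        // e.g. "APP/ExampleName/index.html" -> service APP, name "ExampleName", outPath "index.html"
        // e.g. "ExampleName"                -> service WEBSITE (default), name "ExampleName"
        String inPath = "APP/ExampleName/index.html";

        Service service = Service.WEBSITE; // default when no service prefix is present
        List<String> parts = new LinkedList<>(Arrays.asList(inPath.split("/")));
        try {
            service = Service.valueOf(parts.get(0).toUpperCase());
            parts.remove(0); // first element named a service
        } catch (IllegalArgumentException e) {
            // Not a service - treat the first element as a registered name instead
        }

        String name = parts.remove(0);
        String outPath = String.join("/", parts);
        System.out.println(service + " / " + name + " / " + outPath);
    }
}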

View File

@@ -0,0 +1,56 @@
package org.qortal.api.model;
import org.qortal.block.SelfSponsorshipAlgoV1Block;
import org.qortal.data.account.AccountData;
import org.qortal.data.naming.NameData;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import java.util.ArrayList;
import java.util.List;
@XmlAccessorType(XmlAccessType.FIELD)
public class AccountPenaltyStats {
public Integer totalPenalties;
public Integer maxPenalty;
public Integer minPenalty;
public String penaltyHash;
protected AccountPenaltyStats() {
}
public AccountPenaltyStats(Integer totalPenalties, Integer maxPenalty, Integer minPenalty, String penaltyHash) {
this.totalPenalties = totalPenalties;
this.maxPenalty = maxPenalty;
this.minPenalty = minPenalty;
this.penaltyHash = penaltyHash;
}
public static AccountPenaltyStats fromAccounts(List<AccountData> accounts) {
int totalPenalties = 0;
Integer maxPenalty = null;
Integer minPenalty = null;
List<String> addresses = new ArrayList<>();
for (AccountData accountData : accounts) {
int penalty = accountData.getBlocksMintedPenalty();
addresses.add(accountData.getAddress());
totalPenalties++;
// Penalties are expressed as a negative number, so the min and the max are reversed here
if (maxPenalty == null || penalty < maxPenalty) maxPenalty = penalty;
if (minPenalty == null || penalty > minPenalty) minPenalty = penalty;
}
String penaltyHash = SelfSponsorshipAlgoV1Block.getHash(addresses);
return new AccountPenaltyStats(totalPenalties, maxPenalty, minPenalty, penaltyHash);
}
@Override
public String toString() {
return String.format("totalPenalties: %d, maxPenalty: %d, minPenalty: %d, penaltyHash: %s", totalPenalties, maxPenalty, minPenalty, penaltyHash == null ? "null" : penaltyHash);
}
}
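
Because penalties are stored as negative numbers, the maxPenalty/minPenalty fields can look inverted at first glance; a minimal sketch with hypothetical penalty values, replicating the comparisons from fromAccounts():

public class PenaltyMinMaxSketch {
    public static void main(String[] args) {
        // Hypothetical penalties (always negative); -5000000 is the more severe of the two
        int[] penalties = { -100000, -5000000 };

        Integer maxPenalty = null;
        Integer minPenalty = null;
        for (int penalty : penalties) {
            // Same reversed comparisons as AccountPenaltyStats.fromAccounts()
            if (maxPenalty == null || penalty < maxPenalty) maxPenalty = penalty;
            if (minPenalty == null || penalty > minPenalty) minPenalty = penalty;
        }

        // Prints maxPenalty: -5000000, minPenalty: -100000
        System.out.println("maxPenalty: " + maxPenalty + ", minPenalty: " + minPenalty);
    }
}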

View File

@@ -1,7 +1,8 @@
package org.qortal.api.model;
import io.swagger.v3.oas.annotations.media.Schema;
import org.qortal.data.network.PeerChainTipData;
import org.qortal.controller.Controller;
import org.qortal.data.block.BlockSummaryData;
import org.qortal.data.network.PeerData;
import org.qortal.network.Handshake;
import org.qortal.network.Peer;
@@ -36,6 +37,7 @@ public class ConnectedPeer {
public Long lastBlockTimestamp;
public UUID connectionId;
public String age;
public Boolean isTooDivergent;
protected ConnectedPeer() {
}
@@ -63,11 +65,16 @@ public class ConnectedPeer {
this.age = "connecting...";
}
PeerChainTipData peerChainTipData = peer.getChainTipData();
BlockSummaryData peerChainTipData = peer.getChainTipData();
if (peerChainTipData != null) {
this.lastHeight = peerChainTipData.getLastHeight();
this.lastBlockSignature = peerChainTipData.getLastBlockSignature();
this.lastBlockTimestamp = peerChainTipData.getLastBlockTimestamp();
this.lastHeight = peerChainTipData.getHeight();
this.lastBlockSignature = peerChainTipData.getSignature();
this.lastBlockTimestamp = peerChainTipData.getTimestamp();
}
// Only include isTooDivergent decision if we've had the opportunity to request block summaries from this peer
if (peer.getLastTooDivergentTime() != null) {
this.isTooDivergent = Controller.wasRecentlyTooDivergent.test(peer);
}
}

View File

@@ -0,0 +1,16 @@
package org.qortal.api.model;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class FileProperties {
public String filename;
public String mimeType;
public Long size;
public FileProperties() {
}
}

View File

@@ -12,6 +12,7 @@ public class NodeInfo {
public long buildTimestamp;
public String nodeId;
public boolean isTestNet;
public String type;
public NodeInfo() {
}

View File

@@ -4,6 +4,7 @@ import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import org.qortal.controller.Controller;
import org.qortal.controller.OnlineAccountsManager;
import org.qortal.controller.Synchronizer;
import org.qortal.network.Network;
@@ -21,7 +22,7 @@ public class NodeStatus {
public final int height;
public NodeStatus() {
this.isMintingPossible = Controller.getInstance().isMintingPossible();
this.isMintingPossible = OnlineAccountsManager.getInstance().hasActiveOnlineAccountSignatures();
this.syncPercent = Synchronizer.getInstance().getSyncPercent();
this.isSynchronizing = Synchronizer.getInstance().isSynchronizing();

View File

@@ -0,0 +1,56 @@
package org.qortal.api.model;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import io.swagger.v3.oas.annotations.media.Schema;
import org.qortal.data.voting.VoteOnPollData;
@Schema(description = "Poll vote info, including voters")
// All properties to be converted to JSON via JAX-RS
@XmlAccessorType(XmlAccessType.FIELD)
public class PollVotes {
@Schema(description = "List of individual votes")
@XmlElement(name = "votes")
public List<VoteOnPollData> votes;
@Schema(description = "Total number of votes")
public Integer totalVotes;
@Schema(description = "List of vote counts for each option")
public List<OptionCount> voteCounts;
// For JAX-RS
protected PollVotes() {
}
public PollVotes(List<VoteOnPollData> votes, Integer totalVotes, List<OptionCount> voteCounts) {
this.votes = votes;
this.totalVotes = totalVotes;
this.voteCounts = voteCounts;
}
@Schema(description = "Vote info")
// All properties to be converted to JSON via JAX-RS
@XmlAccessorType(XmlAccessType.FIELD)
public static class OptionCount {
@Schema(description = "Option name")
public String optionName;
@Schema(description = "Vote count")
public Integer voteCount;
// For JAX-RS
protected OptionCount() {
}
public OptionCount(String optionName, Integer voteCount) {
this.optionName = optionName;
this.voteCount = voteCount;
}
}
}

View File

@@ -0,0 +1,29 @@
package org.qortal.api.model.crosschain;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import io.swagger.v3.oas.annotations.media.Schema;
@XmlAccessorType(XmlAccessType.FIELD)
public class DigibyteSendRequest {
@Schema(description = "Digibyte BIP32 extended private key", example = "tprv___________________________________________________________________________________________________________")
public String xprv58;
@Schema(description = "Recipient's Digibyte address ('legacy' P2PKH only)", example = "1DigByteEaterAddressDontSendf59kuE")
public String receivingAddress;
@Schema(description = "Amount of DGB to send", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long digibyteAmount;
@Schema(description = "Transaction fee per byte (optional). Default is 0.00000100 DGB (100 sats) per byte", example = "0.00000100", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public Long feePerByte;
public DigibyteSendRequest() {
}
}

View File

@@ -0,0 +1,32 @@
package org.qortal.api.model.crosschain;
import io.swagger.v3.oas.annotations.media.Schema;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
@XmlAccessorType(XmlAccessType.FIELD)
public class PirateChainSendRequest {
@Schema(description = "32 bytes of entropy, Base58 encoded", example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV")
public String entropy58;
@Schema(description = "Recipient's Pirate Chain address", example = "zc...")
public String receivingAddress;
@Schema(description = "Amount of ARRR to send", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long arrrAmount;
@Schema(description = "Transaction fee per byte (optional). Default is 0.00000100 ARRR (100 sats) per byte", example = "0.00000100", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public Long feePerByte;
@Schema(description = "Optional memo to include information for the recipient", example = "zc...")
public String memo;
public PirateChainSendRequest() {
}
}

View File

@@ -0,0 +1,29 @@
package org.qortal.api.model.crosschain;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import io.swagger.v3.oas.annotations.media.Schema;
@XmlAccessorType(XmlAccessType.FIELD)
public class RavencoinSendRequest {
@Schema(description = "Ravencoin BIP32 extended private key", example = "tprv___________________________________________________________________________________________________________")
public String xprv58;
@Schema(description = "Recipient's Ravencoin address ('legacy' P2PKH only)", example = "1RvnCoinEaterAddressDontSendf59kuE")
public String receivingAddress;
@Schema(description = "Amount of RVN to send", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long ravencoinAmount;
@Schema(description = "Transaction fee per byte (optional). Default is 0.00000100 RVN (100 sats) per byte", example = "0.00000100", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public Long feePerByte;
public RavencoinSendRequest() {
}
}

View File

@@ -0,0 +1,164 @@
package org.qortal.api.proxy.resource;
import org.qortal.api.ApiError;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.HTMLParser;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.DevProxyManager;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Context;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.ProtocolException;
import java.net.URL;
import java.util.Enumeration;
@Path("/")
public class DevProxyServerResource {
@Context HttpServletRequest request;
@Context HttpServletResponse response;
@Context ServletContext context;
@GET
public HttpServletResponse getProxyIndex() {
return this.proxy("/");
}
@GET
@Path("{path:.*}")
public HttpServletResponse getProxyPath(@PathParam("path") String inPath) {
return this.proxy(inPath);
}
private HttpServletResponse proxy(String inPath) {
try {
String source = DevProxyManager.getInstance().getSourceHostAndPort();
if (!inPath.startsWith("/")) {
inPath = "/" + inPath;
}
String queryString = request.getQueryString() != null ? "?" + request.getQueryString() : "";
// Open URL
URL url = new URL(String.format("http://%s%s%s", source, inPath, queryString));
HttpURLConnection con = (HttpURLConnection) url.openConnection();
// Proxy the request data
this.proxyRequestToConnection(request, con);
try {
// Make the request and proxy the response code
response.setStatus(con.getResponseCode());
}
catch (ConnectException e) {
// Try converting localhost / 127.0.0.1 to IPv6 [::1]
if (source.startsWith("localhost") || source.startsWith("127.0.0.1")) {
int port = 80;
String[] parts = source.split(":");
if (parts.length > 1) {
port = Integer.parseInt(parts[1]);
}
source = String.format("[::1]:%d", port);
}
// Retry connection
url = new URL(String.format("http://%s%s%s", source, inPath, queryString));
con = (HttpURLConnection) url.openConnection();
this.proxyRequestToConnection(request, con);
response.setStatus(con.getResponseCode());
}
// Proxy the response data back to the caller
this.proxyConnectionToResponse(con, response, inPath);
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, e.getMessage());
}
return response;
}
private void proxyRequestToConnection(HttpServletRequest request, HttpURLConnection con) throws ProtocolException {
// Proxy the request method
con.setRequestMethod(request.getMethod());
// Proxy the request headers
Enumeration<String> headerNames = request.getHeaderNames();
while (headerNames.hasMoreElements()) {
String headerName = headerNames.nextElement();
String headerValue = request.getHeader(headerName);
con.setRequestProperty(headerName, headerValue);
}
// TODO: proxy any POST parameters from "request" to "con"
}
private void proxyConnectionToResponse(HttpURLConnection con, HttpServletResponse response, String inPath) throws IOException {
// Proxy the response headers
for (int i = 0; ; i++) {
String headerKey = con.getHeaderFieldKey(i);
String headerValue = con.getHeaderField(i);
if (headerKey != null && headerValue != null) {
response.addHeader(headerKey, headerValue);
continue;
}
break;
}
// Read the response body
InputStream inputStream = con.getInputStream();
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, bytesRead);
}
byte[] data = outputStream.toByteArray(); // TODO: limit file size that can be read into memory
// Close the streams
outputStream.close();
inputStream.close();
// Extract filename
String filename = "";
if (inPath.contains("/")) {
String[] parts = inPath.split("/");
if (parts.length > 0) {
filename = parts[parts.length - 1];
}
}
// Parse and modify output if needed
if (HTMLParser.isHtmlFile(filename)) {
// HTML file - needs to be parsed
HTMLParser htmlParser = new HTMLParser("", inPath, "", false, data, "proxy", Service.APP, null, "light", true);
htmlParser.addAdditionalHeaderTags();
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' ws:; font-src 'self' data:;");
response.setContentType(con.getContentType());
response.setContentLength(htmlParser.getData().length);
response.getOutputStream().write(htmlParser.getData());
}
else {
// Regular file - can be streamed directly
response.addHeader("Content-Security-Policy", "default-src 'self'");
response.setContentType(con.getContentType());
response.setContentLength(data.length);
response.getOutputStream().write(data);
}
}
}

View File

@@ -14,6 +14,7 @@ import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
@@ -27,12 +28,15 @@ import org.qortal.api.ApiErrors;
import org.qortal.api.ApiException;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.model.AccountPenaltyStats;
import org.qortal.api.model.ApiOnlineAccount;
import org.qortal.api.model.RewardShareKeyRequest;
import org.qortal.asset.Asset;
import org.qortal.controller.LiteNode;
import org.qortal.controller.OnlineAccountsManager;
import org.qortal.crypto.Crypto;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.AccountPenaltyData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.network.OnlineAccountData;
import org.qortal.data.network.OnlineAccountLevel;
@@ -109,18 +113,26 @@ public class AddressesResource {
if (!Crypto.isValidAddress(address))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
byte[] lastReference = null;
AccountData accountData;
try (final Repository repository = RepositoryManager.getRepository()) {
AccountData accountData = repository.getAccountRepository().getAccount(address);
// Not found?
if (accountData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
lastReference = accountData.getReference();
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
if (Settings.getInstance().isLite()) {
// Lite nodes request data from peers instead of the local db
accountData = LiteNode.getInstance().fetchAccountData(address);
}
else {
// All other node types request data from local db
try (final Repository repository = RepositoryManager.getRepository()) {
accountData = repository.getAccountRepository().getAccount(address);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
// Not found?
if (accountData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
byte[] lastReference = accountData.getReference();
if (lastReference == null || lastReference.length == 0)
return "false";
@@ -196,6 +208,10 @@ public class AddressesResource {
try (final Repository repository = RepositoryManager.getRepository()) {
List<OnlineAccountLevel> onlineAccountLevels = new ArrayList<>();
// Prepopulate all levels
for (int i=0; i<=10; i++)
onlineAccountLevels.add(new OnlineAccountLevel(i, 0));
for (OnlineAccountData onlineAccountData : onlineAccounts) {
try {
final int minterLevel = Account.getRewardShareEffectiveMintingLevelIncludingLevelZero(repository, onlineAccountData.getPublicKey());
@@ -458,6 +474,54 @@ public class AddressesResource {
}
}
@GET
@Path("/penalties")
@Operation(
summary = "Get addresses with penalties",
description = "Returns a list of accounts with a blocksMintedPenalty",
responses = {
@ApiResponse(
description = "accounts with penalties",
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = AccountPenaltyData.class)))
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public List<AccountPenaltyData> getAccountsWithPenalties() {
try (final Repository repository = RepositoryManager.getRepository()) {
List<AccountData> accounts = repository.getAccountRepository().getPenaltyAccounts();
List<AccountPenaltyData> penalties = accounts.stream().map(a -> new AccountPenaltyData(a.getAddress(), a.getBlocksMintedPenalty())).collect(Collectors.toList());
return penalties;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@GET
@Path("/penalties/stats")
@Operation(
summary = "Get stats about current penalties",
responses = {
@ApiResponse(
description = "aggregated stats about accounts with penalties",
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = AccountPenaltyStats.class)))
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public AccountPenaltyStats getPenaltyStats() {
try (final Repository repository = RepositoryManager.getRepository()) {
List<AccountData> accounts = repository.getAccountRepository().getPenaltyAccounts();
return AccountPenaltyStats.fromAccounts(accounts);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
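For reference, a minimal client-side sketch of the two new penalty endpoints, assuming the resource is mounted at /addresses and the node's API listens on the default port (12391 here):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PenaltyQueryExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // JSON array of AccountPenaltyData (address + blocksMintedPenalty)
        HttpRequest list = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/addresses/penalties")).GET().build();
        System.out.println(client.send(list, HttpResponse.BodyHandlers.ofString()).body());

        // Aggregated AccountPenaltyStats for the same set of accounts
        HttpRequest stats = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/addresses/penalties/stats")).GET().build();
        System.out.println(client.send(stats, HttpResponse.BodyHandlers.ofString()).body());
    }
}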
@POST
@Path("/publicize")
@Operation(

View File

@@ -0,0 +1,83 @@
package org.qortal.api.resource;
import com.google.common.io.Resources;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.Hidden;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.api.*;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
@Path("/apps")
@Tag(name = "Apps")
public class AppsResource {
@Context HttpServletRequest request;
@Context HttpServletResponse response;
@Context ServletContext context;
@GET
@Path("/q-apps.js")
@Hidden // For internal Q-App API use only
@Operation(
summary = "Javascript interface for Q-Apps",
responses = {
@ApiResponse(
description = "javascript",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
public String getQAppsJs() {
URL url = Resources.getResource("q-apps/q-apps.js");
try {
return Resources.toString(url, StandardCharsets.UTF_8);
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FILE_NOT_FOUND);
}
}
@GET
@Path("/q-apps-gateway.js")
@Hidden // For internal Q-App API use only
@Operation(
summary = "Gateway-specific interface for Q-Apps",
responses = {
@ApiResponse(
description = "javascript",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
public String getQAppsGatewayJs() {
URL url = Resources.getResource("q-apps/q-apps-gateway.js");
try {
return Resources.toString(url, StandardCharsets.UTF_8);
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FILE_NOT_FOUND);
}
}
}
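As a quick smoke test, the bundled script can be fetched directly from a running node; a minimal sketch, assuming the default API port 12391:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class QAppsJsFetchExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Returns the bundled Javascript interface as text/plain
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/apps/q-apps.js")).GET().build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // Print just the start of the script to confirm it is being served
        System.out.println(response.body().substring(0, Math.min(200, response.body().length())));
    }
}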

View File

@@ -1,6 +1,8 @@
package org.qortal.api.resource;
import com.google.common.primitives.Bytes;
import com.j256.simplemagic.ContentInfo;
import com.j256.simplemagic.ContentInfoUtil;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.media.ArraySchema;
@@ -12,11 +14,14 @@ import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.net.FileNameMap;
import java.net.URLConnection;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
@@ -25,11 +30,13 @@ import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.util.encoders.Base64;
import org.qortal.api.*;
import org.qortal.api.model.FileProperties;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.arbitrary.*;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
@@ -38,6 +45,7 @@ import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller;
import org.qortal.controller.arbitrary.ArbitraryDataRenderManager;
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.controller.arbitrary.ArbitraryMetadataManager;
import org.qortal.data.account.AccountData;
@@ -45,6 +53,7 @@ import org.qortal.data.arbitrary.*;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
@@ -56,8 +65,7 @@ import org.qortal.transaction.Transaction.ValidationResult;
import org.qortal.transform.TransformationException;
import org.qortal.transform.transaction.ArbitraryTransactionTransformer;
import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.ZipUtils;
import org.qortal.utils.*;
@Path("/arbitrary")
@Tag(name = "Arbitrary")
@@ -85,11 +93,15 @@ public class ArbitraryResource {
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<ArbitraryResourceInfo> getResources(
@QueryParam("service") Service service,
@QueryParam("name") String name,
@QueryParam("identifier") String identifier,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
@Parameter(description = "Include followed names only") @QueryParam("followedonly") Boolean followedOnly,
@Parameter(description = "Exclude blocked content") @QueryParam("excludeblocked") Boolean excludeBlocked,
@Parameter(description = "Filter names by list") @QueryParam("namefilter") String nameListFilter,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus,
@Parameter(description = "Include metadata") @QueryParam("includemetadata") Boolean includeMetadata) {
@@ -106,18 +118,33 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "identifier cannot be specified when requesting a default resource");
}
// Set up name filters if supplied
List<String> names = null;
if (name != null) {
// Filter using single name
names = Arrays.asList(name);
}
else if (nameListFilter != null) {
// Filter using supplied list of names
names = ResourceListManager.getInstance().getStringsInList(nameListFilter);
if (names.isEmpty()) {
// If list is empty (or doesn't exist) we can shortcut with empty response
return new ArrayList<>();
}
}
List<ArbitraryResourceInfo> resources = repository.getArbitraryRepository()
.getArbitraryResources(service, identifier, null, defaultRes, limit, offset, reverse);
.getArbitraryResources(service, identifier, names, defaultRes, followedOnly, excludeBlocked, limit, offset, reverse);
if (resources == null) {
return new ArrayList<>();
}
if (includeStatus != null && includeStatus) {
resources = this.addStatusToResources(resources);
resources = ArbitraryTransactionUtils.addStatusToResources(resources);
}
if (includeMetadata != null && includeMetadata) {
resources = this.addMetadataToResources(resources);
resources = ArbitraryTransactionUtils.addMetadataToResources(resources);
}
return resources;
@@ -141,30 +168,56 @@ public class ArbitraryResource {
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<ArbitraryResourceInfo> searchResources(
@QueryParam("service") Service service,
@QueryParam("query") String query,
@Parameter(description = "Query (searches both name and identifier fields)") @QueryParam("query") String query,
@Parameter(description = "Identifier (searches identifier field only)") @QueryParam("identifier") String identifier,
@Parameter(description = "Name (searches name field only)") @QueryParam("name") List<String> names,
@Parameter(description = "Prefix only (if true, only the beginning of fields are matched)") @QueryParam("prefix") Boolean prefixOnly,
@Parameter(description = "Exact match names only (if true, partial name matches are excluded)") @QueryParam("exactmatchnames") Boolean exactMatchNamesOnly,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@Parameter(description = "Filter names by list (exact matches only)") @QueryParam("namefilter") String nameListFilter,
@Parameter(description = "Include followed names only") @QueryParam("followedonly") Boolean followedOnly,
@Parameter(description = "Exclude blocked content") @QueryParam("excludeblocked") Boolean excludeBlocked,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus,
@Parameter(description = "Include metadata") @QueryParam("includemetadata") Boolean includeMetadata,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus,
@Parameter(description = "Include metadata") @QueryParam("includemetadata") Boolean includeMetadata) {
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
try (final Repository repository = RepositoryManager.getRepository()) {
boolean defaultRes = Boolean.TRUE.equals(defaultResource);
boolean usePrefixOnly = Boolean.TRUE.equals(prefixOnly);
List<String> exactMatchNames = new ArrayList<>();
if (nameListFilter != null) {
// Load names from supplied list of names
exactMatchNames.addAll(ResourceListManager.getInstance().getStringsInList(nameListFilter));
// If list is empty (or doesn't exist) we can shortcut with empty response
if (exactMatchNames.isEmpty()) {
return new ArrayList<>();
}
}
// Move names to exact match list, if requested
if (exactMatchNamesOnly != null && exactMatchNamesOnly && names != null) {
exactMatchNames.addAll(names);
names = null;
}
List<ArbitraryResourceInfo> resources = repository.getArbitraryRepository()
.searchArbitraryResources(service, query, defaultRes, limit, offset, reverse);
.searchArbitraryResources(service, query, identifier, names, usePrefixOnly, exactMatchNames, defaultRes, followedOnly, excludeBlocked, limit, offset, reverse);
if (resources == null) {
return new ArrayList<>();
}
if (includeStatus != null && includeStatus) {
resources = this.addStatusToResources(resources);
resources = ArbitraryTransactionUtils.addStatusToResources(resources);
}
if (includeMetadata != null && includeMetadata) {
resources = this.addMetadataToResources(resources);
resources = ArbitraryTransactionUtils.addMetadataToResources(resources);
}
return resources;
@@ -174,67 +227,6 @@ public class ArbitraryResource {
}
}
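A hedged sketch of the expanded search call; the /arbitrary/resources/search path, the WEBSITE service value and the QortalDemo name are illustrative assumptions:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ResourceSearchExample {
    public static void main(String[] args) throws Exception {
        // Search name and identifier fields, restrict to exact-match names, exclude blocked content
        String url = "http://localhost:12391/arbitrary/resources/search"
                + "?service=WEBSITE&query=demo&prefix=true"
                + "&exactmatchnames=true&name=QortalDemo"
                + "&excludeblocked=true&includemetadata=true&limit=20";
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}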
@GET
@Path("/resources/names")
@Operation(
summary = "List arbitrary resources available on chain, grouped by creator's name",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceInfo.class))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<ArbitraryResourceNameInfo> getResourcesGroupedByName(
@QueryParam("service") Service service,
@QueryParam("identifier") String identifier,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus,
@Parameter(description = "Include metadata") @QueryParam("includemetadata") Boolean includeMetadata) {
try (final Repository repository = RepositoryManager.getRepository()) {
// Treat empty identifier as null
if (identifier != null && identifier.isEmpty()) {
identifier = null;
}
// Ensure that "default" and "identifier" parameters cannot coexist
boolean defaultRes = Boolean.TRUE.equals(defaultResource);
if (defaultRes == true && identifier != null) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "identifier cannot be specified when requesting a default resource");
}
List<ArbitraryResourceNameInfo> creatorNames = repository.getArbitraryRepository()
.getArbitraryResourceCreatorNames(service, identifier, defaultRes, limit, offset, reverse);
for (ArbitraryResourceNameInfo creatorName : creatorNames) {
String name = creatorName.name;
if (name != null) {
List<ArbitraryResourceInfo> resources = repository.getArbitraryRepository()
.getArbitraryResources(service, identifier, name, defaultRes, null, null, reverse);
if (includeStatus != null && includeStatus) {
resources = this.addStatusToResources(resources);
}
if (includeMetadata != null && includeMetadata) {
resources = this.addMetadataToResources(resources);
}
creatorName.resources = resources;
}
}
return creatorNames;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@GET
@Path("/resource/status/{service}/{name}")
@Operation(
@@ -252,8 +244,33 @@ public class ArbitraryResource {
@PathParam("name") String name,
@QueryParam("build") Boolean build) {
Security.requirePriorAuthorizationOrApiKey(request, name, service, null);
return this.getStatus(service, name, null, build);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorizationOrApiKey(request, name, service, null, apiKey);
return ArbitraryTransactionUtils.getStatus(service, name, null, build);
}
@GET
@Path("/resource/properties/{service}/{name}/{identifier}")
@Operation(
summary = "Get properties of a QDN resource",
description = "Attempts to download the data if it is not available locally. A filename is only returned for single-file resources, and mimeType is only returned when it can be determined.",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = FileProperties.class))
)
}
)
@SecurityRequirement(name = "apiKey")
public FileProperties getResourceProperties(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("identifier") String identifier) {
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier, apiKey);
return this.getFileProperties(service, name, identifier);
}
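A minimal sketch of calling the new properties endpoint; the X-API-KEY header name, the DOCUMENT service value and the name/identifier are illustrative assumptions:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ResourcePropertiesExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Hypothetical name/identifier; returns FileProperties JSON (size, filename, mimeType)
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/arbitrary/resource/properties/DOCUMENT/QortalDemo/example-identifier"))
                .header("X-API-KEY", "node-api-key") // replace with the node's API key; skipped if QDN auth bypass is enabled
                .GET()
                .build();
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}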
@GET
@@ -274,8 +291,10 @@ public class ArbitraryResource {
@PathParam("identifier") String identifier,
@QueryParam("build") Boolean build) {
Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier);
return this.getStatus(service, name, identifier, build);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier, apiKey);
return ArbitraryTransactionUtils.getStatus(service, name, identifier, build);
}
@@ -487,6 +506,9 @@ public class ArbitraryResource {
}
for (ArbitraryTransactionData transactionData : transactionDataList) {
if (transactionData.getService() == null) {
continue;
}
ArbitraryResourceInfo arbitraryResourceInfo = new ArbitraryResourceInfo();
arbitraryResourceInfo.name = transactionData.getName();
arbitraryResourceInfo.service = transactionData.getService();
@@ -497,10 +519,10 @@ public class ArbitraryResource {
}
if (includeStatus != null && includeStatus) {
resources = this.addStatusToResources(resources);
resources = ArbitraryTransactionUtils.addStatusToResources(resources);
}
if (includeMetadata != null && includeMetadata) {
resources = this.addMetadataToResources(resources);
resources = ArbitraryTransactionUtils.addMetadataToResources(resources);
}
return resources;
@@ -530,7 +552,7 @@ public class ArbitraryResource {
Security.checkApiCallAllowed(request);
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
return resource.delete();
return resource.delete(false);
}
@POST
@@ -627,6 +649,7 @@ public class ArbitraryResource {
@PathParam("service") Service service,
@PathParam("name") String name,
@QueryParam("filepath") String filepath,
@QueryParam("encoding") String encoding,
@QueryParam("rebuild") boolean rebuild,
@QueryParam("async") boolean async,
@QueryParam("attempts") Integer attempts) {
@@ -636,7 +659,7 @@ public class ArbitraryResource {
Security.checkApiCallAllowed(request);
}
return this.download(service, name, null, filepath, rebuild, async, attempts);
return this.download(service, name, null, filepath, encoding, rebuild, async, attempts);
}
@GET
@@ -662,16 +685,17 @@ public class ArbitraryResource {
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@QueryParam("filepath") String filepath,
@QueryParam("encoding") String encoding,
@QueryParam("rebuild") boolean rebuild,
@QueryParam("async") boolean async,
@QueryParam("attempts") Integer attempts) {
// Authentication can be bypassed in the settings, for those running public QDN nodes
if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
Security.checkApiCallAllowed(request);
Security.checkApiCallAllowed(request, apiKey);
}
return this.download(service, name, identifier, filepath, rebuild, async, attempts);
return this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts);
}
@@ -694,18 +718,15 @@ public class ArbitraryResource {
}
)
@SecurityRequirement(name = "apiKey")
public ArbitraryResourceMetadata getMetadata(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("identifier") String identifier) {
Security.checkApiCallAllowed(request);
public ArbitraryResourceMetadata getMetadata(@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("identifier") String identifier) {
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
try {
ArbitraryDataTransactionMetadata transactionMetadata = ArbitraryMetadataManager.getInstance().fetchMetadata(resource, false);
if (transactionMetadata != null) {
ArbitraryResourceMetadata resourceMetadata = ArbitraryResourceMetadata.fromTransactionMetadata(transactionMetadata);
ArbitraryResourceMetadata resourceMetadata = ArbitraryResourceMetadata.fromTransactionMetadata(transactionMetadata, true);
if (resourceMetadata != null) {
return resourceMetadata;
}
@@ -719,7 +740,7 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
}
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FILE_NOT_FOUND);
}
@@ -759,6 +780,8 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String path) {
Security.checkApiCallAllowed(request);
@@ -767,7 +790,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, path, null, null, false,
title, description, tags, category);
fee, null, title, description, tags, category, preview);
}
@POST
@@ -804,6 +827,8 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String path) {
Security.checkApiCallAllowed(request);
@@ -812,7 +837,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, path, null, null, false,
title, description, tags, category);
fee, null, title, description, tags, category, preview);
}
@@ -850,6 +875,9 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("filename") String filename,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64) {
Security.checkApiCallAllowed(request);
@@ -858,7 +886,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, null, null, base64, false,
title, description, tags, category);
fee, filename, title, description, tags, category, preview);
}
@POST
@@ -893,6 +921,9 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("filename") String filename,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64) {
Security.checkApiCallAllowed(request);
@@ -901,7 +932,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64, false,
title, description, tags, category);
fee, filename, title, description, tags, category, preview);
}
@@ -938,6 +969,8 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64Zip) {
Security.checkApiCallAllowed(request);
@@ -946,7 +979,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, null, null, base64Zip, true,
title, description, tags, category);
fee, null, title, description, tags, category, preview);
}
@POST
@@ -981,6 +1014,8 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64Zip) {
Security.checkApiCallAllowed(request);
@@ -989,7 +1024,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64Zip, true,
title, description, tags, category);
fee, null, title, description, tags, category, preview);
}
@@ -1029,6 +1064,9 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("filename") String filename,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String string) {
Security.checkApiCallAllowed(request);
@@ -1037,7 +1075,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, null, string, null, false,
title, description, tags, category);
fee, filename, title, description, tags, category, preview);
}
@POST
@@ -1074,6 +1112,9 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("filename") String filename,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String string) {
Security.checkApiCallAllowed(request);
@@ -1082,15 +1123,48 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, null, string, null, false,
title, description, tags, category);
fee, filename, title, description, tags, category, preview);
}
// Shared methods
private String preview(String directoryPath, Service service) {
Security.checkApiCallAllowed(request);
ArbitraryTransactionData.Method method = ArbitraryTransactionData.Method.PUT;
ArbitraryTransactionData.Compression compression = ArbitraryTransactionData.Compression.ZIP;
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(Paths.get(directoryPath),
null, service, null, method, compression,
null, null, null, null);
try {
arbitraryDataWriter.save();
} catch (IOException | DataException | InterruptedException | MissingDataException e) {
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
} catch (RuntimeException e) {
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_DATA, e.getMessage());
}
ArbitraryDataFile arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile();
if (arbitraryDataFile != null) {
String digest58 = arbitraryDataFile.digest58();
if (digest58 != null) {
// Pre-authorize resource
ArbitraryDataResource resource = new ArbitraryDataResource(digest58, null, null, null);
ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource);
return "/render/hash/" + digest58 + "?secret=" + Base58.encode(arbitraryDataFile.getSecret());
}
}
return "Unable to generate preview URL";
}
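The preview flow can be driven from a client by adding preview=true to one of the upload endpoints; a sketch, assuming the path-based upload endpoint at /arbitrary/{service}/{name}, the X-API-KEY header and a local directory to publish:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PreviewUploadExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Body is the local path to publish; preview=true returns a render URL instead of signing a transaction
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/arbitrary/WEBSITE/QortalDemo?preview=true"))
                .header("X-API-KEY", "node-api-key") // replace with the node's API key
                .POST(HttpRequest.BodyPublishers.ofString("/home/user/my-website"))
                .build();
        // Response body should be a relative URL such as /render/hash/<digest>?secret=<secret>
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}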
private String upload(Service service, String name, String identifier,
String path, String string, String base64, boolean zipped,
String title, String description, List<String> tags, Category category) {
String path, String string, String base64, boolean zipped, Long fee, String filename,
String title, String description, List<String> tags, Category category,
Boolean preview) {
// Fetch public key from registered name
try (final Repository repository = RepositoryManager.getRepository()) {
NameData nameData = repository.getNameRepository().fromName(name);
@@ -1099,7 +1173,12 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, error);
}
if (!Controller.getInstance().isUpToDate()) {
final Long now = NTP.getTime();
if (now == null) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NO_TIME_SYNC);
}
final Long minLatestBlockTimestamp = now - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
}
@@ -1113,7 +1192,12 @@ public class ArbitraryResource {
if (path == null) {
// See if we have a string instead
if (string != null) {
File tempFile = File.createTempFile("qortal-", ".tmp");
if (filename == null) {
// Use current time as filename
filename = String.format("qortal-%d", NTP.getTime());
}
java.nio.file.Path tempDirectory = Files.createTempDirectory("qortal-");
File tempFile = Paths.get(tempDirectory.toString(), filename).toFile();
tempFile.deleteOnExit();
BufferedWriter writer = new BufferedWriter(new FileWriter(tempFile.toPath().toString()));
writer.write(string);
@@ -1123,7 +1207,12 @@ public class ArbitraryResource {
}
// ... or base64 encoded raw data
else if (base64 != null) {
File tempFile = File.createTempFile("qortal-", ".tmp");
if (filename == null) {
// Use current time as filename
filename = String.format("qortal-%d", NTP.getTime());
}
java.nio.file.Path tempDirectory = Files.createTempDirectory("qortal-");
File tempFile = Paths.get(tempDirectory.toString(), filename).toFile();
tempFile.deleteOnExit();
Files.write(tempFile.toPath(), Base64.decode(base64));
path = tempFile.toPath().toString();
@@ -1146,15 +1235,25 @@ public class ArbitraryResource {
// The actual data will be in a randomly-named subfolder of tempDirectory
// Remove hidden folders, i.e. starting with "_", as some systems can add them, e.g. "__MACOSX"
String[] files = tempDirectory.toFile().list((parent, child) -> !child.startsWith("_"));
if (files.length == 1) { // Single directory or file only
if (files != null && files.length == 1) { // Single directory or file only
path = Paths.get(tempDirectory.toString(), files[0]).toString();
}
}
}
// Finish here if user has requested a preview
if (preview != null && preview == true) {
return this.preview(path, service);
}
// Default to zero fee if not specified
if (fee == null) {
fee = 0L;
}
try {
ArbitraryDataTransactionBuilder transactionBuilder = new ArbitraryDataTransactionBuilder(
repository, publicKey58, Paths.get(path), name, null, service, identifier,
repository, publicKey58, fee, Paths.get(path), name, null, service, identifier,
title, description, tags, category
);
@@ -1168,12 +1267,13 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_DATA, e.getMessage());
}
} catch (DataException | IOException e) {
} catch (Exception e) {
LOGGER.info("Exception when publishing data: ", e);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
}
}
private HttpServletResponse download(Service service, String name, String identifier, String filepath, boolean rebuild, boolean async, Integer maxAttempts) {
private HttpServletResponse download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts) {
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
try {
@@ -1216,7 +1316,7 @@ public class ArbitraryResource {
if (filepath == null || filepath.isEmpty()) {
// No file path supplied - so check if this is a single file resource
String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal");
if (files.length == 1) {
if (files != null && files.length == 1) {
// This is a single file resource
filepath = files[0];
}
@@ -1226,13 +1326,50 @@ public class ArbitraryResource {
}
}
// TODO: limit file size that can be read into memory
java.nio.file.Path path = Paths.get(outputPath.toString(), filepath);
if (!Files.exists(path)) {
String message = String.format("No file exists at filepath: %s", filepath);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, message);
}
byte[] data = Files.readAllBytes(path);
byte[] data;
int fileSize = (int)path.toFile().length();
int length = fileSize;
// Parse "Range" header
Integer rangeStart = null;
Integer rangeEnd = null;
String range = request.getHeader("Range");
if (range != null) {
range = range.replace("bytes=", "");
String[] parts = range.split("-");
rangeStart = (parts != null && parts.length > 0) ? Integer.parseInt(parts[0]) : null;
rangeEnd = (parts != null && parts.length > 1) ? Integer.parseInt(parts[1]) : fileSize;
}
if (rangeStart != null && rangeEnd != null) {
// We have a range, so update the requested length
length = rangeEnd - rangeStart;
}
if (length < fileSize && encoding == null) {
// Partial content requested, and not encoding the data
response.setStatus(206);
response.addHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd-1, fileSize));
data = FilesystemUtils.readFromFile(path.toString(), rangeStart, length);
}
else {
// Full content requested (or encoded data)
response.setStatus(200);
data = Files.readAllBytes(path); // TODO: limit file size that can be read into memory
}
// Encode the data if requested
if (encoding != null && Objects.equals(encoding.toLowerCase(), "base64")) {
data = Base64.encode(data);
}
response.addHeader("Accept-Ranges", "bytes");
response.setContentType(context.getMimeType(path.toString()));
response.setContentLength(data.length);
response.getOutputStream().write(data);
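The Range handling above means partial downloads work in the usual HTTP way; a hedged client sketch, assuming a name-based download endpoint under /arbitrary and the X-API-KEY header:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RangeDownloadExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Request only the first kilobyte; the node answers 206 with a Content-Range header.
        // Note: partial content is only served when no encoding is requested; add ?encoding=base64 for full base64 data instead.
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/arbitrary/VIDEO/QortalDemo"))
                .header("Range", "bytes=0-1024")
                .header("X-API-KEY", "node-api-key") // replace with the node's API key
                .GET()
                .build();
        HttpResponse<byte[]> response = client.send(request, HttpResponse.BodyHandlers.ofByteArray());
        System.out.println(response.statusCode() + " " + response.headers().firstValue("Content-Range").orElse(""));
    }
}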
@@ -1244,59 +1381,44 @@ public class ArbitraryResource {
}
}
// Added by this change: resolves FileProperties (size, filename, mimeType) for the new /resource/properties endpoints
private FileProperties getFileProperties(Service service, String name, String identifier) {
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
try {
arbitraryDataReader.loadSynchronously(false);
java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
if (outputPath == null) {
// Assume the resource doesn't exist
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, "File not found");
}
FileProperties fileProperties = new FileProperties();
fileProperties.size = FileUtils.sizeOfDirectory(outputPath.toFile());
String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal");
if (files.length == 1) {
String filename = files[0];
java.nio.file.Path filePath = Paths.get(outputPath.toString(), files[0]);
ContentInfoUtil util = new ContentInfoUtil();
ContentInfo info = util.findMatch(filePath.toFile());
String mimeType;
if (info != null) {
// Attempt to extract MIME type from file contents
mimeType = info.getMimeType();
}
else {
// Fall back to using the filename
FileNameMap fileNameMap = URLConnection.getFileNameMap();
mimeType = fileNameMap.getContentTypeFor(filename);
}
fileProperties.filename = filename;
fileProperties.mimeType = mimeType;
}
return fileProperties;
} catch (Exception e) {
LOGGER.debug(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
}
}
// Removed by this change: these helpers are superseded by the ArbitraryTransactionUtils equivalents called above
private ArbitraryResourceStatus getStatus(Service service, String name, String identifier, Boolean build) {
// If "build=true" has been specified in the query string, build the resource before returning its status
if (build != null && build == true) {
ArbitraryDataReader reader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, null);
try {
if (!reader.isBuilding()) {
reader.loadSynchronously(false);
}
} catch (Exception e) {
// No need to handle exception, as it will be reflected in the status
}
}
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
return resource.getStatus(false);
}
private List<ArbitraryResourceInfo> addStatusToResources(List<ArbitraryResourceInfo> resources) {
// Determine and add the status of each resource
List<ArbitraryResourceInfo> updatedResources = new ArrayList<>();
for (ArbitraryResourceInfo resourceInfo : resources) {
try {
ArbitraryDataResource resource = new ArbitraryDataResource(resourceInfo.name, ResourceIdType.NAME,
resourceInfo.service, resourceInfo.identifier);
ArbitraryResourceStatus status = resource.getStatus(true);
if (status != null) {
resourceInfo.status = status;
}
updatedResources.add(resourceInfo);
} catch (Exception e) {
// Catch and log all exceptions, since some systems are experiencing 500 errors when including statuses
LOGGER.info("Caught exception when adding status to resource %s: %s", resourceInfo, e.toString());
}
}
return updatedResources;
}
private List<ArbitraryResourceInfo> addMetadataToResources(List<ArbitraryResourceInfo> resources) {
// Add metadata fields to each resource if they exist
List<ArbitraryResourceInfo> updatedResources = new ArrayList<>();
for (ArbitraryResourceInfo resourceInfo : resources) {
ArbitraryDataResource resource = new ArbitraryDataResource(resourceInfo.name, ResourceIdType.NAME,
resourceInfo.service, resourceInfo.identifier);
ArbitraryDataTransactionMetadata transactionMetadata = resource.getLatestTransactionMetadata();
ArbitraryResourceMetadata resourceMetadata = ArbitraryResourceMetadata.fromTransactionMetadata(transactionMetadata);
if (resourceMetadata != null) {
resourceInfo.metadata = resourceMetadata;
}
updatedResources.add(resourceInfo);
}
return updatedResources;
}
}

View File

@@ -48,6 +48,7 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.Triple;
@Path("/blocks")
@Tag(name = "Blocks")
@@ -114,7 +115,7 @@ public class BlocksResource {
@Path("/signature/{signature}/data")
@Operation(
summary = "Fetch serialized, base58 encoded block data using base58 signature",
description = "Returns serialized data for the block that matches the given signature",
description = "Returns serialized data for the block that matches the given signature, and an optional block serialization version",
responses = {
@ApiResponse(
description = "the block data",
@@ -125,7 +126,7 @@ public class BlocksResource {
@ApiErrors({
ApiError.INVALID_SIGNATURE, ApiError.BLOCK_UNKNOWN, ApiError.INVALID_DATA, ApiError.REPOSITORY_ISSUE
})
public String getSerializedBlockData(@PathParam("signature") String signature58) {
public String getSerializedBlockData(@PathParam("signature") String signature58, @QueryParam("version") Integer version) {
// Decode signature
byte[] signature;
try {
@@ -136,20 +137,44 @@ public class BlocksResource {
try (final Repository repository = RepositoryManager.getRepository()) {
// Default to version 1
if (version == null) {
version = 1;
}
// Check the database first
BlockData blockData = repository.getBlockRepository().fromSignature(signature);
if (blockData != null) {
Block block = new Block(repository, blockData);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
bytes.write(BlockTransformer.toBytes(block));
switch (version) {
case 1:
bytes.write(BlockTransformer.toBytes(block));
break;
case 2:
bytes.write(BlockTransformer.toBytesV2(block));
break;
default:
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
}
return Base58.encode(bytes.toByteArray());
}
// Not found, so try the block archive
byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
if (bytes != null) {
return Base58.encode(bytes);
Triple<byte[], Integer, Integer> serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
if (serializedBlock != null) {
byte[] bytes = serializedBlock.getA();
Integer serializationVersion = serializedBlock.getB();
if (version != serializationVersion) {
// TODO: we could quite easily reserialize the block with the requested version
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Block is not stored using requested serialization version.");
}
return Base58.encode(bytes);
}
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
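A sketch of requesting the new serialization version; the block signature is a placeholder:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SerializedBlockExample {
    public static void main(String[] args) throws Exception {
        String signature58 = "<base58-block-signature>"; // placeholder
        // version=2 requests V2 serialization; archived blocks stored with a different version return INVALID_CRITERIA
        String url = "http://localhost:12391/blocks/signature/" + signature58 + "/data?version=2";
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}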
@@ -197,14 +222,25 @@ public class BlocksResource {
}
try (final Repository repository = RepositoryManager.getRepository()) {
// Check if the block exists in either the database or archive
if (repository.getBlockRepository().getHeightFromSignature(signature) == 0 &&
repository.getBlockArchiveRepository().getHeightFromSignature(signature) == 0) {
// Not found in either the database or archive
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
}
// Check if the block exists in either the database or archive
int height = repository.getBlockRepository().getHeightFromSignature(signature);
if (height == 0) {
height = repository.getBlockArchiveRepository().getHeightFromSignature(signature);
if (height == 0) {
// Not found in either the database or archive
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
}
}
return repository.getBlockRepository().getTransactionsFromSignature(signature, limit, offset, reverse);
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, height, height);
// Expand signatures to transactions
List<TransactionData> transactions = new ArrayList<>(signatures.size());
for (byte[] s : signatures) {
transactions.add(repository.getTransactionRepository().fromSignature(s));
}
return transactions;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
@@ -613,13 +649,16 @@ public class BlocksResource {
@ApiErrors({
ApiError.REPOSITORY_ISSUE
})
public List<BlockData> getBlockRange(@PathParam("height") int height, @Parameter(
ref = "count"
) @QueryParam("count") int count) {
public List<BlockData> getBlockRange(@PathParam("height") int height,
@Parameter(ref = "count") @QueryParam("count") int count,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
@QueryParam("includeOnlineSignatures") Boolean includeOnlineSignatures) {
try (final Repository repository = RepositoryManager.getRepository()) {
List<BlockData> blocks = new ArrayList<>();
boolean shouldReverse = (reverse != null && reverse == true);
for (/* count already set */; count > 0; --count, ++height) {
int i = 0;
while (i < count) {
BlockData blockData = repository.getBlockRepository().fromHeight(height);
if (blockData == null) {
// Not found - try the archive
@@ -629,8 +668,14 @@ public class BlocksResource {
break;
}
}
if (includeOnlineSignatures == null || includeOnlineSignatures == false) {
blockData.setOnlineAccountsSignatures(null);
}
blocks.add(blockData);
height = shouldReverse ? height - 1 : height + 1;
i++;
}
return blocks;
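A sketch of the extended block-range call; the /blocks/range/{height} path and the starting height are assumptions for illustration:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class BlockRangeExample {
    public static void main(String[] args) throws Exception {
        // Walk 10 blocks backwards from an arbitrary height, omitting online-accounts signatures
        String url = "http://localhost:12391/blocks/range/1500000?count=10&reverse=true&includeOnlineSignatures=false";
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}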

View File

@@ -40,6 +40,8 @@ import org.qortal.utils.Base58;
import com.google.common.primitives.Bytes;
import static org.qortal.data.chat.ChatMessage.Encoding;
@Path("/chat")
@Tag(name = "Chat")
public class ChatResource {
@@ -69,6 +71,11 @@ public class ChatResource {
public List<ChatMessage> searchChat(@QueryParam("before") Long before, @QueryParam("after") Long after,
@QueryParam("txGroupId") Integer txGroupId,
@QueryParam("involving") List<String> involvingAddresses,
@QueryParam("reference") String reference,
@QueryParam("chatreference") String chatReference,
@QueryParam("haschatreference") Boolean hasChatReference,
@QueryParam("sender") String sender,
@QueryParam("encoding") Encoding encoding,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
@@ -87,18 +94,132 @@ public class ChatResource {
if (after != null && after < 1500000000000L)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
byte[] referenceBytes = null;
if (reference != null)
referenceBytes = Base58.decode(reference);
byte[] chatReferenceBytes = null;
if (chatReference != null)
chatReferenceBytes = Base58.decode(chatReference);
try (final Repository repository = RepositoryManager.getRepository()) {
return repository.getChatRepository().getMessagesMatchingCriteria(
before,
after,
txGroupId,
referenceBytes,
chatReferenceBytes,
hasChatReference,
involvingAddresses,
sender,
encoding,
limit, offset, reverse);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@GET
@Path("/messages/count")
@Operation(
summary = "Count chat messages",
description = "Returns count of CHAT messages that match criteria. Must provide EITHER 'txGroupId' OR two 'involving' addresses.",
responses = {
@ApiResponse(
description = "count of messages",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "integer"
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public int countChatMessages(@QueryParam("before") Long before, @QueryParam("after") Long after,
@QueryParam("txGroupId") Integer txGroupId,
@QueryParam("involving") List<String> involvingAddresses,
@QueryParam("reference") String reference,
@QueryParam("chatreference") String chatReference,
@QueryParam("haschatreference") Boolean hasChatReference,
@QueryParam("sender") String sender,
@QueryParam("encoding") Encoding encoding,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
// Check args meet expectations
if ((txGroupId == null && involvingAddresses.size() != 2)
|| (txGroupId != null && !involvingAddresses.isEmpty()))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
// Check any provided addresses are valid
if (involvingAddresses.stream().anyMatch(address -> !Crypto.isValidAddress(address)))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
if (before != null && before < 1500000000000L)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
if (after != null && after < 1500000000000L)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
byte[] referenceBytes = null;
if (reference != null)
referenceBytes = Base58.decode(reference);
byte[] chatReferenceBytes = null;
if (chatReference != null)
chatReferenceBytes = Base58.decode(chatReference);
try (final Repository repository = RepositoryManager.getRepository()) {
return repository.getChatRepository().getMessagesMatchingCriteria(
before,
after,
txGroupId,
referenceBytes,
chatReferenceBytes,
hasChatReference,
involvingAddresses,
sender,
encoding,
limit, offset, reverse).size();
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
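A sketch of the new count endpoint; per the checks above, either txGroupId or exactly two involving addresses must be supplied (the addresses below are placeholders):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ChatCountExample {
    public static void main(String[] args) throws Exception {
        // Count direct messages between two addresses (placeholder values)
        String url = "http://localhost:12391/chat/messages/count"
                + "?involving=<address-one>"
                + "&involving=<address-two>";
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        System.out.println("message count: " + client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}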
@GET
@Path("/message/{signature}")
@Operation(
summary = "Find chat message by signature",
responses = {
@ApiResponse(
description = "CHAT message",
content = @Content(
schema = @Schema(
implementation = ChatMessage.class
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public ChatMessage getMessageBySignature(@PathParam("signature") String signature58, @QueryParam("encoding") Encoding encoding) {
byte[] signature = Base58.decode(signature58);
try (final Repository repository = RepositoryManager.getRepository()) {
ChatTransactionData chatTransactionData = (ChatTransactionData) repository.getTransactionRepository().fromSignature(signature);
if (chatTransactionData == null) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Message not found");
}
return repository.getChatRepository().toChatMessage(chatTransactionData, encoding);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@GET
@Path("/active/{address}")
@Operation(
@@ -116,12 +237,12 @@ public class ChatResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public ActiveChats getActiveChats(@PathParam("address") String address) {
public ActiveChats getActiveChats(@PathParam("address") String address, @QueryParam("encoding") Encoding encoding) {
if (address == null || !Crypto.isValidAddress(address))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
try (final Repository repository = RepositoryManager.getRepository()) {
return repository.getChatRepository().getActiveChats(address);
return repository.getChatRepository().getActiveChats(address, encoding);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}

View File

@@ -14,6 +14,7 @@ import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
@@ -35,6 +36,37 @@ public class CrossChainBitcoinResource {
@Context
HttpServletRequest request;
@GET
@Path("/height")
@Operation(
summary = "Returns current Bitcoin block height",
description = "Returns the height of the most recent block in the Bitcoin chain.",
responses = {
@ApiResponse(
content = @Content(
schema = @Schema(
type = "number"
)
)
)
}
)
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
public String getBitcoinHeight() {
Bitcoin bitcoin = Bitcoin.getInstance();
try {
Integer height = bitcoin.getBlockchainHeight();
if (height == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return height.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
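A sketch of the new height call; the /crosschain/btc base path is an assumption, and no API key is required:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class BitcoinHeightExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Plain-text height of the most recent Bitcoin block known to the node
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/crosschain/btc/height")).GET().build();
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}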
@POST
@Path("/walletbalance")
@Operation(
@@ -68,7 +100,7 @@ public class CrossChainBitcoinResource {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
Long balance = bitcoin.getWalletBalanceFromTransactions(key58);
Long balance = bitcoin.getWalletBalance(key58);
if (balance == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
@@ -118,6 +150,45 @@ public class CrossChainBitcoinResource {
}
}
@POST
@Path("/unusedaddress")
@Operation(
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getUnusedBitcoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Bitcoin bitcoin = Bitcoin.getInstance();
if (!bitcoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
return bitcoin.getUnusedReceiveAddress(key58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
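A sketch of requesting the first unused receive address; the xpub is the testnet example from the annotation above, and the X-API-KEY header name is an assumption:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class UnusedAddressExample {
    public static void main(String[] args) throws Exception {
        String xpub = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc";
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/crosschain/btc/unusedaddress"))
                .header("X-API-KEY", "node-api-key") // replace with the node's API key
                .header("Content-Type", "text/plain")
                .POST(HttpRequest.BodyPublishers.ofString(xpub))
                .build();
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }
}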
@POST
@Path("/send")
@Operation(

View File

@@ -0,0 +1,248 @@
package org.qortal.api.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.ArraySchema;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.bitcoinj.core.Transaction;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.model.crosschain.DigibyteSendRequest;
import org.qortal.crosschain.Digibyte;
import org.qortal.crosschain.ForeignBlockchainException;
import org.qortal.crosschain.SimpleTransaction;
@Path("/crosschain/dgb")
@Tag(name = "Cross-Chain (Digibyte)")
public class CrossChainDigibyteResource {
@Context
HttpServletRequest request;
@GET
@Path("/height")
@Operation(
summary = "Returns current Digibyte block height",
description = "Returns the height of the most recent block in the Digibyte chain.",
responses = {
@ApiResponse(
content = @Content(
schema = @Schema(
type = "number"
)
)
)
}
)
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
public String getDigibyteHeight() {
Digibyte digibyte = Digibyte.getInstance();
try {
Integer height = digibyte.getBlockchainHeight();
if (height == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return height.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/walletbalance")
@Operation(
summary = "Returns DGB balance for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "balance (satoshis)"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getDigibyteWalletBalance(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Digibyte digibyte = Digibyte.getInstance();
if (!digibyte.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
Long balance = digibyte.getWalletBalance(key58);
if (balance == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return balance.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/wallettransactions")
@Operation(
summary = "Returns transactions for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public List<SimpleTransaction> getDigibyteWalletTransactions(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Digibyte digibyte = Digibyte.getInstance();
if (!digibyte.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
return digibyte.getWalletTransactions(key58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/unusedaddress")
@Operation(
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getUnusedDigibyteReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Digibyte digibyte = Digibyte.getInstance();
if (!digibyte.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
return digibyte.getUnusedReceiveAddress(key58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/send")
@Operation(
summary = "Sends DGB from hierarchical, deterministic BIP32 wallet to specific address",
description = "Currently supports 'legacy' P2PKH Digibyte addresses and Native SegWit (P2WPKH) addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(
implementation = DigibyteSendRequest.class
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "transaction hash"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String sendDigibyte(@HeaderParam(Security.API_KEY_HEADER) String apiKey, DigibyteSendRequest digibyteSendRequest) {
Security.checkApiCallAllowed(request);
if (digibyteSendRequest.digibyteAmount <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
if (digibyteSendRequest.feePerByte != null && digibyteSendRequest.feePerByte <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
Digibyte digibyte = Digibyte.getInstance();
if (!digibyte.isValidAddress(digibyteSendRequest.receivingAddress))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
if (!digibyte.isValidDeterministicKey(digibyteSendRequest.xprv58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
Transaction spendTransaction = digibyte.buildSpend(digibyteSendRequest.xprv58,
digibyteSendRequest.receivingAddress,
digibyteSendRequest.digibyteAmount,
digibyteSendRequest.feePerByte);
if (spendTransaction == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE);
try {
digibyte.broadcastTransaction(spendTransaction);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
return spendTransaction.getTxId().toString();
}
}
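A minimal client-side sketch of driving the /send endpoint above, assuming the Digibyte resource is mounted at /crosschain/dgb, a node on the default API port (12391 here), and an X-API-KEY request header; the JSON field names mirror the DigibyteSendRequest fields used by the handler, and every value is a placeholder.
// Illustrative client-side sketch only: posts a DigibyteSendRequest to a local node.
// The /crosschain/dgb path, port 12391 and the X-API-KEY header name are assumptions; adjust to your node.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DgbSendExample {
    public static void main(String[] args) throws Exception {
        String json = "{"
                + "\"xprv58\": \"tprv...\","            // BIP32 'm' private key (placeholder)
                + "\"receivingAddress\": \"D...\","     // destination DGB address (placeholder)
                + "\"digibyteAmount\": 100000,"         // amount, in the smallest unit the API expects
                + "\"feePerByte\": 10"                  // optional; must be positive when present
                + "}";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/crosschain/dgb/send"))
                .header("Content-Type", "application/json")
                .header("X-API-KEY", "<your-api-key>")
                .POST(HttpRequest.BodyPublishers.ofString(json))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // On success the body is the broadcast transaction hash (plain text).
        System.out.println(response.statusCode() + ": " + response.body());
    }
}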

View File

@@ -21,6 +21,7 @@ import org.qortal.crosschain.SimpleTransaction;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
@@ -33,6 +34,37 @@ public class CrossChainDogecoinResource {
@Context
HttpServletRequest request;
@GET
@Path("/height")
@Operation(
summary = "Returns current Dogecoin block height",
description = "Returns the height of the most recent block in the Dogecoin chain.",
responses = {
@ApiResponse(
content = @Content(
schema = @Schema(
type = "number"
)
)
)
}
)
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
public String getDogecoinHeight() {
Dogecoin dogecoin = Dogecoin.getInstance();
try {
Integer height = dogecoin.getBlockchainHeight();
if (height == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return height.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
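A quick usage sketch for the new height endpoint, assuming the resource is mounted at /crosschain/doge and a node on port 12391; as written above, the endpoint takes no API key and returns the height as plain text.
// Minimal sketch: read the Dogecoin chain height from a local node (path and port are assumptions).
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DogeHeightExample {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/crosschain/doge/height"))
                .GET()
                .build();
        String body = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString())
                .body();
        int height = Integer.parseInt(body.trim());   // endpoint returns the height as plain text
        System.out.println("DOGE height: " + height);
    }
}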
@POST
@Path("/walletbalance")
@Operation(
@@ -66,7 +98,7 @@ public class CrossChainDogecoinResource {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
Long balance = dogecoin.getWalletBalanceFromTransactions(key58);
Long balance = dogecoin.getWalletBalance(key58);
if (balance == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
@@ -116,6 +148,45 @@ public class CrossChainDogecoinResource {
}
}
@POST
@Path("/unusedaddress")
@Operation(
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "the unused address"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getUnusedDogecoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Dogecoin dogecoin = Dogecoin.getInstance();
if (!dogecoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
return dogecoin.getUnusedReceiveAddress(key58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/send")
@Operation(

View File

@@ -1,5 +1,6 @@
package org.qortal.api.resource;
import com.google.common.hash.HashCode;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
@@ -9,6 +10,8 @@ import io.swagger.v3.oas.annotations.tags.Tag;
import java.math.BigDecimal;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
@@ -284,6 +287,12 @@ public class CrossChainHtlcResource {
continue;
}
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
if (Objects.equals(bitcoiny.getCurrencyCode(), "ARRR")) {
LOGGER.info("Skipping AT {} because ARRR is currently unsupported", atAddress);
continue;
}
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atData);
if (crossChainTradeData == null) {
LOGGER.info("Couldn't find crosschain trade data for AT {}", atAddress);
@@ -363,10 +372,6 @@ public class CrossChainHtlcResource {
// Use secret-A to redeem P2SH-A
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
if (bitcoiny.getClass() == Bitcoin.class) {
LOGGER.info("Redeeming a Bitcoin HTLC is not yet supported");
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
}
int lockTime = crossChainTradeData.lockTimeA;
byte[] redeemScriptA = BitcoinyHTLC.buildScript(crossChainTradeData.partnerForeignPKH, lockTime, crossChainTradeData.creatorForeignPKH, crossChainTradeData.hashOfSecretA);
@@ -574,70 +579,108 @@ public class CrossChainHtlcResource {
// If the AT is "finished" then it will have a zero balance
// In these cases we should avoid HTLC refunds if the QORT hasn't been returned to the seller
if (atData.getIsFinished() && crossChainTradeData.mode != AcctMode.REFUNDED && crossChainTradeData.mode != AcctMode.CANCELLED) {
LOGGER.info(String.format("Skipping AT %s because the QORT has already been redemed", atAddress));
LOGGER.info(String.format("Skipping AT %s because the QORT has already been redeemed by the buyer", atAddress));
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
}
List<TradeBotData> allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData();
TradeBotData tradeBotData = allTradeBotData.stream().filter(tradeBotDataItem -> tradeBotDataItem.getAtAddress().equals(atAddress)).findFirst().orElse(null);
if (tradeBotData == null)
List<TradeBotData> tradeBotDataList = allTradeBotData.stream().filter(tradeBotDataItem -> tradeBotDataItem.getAtAddress().equals(atAddress)).collect(Collectors.toList());
if (tradeBotDataList == null || tradeBotDataList.isEmpty())
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
if (bitcoiny.getClass() == Bitcoin.class) {
LOGGER.info("Refunding a Bitcoin HTLC is not yet supported");
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
}
// Loop through all matching entries for this AT address, as there might be more than one
for (TradeBotData tradeBotData : tradeBotDataList) {
int lockTime = tradeBotData.getLockTimeA();
if (tradeBotData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
// We can't refund P2SH-A until lockTime-A has passed
if (NTP.getTime() <= lockTime * 1000L)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_TOO_SOON);
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
int lockTime = tradeBotData.getLockTimeA();
// We can't refund P2SH-A until median block time has passed lockTime-A (see BIP113)
int medianBlockTime = bitcoiny.getMedianBlockTime();
if (medianBlockTime <= lockTime)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_TOO_SOON);
// We can't refund P2SH-A until lockTime-A has passed
if (NTP.getTime() <= lockTime * 1000L)
continue;
byte[] redeemScriptA = BitcoinyHTLC.buildScript(tradeBotData.getTradeForeignPublicKeyHash(), lockTime, crossChainTradeData.creatorForeignPKH, tradeBotData.getHashOfSecret());
String p2shAddressA = bitcoiny.deriveP2shAddress(redeemScriptA);
LOGGER.info(String.format("Refunding P2SH address: %s", p2shAddressA));
// We can't refund P2SH-A until median block time has passed lockTime-A (see BIP113)
int medianBlockTime = bitcoiny.getMedianBlockTime();
if (medianBlockTime <= lockTime)
continue;
// Fee for redeem/refund is subtracted from P2SH-A balance.
long feeTimestamp = calcFeeTimestamp(lockTime, crossChainTradeData.tradeTimeout);
long p2shFee = bitcoiny.getP2shFee(feeTimestamp);
long minimumAmountA = crossChainTradeData.expectedForeignAmount + p2shFee;
BitcoinyHTLC.Status htlcStatusA = BitcoinyHTLC.determineHtlcStatus(bitcoiny.getBlockchainProvider(), p2shAddressA, minimumAmountA);
// Fee for redeem/refund is subtracted from P2SH-A balance.
long feeTimestamp = calcFeeTimestamp(lockTime, crossChainTradeData.tradeTimeout);
long p2shFee = bitcoiny.getP2shFee(feeTimestamp);
long minimumAmountA = crossChainTradeData.expectedForeignAmount + p2shFee;
switch (htlcStatusA) {
case UNFUNDED:
case FUNDING_IN_PROGRESS:
// Still waiting for P2SH-A to be funded...
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_TOO_SOON);
// Create redeem script based on destination chain
byte[] redeemScriptA;
String p2shAddressA;
BitcoinyHTLC.Status htlcStatusA;
if (Objects.equals(bitcoiny.getCurrencyCode(), "ARRR")) {
redeemScriptA = PirateChainHTLC.buildScript(tradeBotData.getTradeForeignPublicKey(), lockTime, crossChainTradeData.creatorForeignPKH, tradeBotData.getHashOfSecret());
p2shAddressA = PirateChain.getInstance().deriveP2shAddressBPrefix(redeemScriptA);
htlcStatusA = PirateChainHTLC.determineHtlcStatus(bitcoiny.getBlockchainProvider(), p2shAddressA, minimumAmountA);
} else {
redeemScriptA = BitcoinyHTLC.buildScript(tradeBotData.getTradeForeignPublicKeyHash(), lockTime, crossChainTradeData.creatorForeignPKH, tradeBotData.getHashOfSecret());
p2shAddressA = bitcoiny.deriveP2shAddress(redeemScriptA);
htlcStatusA = BitcoinyHTLC.determineHtlcStatus(bitcoiny.getBlockchainProvider(), p2shAddressA, minimumAmountA);
}
LOGGER.info(String.format("Refunding P2SH address: %s", p2shAddressA));
case REDEEM_IN_PROGRESS:
case REDEEMED:
case REFUND_IN_PROGRESS:
case REFUNDED:
// Too late!
return false;
switch (htlcStatusA) {
case UNFUNDED:
case FUNDING_IN_PROGRESS:
// Still waiting for P2SH-A to be funded...
continue;
case FUNDED:{
Coin refundAmount = Coin.valueOf(crossChainTradeData.expectedForeignAmount);
ECKey refundKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
List<TransactionOutput> fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA);
case REDEEM_IN_PROGRESS:
case REDEEMED:
case REFUND_IN_PROGRESS:
case REFUNDED:
// Too late!
continue;
// Validate the destination foreign blockchain address
Address receiving = Address.fromString(bitcoiny.getNetworkParameters(), receiveAddress);
if (receiving.getOutputScriptType() != Script.ScriptType.P2PKH)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
case FUNDED: {
Coin refundAmount = Coin.valueOf(crossChainTradeData.expectedForeignAmount);
Transaction p2shRefundTransaction = BitcoinyHTLC.buildRefundTransaction(bitcoiny.getNetworkParameters(), refundAmount, refundKey,
fundingOutputs, redeemScriptA, lockTime, receiving.getHash());
if (Objects.equals(bitcoiny.getCurrencyCode(), "ARRR")) {
// Pirate Chain custom integration
bitcoiny.broadcastTransaction(p2shRefundTransaction);
return true;
PirateChain pirateChain = PirateChain.getInstance();
String p2shAddressT3 = pirateChain.deriveP2shAddress(redeemScriptA);
// Get funding txid
String fundingTxidHex = PirateChainHTLC.getUnspentFundingTxid(pirateChain.getBlockchainProvider(), p2shAddressA, minimumAmountA);
if (fundingTxidHex == null) {
throw new ForeignBlockchainException("Missing funding txid when refunding P2SH");
}
String fundingTxid58 = Base58.encode(HashCode.fromString(fundingTxidHex).asBytes());
byte[] privateKey = tradeBotData.getTradePrivateKey();
String privateKey58 = Base58.encode(privateKey);
String redeemScript58 = Base58.encode(redeemScriptA);
String txid = PirateChain.getInstance().refundP2sh(p2shAddressT3,
receiveAddress, refundAmount.value, redeemScript58, fundingTxid58, lockTime, privateKey58);
LOGGER.info("Refund txid: {}", txid);
} else {
// ElectrumX coins
ECKey refundKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
List<TransactionOutput> fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA);
// Validate the destination foreign blockchain address
Address receiving = Address.fromString(bitcoiny.getNetworkParameters(), receiveAddress);
if (receiving.getOutputScriptType() != Script.ScriptType.P2PKH)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
Transaction p2shRefundTransaction = BitcoinyHTLC.buildRefundTransaction(bitcoiny.getNetworkParameters(), refundAmount, refundKey,
fundingOutputs, redeemScriptA, lockTime, receiving.getHash());
bitcoiny.broadcastTransaction(p2shRefundTransaction);
}
return true;
}
}
}
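To make the two time gates in the refund loop easier to follow, here is a self-contained sketch (illustrative only, not the node's implementation) of the checks applied before a P2SH-A refund is attempted: wall-clock time must be past lockTime-A, and, per BIP113, the chain's median block time must also have passed it.
// Illustrative sketch of the refund eligibility gates used above.
// lockTime and medianBlockTime are in seconds; nowMillis is a wall-clock timestamp in milliseconds.
public class RefundWindow {
    public static boolean isOpen(long nowMillis, int medianBlockTime, int lockTime) {
        if (nowMillis <= lockTime * 1000L)
            return false;   // lockTime-A not yet reached by wall-clock time
        if (medianBlockTime <= lockTime)
            return false;   // BIP113: median block time must also exceed lockTime-A
        return true;
    }

    public static void main(String[] args) {
        // Worked example: lockTime one hour in the past, median block time just past it.
        int lockTime = (int) (System.currentTimeMillis() / 1000L) - 3600;
        System.out.println(isOpen(System.currentTimeMillis(), lockTime + 60, lockTime));   // true
    }
}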

View File

@@ -14,6 +14,7 @@ import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
@@ -35,6 +36,37 @@ public class CrossChainLitecoinResource {
@Context
HttpServletRequest request;
@GET
@Path("/height")
@Operation(
summary = "Returns current Litecoin block height",
description = "Returns the height of the most recent block in the Litecoin chain.",
responses = {
@ApiResponse(
content = @Content(
schema = @Schema(
type = "number"
)
)
)
}
)
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
public String getLitecoinHeight() {
Litecoin litecoin = Litecoin.getInstance();
try {
Integer height = litecoin.getBlockchainHeight();
if (height == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return height.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/walletbalance")
@Operation(
@@ -68,7 +100,7 @@ public class CrossChainLitecoinResource {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
Long balance = litecoin.getWalletBalanceFromTransactions(key58);
Long balance = litecoin.getWalletBalance(key58);
if (balance == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
@@ -118,6 +150,45 @@ public class CrossChainLitecoinResource {
}
}
@POST
@Path("/unusedaddress")
@Operation(
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "the unused address"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getUnusedLitecoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Litecoin litecoin = Litecoin.getInstance();
if (!litecoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
return litecoin.getUnusedReceiveAddress(key58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
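Since the wallet balance endpoints report satoshis as plain text, a small conversion sketch for display purposes (the response value below is made up):
// Sketch: converting a plain-text satoshi balance to a display value. Example input only.
import java.math.BigDecimal;

public class SatoshiConversion {
    public static void main(String[] args) {
        String body = "12345678900";                              // example response body (satoshis)
        BigDecimal ltc = new BigDecimal(body).movePointLeft(8);   // 1 LTC = 100,000,000 satoshis
        System.out.println(ltc.toPlainString() + " LTC");         // 123.45678900 LTC
    }
}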
@POST
@Path("/send")
@Operation(

View File

@@ -0,0 +1,261 @@
package org.qortal.api.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.ArraySchema;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.model.crosschain.PirateChainSendRequest;
import org.qortal.crosschain.ForeignBlockchainException;
import org.qortal.crosschain.PirateChain;
import org.qortal.crosschain.SimpleTransaction;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.util.List;
@Path("/crosschain/arrr")
@Tag(name = "Cross-Chain (Pirate Chain)")
public class CrossChainPirateChainResource {
@Context
HttpServletRequest request;
@GET
@Path("/height")
@Operation(
summary = "Returns current PirateChain block height",
description = "Returns the height of the most recent block in the PirateChain chain.",
responses = {
@ApiResponse(
content = @Content(
schema = @Schema(
type = "number"
)
)
)
}
)
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
public String getPirateChainHeight() {
PirateChain pirateChain = PirateChain.getInstance();
try {
Integer height = pirateChain.getBlockchainHeight();
if (height == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return height.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/walletbalance")
@Operation(
summary = "Returns ARRR balance",
description = "Supply 32 bytes of entropy, Base58 encoded",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "32 bytes of entropy, Base58 encoded",
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "balance (satoshis)"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getPirateChainWalletBalance(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
Security.checkApiCallAllowed(request);
PirateChain pirateChain = PirateChain.getInstance();
try {
Long balance = pirateChain.getWalletBalance(entropy58);
if (balance == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return balance.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
}
}
@POST
@Path("/wallettransactions")
@Operation(
summary = "Returns transactions",
description = "Supply 32 bytes of entropy, Base58 encoded",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "32 bytes of entropy, Base58 encoded",
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
)
)
),
responses = {
@ApiResponse(
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public List<SimpleTransaction> getPirateChainWalletTransactions(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
Security.checkApiCallAllowed(request);
PirateChain pirateChain = PirateChain.getInstance();
try {
return pirateChain.getWalletTransactions(entropy58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
}
}
@POST
@Path("/send")
@Operation(
summary = "Sends ARRR from wallet",
description = "Currently supports 'legacy' P2PKH PirateChain addresses and Native SegWit (P2WPKH) addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "32 bytes of entropy, Base58 encoded",
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "transaction hash"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String sendArrr(@HeaderParam(Security.API_KEY_HEADER) String apiKey, PirateChainSendRequest pirateChainSendRequest) {
Security.checkApiCallAllowed(request);
if (pirateChainSendRequest.arrrAmount <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
if (pirateChainSendRequest.feePerByte != null && pirateChainSendRequest.feePerByte <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
PirateChain pirateChain = PirateChain.getInstance();
try {
return pirateChain.sendCoins(pirateChainSendRequest);
} catch (ForeignBlockchainException e) {
// TODO
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
}
}
@POST
@Path("/walletaddress")
@Operation(
summary = "Returns main wallet address",
description = "Supply 32 bytes of entropy, Base58 encoded",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "32 bytes of entropy, Base58 encoded",
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "the main wallet address"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getPirateChainWalletAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
Security.checkApiCallAllowed(request);
PirateChain pirateChain = PirateChain.getInstance();
try {
return pirateChain.getWalletAddress(entropy58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
}
}
@POST
@Path("/syncstatus")
@Operation(
summary = "Returns synchronization status",
description = "Supply 32 bytes of entropy, Base58 encoded",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "32 bytes of entropy, Base58 encoded",
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "sync status"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getPirateChainSyncStatus(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
Security.checkApiCallAllowed(request);
PirateChain pirateChain = PirateChain.getInstance();
try {
return pirateChain.getSyncStatus(entropy58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
}
}
}
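The ARRR endpoints above key the wallet off 32 bytes of entropy rather than a BIP32 key. A sketch of producing that value, assuming org.qortal.utils.Base58 from this repository is on the classpath; whether the UI generates entropy exactly this way is not shown here.
// Illustrative only: generate 32 bytes of wallet entropy and Base58-encode it
// in the form the ARRR endpoints above expect ("32 bytes of entropy, Base58 encoded").
import java.security.SecureRandom;
import org.qortal.utils.Base58;   // Base58 helper from this repository

public class ArrrEntropyExample {
    public static void main(String[] args) {
        byte[] entropy = new byte[32];
        new SecureRandom().nextBytes(entropy);
        String entropy58 = Base58.encode(entropy);
        // entropy58 is what gets POSTed (text/plain) to e.g. /crosschain/arrr/walletaddress
        System.out.println(entropy58);
    }
}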

View File

@@ -0,0 +1,248 @@
package org.qortal.api.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.ArraySchema;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.bitcoinj.core.Transaction;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.model.crosschain.RavencoinSendRequest;
import org.qortal.crosschain.Ravencoin;
import org.qortal.crosschain.ForeignBlockchainException;
import org.qortal.crosschain.SimpleTransaction;
@Path("/crosschain/rvn")
@Tag(name = "Cross-Chain (Ravencoin)")
public class CrossChainRavencoinResource {
@Context
HttpServletRequest request;
@GET
@Path("/height")
@Operation(
summary = "Returns current Ravencoin block height",
description = "Returns the height of the most recent block in the Ravencoin chain.",
responses = {
@ApiResponse(
content = @Content(
schema = @Schema(
type = "number"
)
)
)
}
)
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
public String getRavencoinHeight() {
Ravencoin ravencoin = Ravencoin.getInstance();
try {
Integer height = ravencoin.getBlockchainHeight();
if (height == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return height.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/walletbalance")
@Operation(
summary = "Returns RVN balance for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "balance (satoshis)"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getRavencoinWalletBalance(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Ravencoin ravencoin = Ravencoin.getInstance();
if (!ravencoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
Long balance = ravencoin.getWalletBalance(key58);
if (balance == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
return balance.toString();
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/wallettransactions")
@Operation(
summary = "Returns transactions for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public List<SimpleTransaction> getRavencoinWalletTransactions(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Ravencoin ravencoin = Ravencoin.getInstance();
if (!ravencoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
return ravencoin.getWalletTransactions(key58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/unusedaddress")
@Operation(
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "BIP32 'm' private/public key in base58",
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "the unused address"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getUnusedRavencoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
Security.checkApiCallAllowed(request);
Ravencoin ravencoin = Ravencoin.getInstance();
if (!ravencoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
try {
return ravencoin.getUnusedReceiveAddress(key58);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
}
@POST
@Path("/send")
@Operation(
summary = "Sends RVN from hierarchical, deterministic BIP32 wallet to specific address",
description = "Currently only supports 'legacy' P2PKH Ravencoin addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(
implementation = RavencoinSendRequest.class
)
)
),
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "transaction hash"))
)
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String sendRavencoin(@HeaderParam(Security.API_KEY_HEADER) String apiKey, RavencoinSendRequest ravencoinSendRequest) {
Security.checkApiCallAllowed(request);
if (ravencoinSendRequest.ravencoinAmount <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
if (ravencoinSendRequest.feePerByte != null && ravencoinSendRequest.feePerByte <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
Ravencoin ravencoin = Ravencoin.getInstance();
if (!ravencoin.isValidAddress(ravencoinSendRequest.receivingAddress))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
if (!ravencoin.isValidDeterministicKey(ravencoinSendRequest.xprv58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
Transaction spendTransaction = ravencoin.buildSpend(ravencoinSendRequest.xprv58,
ravencoinSendRequest.receivingAddress,
ravencoinSendRequest.ravencoinAmount,
ravencoinSendRequest.feePerByte);
if (spendTransaction == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE);
try {
ravencoin.broadcastTransaction(spendTransaction);
} catch (ForeignBlockchainException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
}
return spendTransaction.getTxId().toString();
}
}
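A brief sketch of assembling the /crosschain/rvn/send request body, mirroring the validation the handler above performs (positive amount, optional positive feePerByte). The field names come from the handler; the values are placeholders and the satoshi unit for ravencoinAmount is an assumption.
// Illustrative sketch: client-side checks mirroring the server-side validation above,
// then the JSON body for POST /crosschain/rvn/send. All values are placeholders.
public class RvnSendRequestExample {
    public static void main(String[] args) {
        long ravencoinAmount = 150_000_000L;   // presumably satoshis (1.5 RVN); positive, as the handler requires
        Long feePerByte = 10L;                 // optional; must be positive when present

        if (ravencoinAmount <= 0 || (feePerByte != null && feePerByte <= 0))
            throw new IllegalArgumentException("invalid amount or fee");

        String json = "{"
                + "\"xprv58\": \"tprv...\","            // BIP32 'm' private key (placeholder)
                + "\"receivingAddress\": \"R...\","     // legacy P2PKH RVN address (placeholder)
                + "\"ravencoinAmount\": " + ravencoinAmount + ","
                + "\"feePerByte\": " + feePerByte
                + "}";
        System.out.println(json);
    }
}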

View File

@@ -115,6 +115,9 @@ public class CrossChainResource {
crossChainTrades.sort((a, b) -> Longs.compare(a.creationTimestamp, b.creationTimestamp));
}
// Remove any trades that have had too many failures
crossChainTrades = TradeBot.getInstance().removeFailedTrades(repository, crossChainTrades);
if (limit != null && limit > 0) {
// Make sure to not return more than the limit
int upperLimit = Math.min(limit, crossChainTrades.size());
@@ -129,6 +132,64 @@ public class CrossChainResource {
}
}
@GET
@Path("/tradeoffers/hidden")
@Operation(
summary = "Find cross-chain trade offers that have been hidden due to too many failures",
responses = {
@ApiResponse(
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = CrossChainTradeData.class
)
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
public List<CrossChainTradeData> getHiddenTradeOffers(
@Parameter(
description = "Limit to specific blockchain",
example = "LITECOIN",
schema = @Schema(implementation = SupportedBlockchain.class)
) @QueryParam("foreignBlockchain") SupportedBlockchain foreignBlockchain) {
final boolean isExecutable = true;
List<CrossChainTradeData> crossChainTrades = new ArrayList<>();
try (final Repository repository = RepositoryManager.getRepository()) {
Map<ByteArray, Supplier<ACCT>> acctsByCodeHash = SupportedBlockchain.getFilteredAcctMap(foreignBlockchain);
for (Map.Entry<ByteArray, Supplier<ACCT>> acctInfo : acctsByCodeHash.entrySet()) {
byte[] codeHash = acctInfo.getKey().value;
ACCT acct = acctInfo.getValue().get();
List<ATData> atsData = repository.getATRepository().getATsByFunctionality(codeHash, isExecutable, null, null, null);
for (ATData atData : atsData) {
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atData);
if (crossChainTradeData.mode == AcctMode.OFFERING) {
crossChainTrades.add(crossChainTradeData);
}
}
}
// Sort the trades by timestamp
crossChainTrades.sort((a, b) -> Longs.compare(a.creationTimestamp, b.creationTimestamp));
// Remove trades that haven't failed
crossChainTrades.removeIf(t -> !TradeBot.getInstance().isFailedTrade(repository, t));
crossChainTrades.stream().forEach(CrossChainResource::decorateTradeDataWithPresence);
return crossChainTrades;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
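The visible and hidden offer lists are complementary views driven by one "failed trade" predicate. A toy sketch of that relationship (the predicate below is a stand-in, not TradeBot's actual logic):
// Illustrative only: one predicate, two complementary views, as in the two tradeoffers endpoints above.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class OfferViews {
    public static void main(String[] args) {
        List<String> offers = new ArrayList<>(List.of("AT-1", "AT-2", "AT-3"));
        Predicate<String> isFailed = at -> at.equals("AT-2");     // stand-in for TradeBot.isFailedTrade()

        List<String> visible = new ArrayList<>(offers);
        visible.removeIf(isFailed);                               // /crosschain/tradeoffers

        List<String> hidden = new ArrayList<>(offers);
        hidden.removeIf(isFailed.negate());                       // /crosschain/tradeoffers/hidden

        System.out.println(visible + " / " + hidden);             // [AT-1, AT-3] / [AT-2]
    }
}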
@GET
@Path("/trade/{ataddress}")
@Operation(

View File

@@ -11,6 +11,7 @@ import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServletRequest;
@@ -38,10 +39,14 @@ import org.qortal.crypto.Crypto;
import org.qortal.data.at.ATData;
import org.qortal.data.crosschain.CrossChainTradeData;
import org.qortal.data.crosschain.TradeBotData;
import org.qortal.data.transaction.MessageTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
@Path("/crosschain/tradebot")
@Tag(name = "Cross-Chain (Trade-Bot)")
@@ -137,7 +142,8 @@ public class CrossChainTradeBotResource {
if (tradeBotCreateRequest.qortAmount <= 0 || tradeBotCreateRequest.fundingQortAmount <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ORDER_SIZE_TOO_SMALL);
if (!Controller.getInstance().isUpToDate())
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
try (final Repository repository = RepositoryManager.getRepository()) {
@@ -153,7 +159,7 @@ public class CrossChainTradeBotResource {
return Base58.encode(unsignedBytes);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
}
}
@@ -198,7 +204,8 @@ public class CrossChainTradeBotResource {
if (tradeBotRespondRequest.receivingAddress == null || !Crypto.isValidAddress(tradeBotRespondRequest.receivingAddress))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
if (!Controller.getInstance().isUpToDate())
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
// Extract data from cross-chain trading AT
@@ -220,6 +227,17 @@ public class CrossChainTradeBotResource {
if (crossChainTradeData.mode != AcctMode.OFFERING)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
// Check if there is a buy or a cancel request in progress for this trade
List<Transaction.TransactionType> txTypes = List.of(Transaction.TransactionType.MESSAGE);
List<TransactionData> unconfirmed = repository.getTransactionRepository().getUnconfirmedTransactions(txTypes, null, 0, 0, false);
for (TransactionData transactionData : unconfirmed) {
MessageTransactionData messageTransactionData = (MessageTransactionData) transactionData;
if (Objects.equals(messageTransactionData.getRecipient(), atAddress)) {
// There is a pending request for this trade, so block this buy attempt to reduce the risk of refunds
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Trade has an existing buy request or is pending cancellation.");
}
}
AcctTradeBot.ResponseResult result = TradeBot.getInstance().startResponse(repository, atData, acct, crossChainTradeData,
tradeBotRespondRequest.foreignKey, tradeBotRespondRequest.receivingAddress);
@@ -237,7 +255,7 @@ public class CrossChainTradeBotResource {
return "false";
}
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
}
}
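The duplicate-buy guard added above scans unconfirmed MESSAGE transactions for one addressed to the trade's AT. The same check, pulled out into a standalone helper sketch that reuses only the repository calls visible above:
// Illustrative helper mirroring the guard above: returns true if any unconfirmed MESSAGE
// transaction is already addressed to the given AT, i.e. a buy or cancel is pending.
import java.util.List;
import java.util.Objects;
import org.qortal.data.transaction.MessageTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.transaction.Transaction;

public class PendingTradeMessageCheck {
    public static boolean hasPendingTradeMessage(Repository repository, String atAddress) throws DataException {
        List<Transaction.TransactionType> txTypes = List.of(Transaction.TransactionType.MESSAGE);
        List<TransactionData> unconfirmed =
                repository.getTransactionRepository().getUnconfirmedTransactions(txTypes, null, 0, 0, false);
        for (TransactionData transactionData : unconfirmed) {
            MessageTransactionData messageTransactionData = (MessageTransactionData) transactionData;
            if (Objects.equals(messageTransactionData.getRecipient(), atAddress))
                return true;
        }
        return false;
    }
}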

View File

@@ -0,0 +1,96 @@
package org.qortal.api.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.controller.DevProxyManager;
import org.qortal.repository.DataException;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
@Path("/developer")
@Tag(name = "Developer Tools")
public class DeveloperResource {
@Context HttpServletRequest request;
@Context HttpServletResponse response;
@Context ServletContext context;
@POST
@Path("/proxy/start")
@Operation(
summary = "Start proxy server, for real time QDN app/website development",
requestBody = @RequestBody(
description = "Host and port of source webserver to be proxied",
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
example = "127.0.0.1:5173"
)
)
),
responses = {
@ApiResponse(
description = "Port number of running server",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "number"
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA})
public Integer startProxy(String sourceHostAndPort) {
// TODO: API key
DevProxyManager devProxyManager = DevProxyManager.getInstance();
try {
devProxyManager.setSourceHostAndPort(sourceHostAndPort);
devProxyManager.start();
return devProxyManager.getPort();
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, e.getMessage());
}
}
@POST
@Path("/proxy/stop")
@Operation(
summary = "Stop proxy server",
responses = {
@ApiResponse(
description = "true if stopped",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "boolean"
)
)
)
}
)
public boolean stopProxy() {
DevProxyManager devProxyManager = DevProxyManager.getInstance();
devProxyManager.stop();
return !devProxyManager.isRunning();
}
}
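A usage sketch for the proxy endpoints above, assuming a node on port 12391 and a Vite dev server on 127.0.0.1:5173 (the example value from the annotation); note the start endpoint currently has no API-key check, per the TODO.
// Sketch: start the QDN developer proxy against a local dev server, then stop it.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DevProxyExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        HttpRequest start = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/developer/proxy/start"))
                .header("Content-Type", "text/plain")
                .POST(HttpRequest.BodyPublishers.ofString("127.0.0.1:5173"))   // source webserver to proxy
                .build();
        String proxyPort = client.send(start, HttpResponse.BodyHandlers.ofString()).body();
        System.out.println("Proxy running on port " + proxyPort);

        HttpRequest stop = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/developer/proxy/stop"))
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        System.out.println("Stopped: " + client.send(stop, HttpResponse.BodyHandlers.ofString()).body());
    }
}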

View File

@@ -98,7 +98,15 @@ public class GroupsResource {
ref = "reverse"
) @QueryParam("reverse") Boolean reverse) {
try (final Repository repository = RepositoryManager.getRepository()) {
return repository.getGroupRepository().getAllGroups(limit, offset, reverse);
List<GroupData> allGroupData = repository.getGroupRepository().getAllGroups(limit, offset, reverse);
allGroupData.forEach(groupData -> {
try {
groupData.memberCount = repository.getGroupRepository().countGroupMembers(groupData.getGroupId());
} catch (DataException e) {
// Exclude memberCount for this group
}
});
return allGroupData;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
@@ -150,7 +158,15 @@ public class GroupsResource {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
try (final Repository repository = RepositoryManager.getRepository()) {
return repository.getGroupRepository().getGroupsWithMember(member);
List<GroupData> allGroupData = repository.getGroupRepository().getGroupsWithMember(member);
allGroupData.forEach(groupData -> {
try {
groupData.memberCount = repository.getGroupRepository().countGroupMembers(groupData.getGroupId());
} catch (DataException e) {
// Exclude memberCount for this group
}
});
return allGroupData;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
@@ -177,6 +193,7 @@ public class GroupsResource {
if (groupData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.GROUP_UNKNOWN);
groupData.memberCount = repository.getGroupRepository().countGroupMembers(groupId);
return groupData;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
@@ -922,4 +939,4 @@ public class GroupsResource {
}
}
}
}
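Because the member count is silently skipped for a group when counting fails, client code should treat the field as optional. A toy sketch with a stand-in POJO (not the repository's GroupData class):
// Illustrative only: memberCount may be absent for a group if counting failed server-side.
public class GroupSummaryExample {
    static class GroupData { Integer memberCount; String groupName; }   // stand-in POJO

    public static void main(String[] args) {
        GroupData group = new GroupData();
        group.groupName = "dev-group";
        group.memberCount = null;   // count omitted because counting hit a DataException for this group

        String members = group.memberCount != null ? group.memberCount.toString() : "unknown";
        System.out.println(group.groupName + ": " + members + " members");
    }
}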

View File

@@ -26,6 +26,7 @@ import org.qortal.api.ApiErrors;
import org.qortal.api.ApiException;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.model.NameSummary;
import org.qortal.controller.LiteNode;
import org.qortal.crypto.Crypto;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.BuyNameTransactionData;
@@ -46,6 +47,7 @@ import org.qortal.transform.transaction.RegisterNameTransactionTransformer;
import org.qortal.transform.transaction.SellNameTransactionTransformer;
import org.qortal.transform.transaction.UpdateNameTransactionTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.Unicode;
@Path("/names")
@Tag(name = "Names")
@@ -62,19 +64,19 @@ public class NamesResource {
description = "registered name info",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
array = @ArraySchema(schema = @Schema(implementation = NameSummary.class))
array = @ArraySchema(schema = @Schema(implementation = NameData.class))
)
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<NameSummary> getAllNames(@Parameter(ref = "limit") @QueryParam("limit") Integer limit, @Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref="reverse") @QueryParam("reverse") Boolean reverse) {
public List<NameData> getAllNames(@Parameter(description = "Return only names registered or updated after timestamp") @QueryParam("after") Long after,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref="reverse") @QueryParam("reverse") Boolean reverse) {
try (final Repository repository = RepositoryManager.getRepository()) {
List<NameData> names = repository.getNameRepository().getAllNames(limit, offset, reverse);
// Convert to summary
return names.stream().map(NameSummary::new).collect(Collectors.toList());
return repository.getNameRepository().getAllNames(after, limit, offset, reverse);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
@@ -101,7 +103,14 @@ public class NamesResource {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
try (final Repository repository = RepositoryManager.getRepository()) {
List<NameData> names = repository.getNameRepository().getNamesByOwner(address, limit, offset, reverse);
List<NameData> names;
if (Settings.getInstance().isLite()) {
names = LiteNode.getInstance().fetchAccountNames(address);
}
else {
names = repository.getNameRepository().getNamesByOwner(address, limit, offset, reverse);
}
return names.stream().map(NameSummary::new).collect(Collectors.toList());
} catch (DataException e) {
@@ -126,10 +135,19 @@ public class NamesResource {
@ApiErrors({ApiError.NAME_UNKNOWN, ApiError.REPOSITORY_ISSUE})
public NameData getName(@PathParam("name") String name) {
try (final Repository repository = RepositoryManager.getRepository()) {
NameData nameData = repository.getNameRepository().fromName(name);
NameData nameData;
String reducedName = Unicode.sanitize(name);
if (nameData == null)
if (Settings.getInstance().isLite()) {
nameData = LiteNode.getInstance().fetchNameData(name);
}
else {
nameData = repository.getNameRepository().fromReducedName(reducedName);
}
if (nameData == null) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NAME_UNKNOWN);
}
return nameData;
} catch (ApiException e) {
@@ -139,6 +157,41 @@ public class NamesResource {
}
}
@GET
@Path("/search")
@Operation(
summary = "Search registered names",
responses = {
@ApiResponse(
description = "registered name info",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
array = @ArraySchema(schema = @Schema(implementation = NameData.class))
)
)
}
)
@ApiErrors({ApiError.NAME_UNKNOWN, ApiError.REPOSITORY_ISSUE})
public List<NameData> searchNames(@QueryParam("query") String query,
@Parameter(description = "Prefix only (if true, only the beginning of the name is matched)") @QueryParam("prefix") Boolean prefixOnly,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref="reverse") @QueryParam("reverse") Boolean reverse) {
try (final Repository repository = RepositoryManager.getRepository()) {
if (query == null) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Missing query");
}
boolean usePrefixOnly = Boolean.TRUE.equals(prefixOnly);
return repository.getNameRepository().searchNames(query, usePrefixOnly, limit, offset, reverse);
} catch (ApiException e) {
throw e;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
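A usage sketch for the new search endpoint, assuming a node on port 12391; prefix matching is opt-in via the prefix query parameter, and omitting query returns an INVALID_CRITERIA error.
// Sketch: search registered names whose name starts with "qortal" (prefix match).
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class NameSearchExample {
    public static void main(String[] args) throws Exception {
        String url = "http://localhost:12391/names/search?query=qortal&prefix=true&limit=20";
        HttpRequest request = HttpRequest.newBuilder().uri(URI.create(url)).GET().build();
        String json = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString())
                .body();
        System.out.println(json);   // JSON array of NameData matches
    }
}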
@POST
@Path("/register")
@@ -394,4 +447,4 @@ public class NamesResource {
}
}
}
}

View File

@@ -20,6 +20,11 @@ import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.logging.log4j.core.LoggerContext;
import org.qortal.api.*;
import org.qortal.api.model.ConnectedPeer;
import org.qortal.api.model.PeersSummary;
@@ -127,9 +132,29 @@ public class PeersResource {
}
)
@SecurityRequirement(name = "apiKey")
public ExecuteProduceConsume.StatsSnapshot getEngineStats(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
public ExecuteProduceConsume.StatsSnapshot getEngineStats(@HeaderParam(Security.API_KEY_HEADER) String apiKey, @QueryParam("newLoggingLevel") Level newLoggingLevel) {
Security.checkApiCallAllowed(request);
if (newLoggingLevel != null) {
final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
final Configuration config = ctx.getConfiguration();
String epcClassName = "org.qortal.network.Network.NetworkProcessor";
LoggerConfig loggerConfig = config.getLoggerConfig(epcClassName);
LoggerConfig specificConfig = loggerConfig;
// We need a configuration specific to this logger; otherwise we would also
// change the level of every other logger that inherits the original
// configuration as its parent
if (!loggerConfig.getName().equals(epcClassName)) {
specificConfig = new LoggerConfig(epcClassName, newLoggingLevel, true);
specificConfig.setParent(loggerConfig);
config.addLogger(epcClassName, specificConfig);
}
specificConfig.setLevel(newLoggingLevel);
ctx.updateLoggers();
}
return Network.getInstance().getStatsSnapshot();
}
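For reference, log4j2 also ships a one-call helper that produces the same per-logger override; a minimal sketch, untested against this codebase:
// Minimal sketch: the same per-logger level override via log4j2's Configurator helper.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class LoggerLevelExample {
    public static void main(String[] args) {
        // Creates or updates a LoggerConfig for exactly this logger name, leaving parent loggers untouched.
        Configurator.setLevel("org.qortal.network.Network.NetworkProcessor", Level.DEBUG);
    }
}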

View File

@@ -0,0 +1,258 @@
package org.qortal.api.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.data.transaction.CreatePollTransactionData;
import org.qortal.data.transaction.PaymentTransactionData;
import org.qortal.data.transaction.VoteOnPollTransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.transform.TransformationException;
import org.qortal.transform.transaction.CreatePollTransactionTransformer;
import org.qortal.transform.transaction.PaymentTransactionTransformer;
import org.qortal.transform.transaction.VoteOnPollTransactionTransformer;
import org.qortal.utils.Base58;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.media.ArraySchema;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.ws.rs.GET;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import org.qortal.api.ApiException;
import org.qortal.api.model.PollVotes;
import org.qortal.data.voting.PollData;
import org.qortal.data.voting.PollOptionData;
import org.qortal.data.voting.VoteOnPollData;
@Path("/polls")
@Tag(name = "Polls")
public class PollsResource {
@Context
HttpServletRequest request;
@GET
@Operation(
summary = "List all polls",
responses = {
@ApiResponse(
description = "poll info",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
array = @ArraySchema(schema = @Schema(implementation = PollData.class))
)
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<PollData> getAllPolls(@Parameter(
ref = "limit"
) @QueryParam("limit") Integer limit, @Parameter(
ref = "offset"
) @QueryParam("offset") Integer offset, @Parameter(
ref = "reverse"
) @QueryParam("reverse") Boolean reverse) {
try (final Repository repository = RepositoryManager.getRepository()) {
List<PollData> allPollData = repository.getVotingRepository().getAllPolls(limit, offset, reverse);
return allPollData;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@GET
@Path("/{pollName}")
@Operation(
summary = "Info on poll",
responses = {
@ApiResponse(
description = "poll info",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(implementation = PollData.class)
)
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public PollData getPollData(@PathParam("pollName") String pollName) {
try (final Repository repository = RepositoryManager.getRepository()) {
PollData pollData = repository.getVotingRepository().fromPollName(pollName);
if (pollData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.POLL_NO_EXISTS);
return pollData;
} catch (ApiException e) {
throw e;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@GET
@Path("/votes/{pollName}")
@Operation(
summary = "Votes on poll",
responses = {
@ApiResponse(
description = "poll votes",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(implementation = PollVotes.class)
)
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public PollVotes getPollVotes(@PathParam("pollName") String pollName, @QueryParam("onlyCounts") Boolean onlyCounts) {
try (final Repository repository = RepositoryManager.getRepository()) {
PollData pollData = repository.getVotingRepository().fromPollName(pollName);
if (pollData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.POLL_NO_EXISTS);
List<VoteOnPollData> votes = repository.getVotingRepository().getVotes(pollName);
// Initialize map for counting votes
Map<String, Integer> voteCountMap = new HashMap<>();
for (PollOptionData optionData : pollData.getPollOptions()) {
voteCountMap.put(optionData.getOptionName(), 0);
}
int totalVotes = 0;
for (VoteOnPollData vote : votes) {
String selectedOption = pollData.getPollOptions().get(vote.getOptionIndex()).getOptionName();
if (voteCountMap.containsKey(selectedOption)) {
voteCountMap.put(selectedOption, voteCountMap.get(selectedOption) + 1);
totalVotes++;
}
}
// Convert map to list of OptionCount entries
List<PollVotes.OptionCount> voteCounts = voteCountMap.entrySet().stream()
.map(entry -> new PollVotes.OptionCount(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
if (onlyCounts != null && onlyCounts) {
return new PollVotes(null, totalVotes, voteCounts);
} else {
return new PollVotes(votes, totalVotes, voteCounts);
}
} catch (ApiException e) {
throw e;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@POST
@Path("/create")
@Operation(
summary = "Build raw, unsigned, CREATE_POLL transaction",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(
implementation = CreatePollTransactionData.class
)
)
),
responses = {
@ApiResponse(
description = "raw, unsigned, CREATE_POLL transaction encoded in Base58",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
@ApiErrors({ApiError.NON_PRODUCTION, ApiError.TRANSACTION_INVALID, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE})
public String createPoll(CreatePollTransactionData transactionData) {
if (Settings.getInstance().isApiRestricted())
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NON_PRODUCTION);
try (final Repository repository = RepositoryManager.getRepository()) {
Transaction transaction = Transaction.fromData(repository, transactionData);
Transaction.ValidationResult result = transaction.isValidUnconfirmed();
if (result != Transaction.ValidationResult.OK)
throw TransactionsResource.createTransactionInvalidException(request, result);
byte[] bytes = CreatePollTransactionTransformer.toBytes(transactionData);
return Base58.encode(bytes);
} catch (TransformationException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@POST
@Path("/vote")
@Operation(
summary = "Build raw, unsigned, VOTE_ON_POLL transaction",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(
implementation = VoteOnPollTransactionData.class
)
)
),
responses = {
@ApiResponse(
description = "raw, unsigned, VOTE_ON_POLL transaction encoded in Base58",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
@ApiErrors({ApiError.NON_PRODUCTION, ApiError.TRANSACTION_INVALID, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE})
public String VoteOnPoll(VoteOnPollTransactionData transactionData) {
if (Settings.getInstance().isApiRestricted())
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NON_PRODUCTION);
try (final Repository repository = RepositoryManager.getRepository()) {
Transaction transaction = Transaction.fromData(repository, transactionData);
Transaction.ValidationResult result = transaction.isValidUnconfirmed();
if (result != Transaction.ValidationResult.OK)
throw TransactionsResource.createTransactionInvalidException(request, result);
byte[] bytes = VoteOnPollTransactionTransformer.toBytes(transactionData);
return Base58.encode(bytes);
} catch (TransformationException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
}
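
The two builder endpoints above return raw, unsigned transaction bytes for the caller to sign and submit separately, while the read-only endpoints can be exercised directly. A minimal client sketch for the votes endpoint, assuming the resource is mounted at /polls, the default core API port 12391, and a hypothetical poll name (none of which appear in the diff):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PollVotesClientSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical poll name and node address; adjust for your own node
        String pollName = "my-example-poll";
        URI uri = URI.create("http://localhost:12391/polls/votes/" + pollName + "?onlyCounts=true");

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(uri).GET().build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());

        // With onlyCounts=true the per-vote list is omitted and only the totals are returned
        System.out.println(response.statusCode() + ": " + response.body());
    }
}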

View File

@@ -0,0 +1,70 @@
package org.qortal.api.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.*;
import org.qortal.block.BlockChain;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.utils.Amounts;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.math.BigDecimal;
import java.util.List;
@Path("/stats")
@Tag(name = "Stats")
public class StatsResource {
private static final Logger LOGGER = LogManager.getLogger(StatsResource.class);
@Context
HttpServletRequest request;
@GET
@Path("/supply/circulating")
@Operation(
summary = "Fetch circulating QORT supply",
responses = {
@ApiResponse(
description = "circulating supply of QORT",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", format = "number"))
)
}
)
public BigDecimal circulatingSupply() {
long total = 0L;
try (final Repository repository = RepositoryManager.getRepository()) {
int currentHeight = repository.getBlockRepository().getBlockchainHeight();
List<BlockChain.RewardByHeight> rewardsByHeight = BlockChain.getInstance().getBlockRewardsByHeight();
int rewardIndex = rewardsByHeight.size() - 1;
BlockChain.RewardByHeight rewardInfo = rewardsByHeight.get(rewardIndex);
for (int height = currentHeight; height > 1; --height) {
if (height < rewardInfo.height) {
--rewardIndex;
rewardInfo = rewardsByHeight.get(rewardIndex);
}
total += rewardInfo.reward;
}
return Amounts.toBigDecimal(total);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
}
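
circulatingSupply() walks backwards from the current height and adds the block reward in force at each height, switching tiers once it drops below a tier's starting height. A standalone sketch of the same accumulation, using invented reward tiers (the real ones come from the blockchain config) and assuming amounts carry 8 decimal places, as Amounts.toBigDecimal implies:

import java.util.List;

public class SupplyWalkSketch {

    // Stand-in for BlockChain.RewardByHeight: 'height' is the first block at which 'reward' applies
    static final class RewardByHeight {
        final int height;
        final long reward; // per-block reward in atomic units (8 decimal places assumed)
        RewardByHeight(int height, long reward) { this.height = height; this.reward = reward; }
    }

    public static void main(String[] args) {
        // Invented tiers for illustration only
        List<RewardByHeight> rewardsByHeight = List.of(
                new RewardByHeight(1, 5_00000000L),    // 5.00 QORT per block from height 1
                new RewardByHeight(100, 4_00000000L)); // 4.00 QORT per block from height 100

        int currentHeight = 250;
        long total = 0L;

        int rewardIndex = rewardsByHeight.size() - 1;
        RewardByHeight rewardInfo = rewardsByHeight.get(rewardIndex);

        // Walk backwards from the tip; once below a tier's starting height, switch to the previous tier
        for (int height = currentHeight; height > 1; --height) {
            if (height < rewardInfo.height) {
                --rewardIndex;
                rewardInfo = rewardsByHeight.get(rewardIndex);
            }
            total += rewardInfo.reward;
        }

        System.out.println("Total minted, in atomic units: " + total);
    }
}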

View File

@@ -9,29 +9,27 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.io.IOException;
import java.io.StringWriter;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiException;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.*;
import org.qortal.api.model.SimpleTransactionSignRequest;
import org.qortal.controller.Controller;
import org.qortal.controller.LiteNode;
import org.qortal.crypto.Crypto;
import org.qortal.data.transaction.TransactionData;
import org.qortal.globalization.Translator;
import org.qortal.repository.DataException;
@@ -217,10 +215,25 @@ public class TransactionsResource {
}
try (final Repository repository = RepositoryManager.getRepository()) {
if (repository.getBlockRepository().getHeightFromSignature(signature) == 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
// Check if the block exists in either the database or archive
int height = repository.getBlockRepository().getHeightFromSignature(signature);
if (height == 0) {
height = repository.getBlockArchiveRepository().getHeightFromSignature(signature);
if (height == 0) {
// Not found in either the database or archive
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
}
}
return repository.getBlockRepository().getTransactionsFromSignature(signature, limit, offset, reverse);
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, height, height);
// Expand signatures to transactions
List<TransactionData> transactions = new ArrayList<>(signatures.size());
for (byte[] s : signatures) {
transactions.add(repository.getTransactionRepository().fromSignature(s));
}
return transactions;
} catch (ApiException e) {
throw e;
} catch (DataException e) {
@@ -250,14 +263,29 @@ public class TransactionsResource {
ApiError.REPOSITORY_ISSUE
})
public List<TransactionData> getUnconfirmedTransactions(@Parameter(
description = "A list of transaction types"
) @QueryParam("txType") List<TransactionType> txTypes, @Parameter(
description = "Transaction creator's base58 encoded public key"
) @QueryParam("creator") String creatorPublicKey58, @Parameter(
ref = "limit"
) @QueryParam("limit") Integer limit, @Parameter(
ref = "offset"
) @QueryParam("offset") Integer offset, @Parameter(
ref = "reverse"
) @QueryParam("reverse") Boolean reverse) {
// Decode public key if supplied
byte[] creatorPublicKey = null;
if (creatorPublicKey58 != null) {
try {
creatorPublicKey = Base58.decode(creatorPublicKey58);
} catch (NumberFormatException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY, e);
}
}
try (final Repository repository = RepositoryManager.getRepository()) {
return repository.getTransactionRepository().getUnconfirmedTransactions(limit, offset, reverse);
return repository.getTransactionRepository().getUnconfirmedTransactions(txTypes, creatorPublicKey, limit, offset, reverse);
} catch (ApiException e) {
throw e;
} catch (DataException e) {
@@ -366,6 +394,73 @@ public class TransactionsResource {
}
}
@GET
@Path("/address/{address}")
@Operation(
summary = "Returns transactions for given address",
responses = {
@ApiResponse(
description = "transactions",
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = TransactionData.class
)
)
)
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public List<TransactionData> getAddressTransactions(@PathParam("address") String address,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
if (!Crypto.isValidAddress(address)) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
}
if (limit == null) {
limit = 0;
}
if (offset == null) {
offset = 0;
}
List<TransactionData> transactions;
if (Settings.getInstance().isLite()) {
// Fetch from network
transactions = LiteNode.getInstance().fetchAccountTransactions(address, limit, offset);
// Sort the data, since we can't guarantee the order that a peer sent it in
if (reverse) {
transactions.sort(Comparator.comparingLong(TransactionData::getTimestamp).reversed());
} else {
transactions.sort(Comparator.comparingLong(TransactionData::getTimestamp));
}
}
else {
// Fetch from local db
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null,
null, null, null, address, TransactionsResource.ConfirmationStatus.CONFIRMED, limit, offset, reverse);
// Expand signatures to transactions
transactions = new ArrayList<>(signatures.size());
for (byte[] signature : signatures) {
transactions.add(repository.getTransactionRepository().fromSignature(signature));
}
} catch (ApiException e) {
throw e;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
return transactions;
}
@GET
@Path("/unitfee")
@Operation(
@@ -624,7 +719,7 @@ public class TransactionsResource {
),
responses = {
@ApiResponse(
description = "true if accepted, false otherwise",
description = "For API version 1, this returns true if accepted.\nFor API version 2, the transactionData is returned as a JSON string if accepted.",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
@@ -637,10 +732,12 @@ public class TransactionsResource {
@ApiErrors({
ApiError.BLOCKCHAIN_NEEDS_SYNC, ApiError.INVALID_SIGNATURE, ApiError.INVALID_DATA, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE
})
public String processTransaction(String rawBytes58) {
// Only allow a transaction to be processed if our latest block is less than 30 minutes old
public String processTransaction(String rawBytes58, @HeaderParam(ApiService.API_VERSION_HEADER) String apiVersionHeader) {
int apiVersion = ApiService.getApiVersion(request);
// Only allow a transaction to be processed if our latest block is less than 60 minutes old
// If older than this, we should first wait until the blockchain is synced
final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L);
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
@@ -663,7 +760,7 @@ public class TransactionsResource {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_SIGNATURE);
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS))
if (!blockchainLock.tryLock(60, TimeUnit.SECONDS))
throw createTransactionInvalidException(request, ValidationResult.NO_BLOCKCHAIN_LOCK);
try {
@@ -674,13 +771,27 @@ public class TransactionsResource {
blockchainLock.unlock();
}
return "true";
switch (apiVersion) {
case 1:
return "true";
case 2:
default:
// Marshall transactionData to string
StringWriter stringWriter = new StringWriter();
ApiRequest.marshall(stringWriter, transactionData);
return stringWriter.toString();
}
} catch (NumberFormatException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA, e);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
} catch (InterruptedException e) {
throw createTransactionInvalidException(request, ValidationResult.NO_BLOCKCHAIN_LOCK);
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
}
}
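
After this change, the transaction-processing endpoint returns the bare string "true" for API version 1 callers and the marshalled transaction data for version 2. A client sketch that tolerates both response shapes without negotiating a version; the /transactions/process path and port 12391 are assumptions, not taken from the diff:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ProcessTransactionClientSketch {
    public static void main(String[] args) throws Exception {
        // Base58-encoded, already-signed transaction bytes supplied by the caller
        String rawBase58Transaction = args.length > 0 ? args[0] : "11111111";

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:12391/transactions/process"))
                .POST(HttpRequest.BodyPublishers.ofString(rawBase58Transaction))
                .build();

        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        String body = response.body();

        if ("true".equals(body)) {
            // Version 1 style response: a bare acknowledgement
            System.out.println("Transaction accepted");
        } else {
            // Version 2 style response: the accepted transaction data, marshalled as a string
            System.out.println("Transaction accepted, node returned: " + body);
        }
    }
}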

View File

@@ -1,4 +1,4 @@
package org.qortal.api.resource;
package org.qortal.api.restricted.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
@@ -20,6 +20,7 @@ import java.time.LocalDate;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -31,10 +32,13 @@ import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.RollingFileAppender;
import org.json.JSONArray;
import org.json.JSONObject;
import org.qortal.account.Account;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.api.*;
@@ -42,9 +46,11 @@ import org.qortal.api.model.ActivitySummary;
import org.qortal.api.model.NodeInfo;
import org.qortal.api.model.NodeStatus;
import org.qortal.block.BlockChain;
import org.qortal.controller.AutoUpdate;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.controller.Synchronizer.SynchronizationResult;
import org.qortal.controller.repository.BlockArchiveRebuilder;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.network.Network;
@@ -119,10 +125,23 @@ public class AdminResource {
nodeInfo.buildTimestamp = Controller.getInstance().getBuildTimestamp();
nodeInfo.nodeId = Network.getInstance().getOurNodeId();
nodeInfo.isTestNet = Settings.getInstance().isTestNet();
nodeInfo.type = getNodeType();
return nodeInfo;
}
private String getNodeType() {
if (Settings.getInstance().isLite()) {
return "lite";
}
else if (Settings.getInstance().isTopOnly()) {
return "topOnly";
}
else {
return "full";
}
}
@GET
@Path("/status")
@Operation(
@@ -139,6 +158,53 @@ public class AdminResource {
return nodeStatus;
}
@GET
@Path("/settings")
@Operation(
summary = "Fetch node settings",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = Settings.class))
)
}
)
public Settings settings() {
Settings nodeSettings = Settings.getInstance();
return nodeSettings;
}
@GET
@Path("/settings/{setting}")
@Operation(
summary = "Fetch a single node setting",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
public String setting(@PathParam("setting") String setting) {
try {
Object settingValue = FieldUtils.readField(Settings.getInstance(), setting, true);
if (settingValue == null) {
return "null";
}
else if (settingValue instanceof String[]) {
JSONArray array = new JSONArray(settingValue);
return array.toString(4);
}
else if (settingValue instanceof List) {
JSONArray array = new JSONArray((List<Object>) settingValue);
return array.toString(4);
}
return settingValue.toString();
} catch (IllegalAccessException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
}
}
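
The new settings endpoints expose the Settings singleton via reflection: FieldUtils.readField looks up any field by name, and String[] or List values are rendered through org.json. A self-contained sketch of that reflection step, run against an invented Config class rather than the real Settings:

import org.apache.commons.lang3.reflect.FieldUtils;

public class ReflectiveSettingLookupSketch {
    // Stand-in for Settings: any object whose private fields we want to read by name
    static class Config {
        private int apiPort = 12391;
        private String[] bootstrapHosts = { "host1.example", "host2.example" };
    }

    public static void main(String[] args) throws IllegalAccessException {
        Config config = new Config();

        // forceAccess=true lets us read private fields, as the endpoint does on Settings
        Object value = FieldUtils.readField(config, "apiPort", true);
        System.out.println("apiPort = " + value);

        Object hosts = FieldUtils.readField(config, "bootstrapHosts", true);
        if (hosts instanceof String[]) {
            System.out.println("bootstrapHosts = " + String.join(", ", (String[]) hosts));
        }
    }
}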
@GET
@Path("/stop")
@Operation(
@@ -169,6 +235,37 @@ public class AdminResource {
return "true";
}
@GET
@Path("/restart")
@Operation(
summary = "Restart",
description = "Restart",
responses = {
@ApiResponse(
description = "\"true\"",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
@SecurityRequirement(name = "apiKey")
public String restart(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
Security.checkApiCallAllowed(request);
new Thread(() -> {
// Short sleep to allow HTTP response body to be emitted
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// Not important
}
AutoUpdate.attemptRestart();
}).start();
return "true";
}
@GET
@Path("/summary")
@Operation(
@@ -209,6 +306,42 @@ public class AdminResource {
}
}
@GET
@Path("/summary/alltime")
@Operation(
summary = "Summary of activity since genesis",
responses = {
@ApiResponse(
content = @Content(schema = @Schema(implementation = ActivitySummary.class))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public ActivitySummary allTimeSummary(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
Security.checkApiCallAllowed(request);
ActivitySummary summary = new ActivitySummary();
try (final Repository repository = RepositoryManager.getRepository()) {
int startHeight = 1;
long start = repository.getBlockRepository().fromHeight(startHeight).getTimestamp();
int endHeight = repository.getBlockRepository().getBlockchainHeight();
summary.setBlockCount(endHeight - startHeight);
summary.setTransactionCountByType(repository.getTransactionRepository().getTransactionSummary(startHeight + 1, endHeight));
summary.setAssetsIssued(repository.getAssetRepository().getRecentAssetIds(start).size());
summary.setNamesRegistered(repository.getNameRepository().getRecentNames(start).size());
return summary;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@GET
@Path("/enginestats")
@Operation(
@@ -381,6 +514,10 @@ public class AdminResource {
) @QueryParam("limit") Integer limit, @Parameter(
ref = "offset"
) @QueryParam("offset") Integer offset, @Parameter(
name = "tail",
description = "Fetch most recent log lines",
schema = @Schema(type = "boolean")
) @QueryParam("tail") Boolean tail, @Parameter(
ref = "reverse"
) @QueryParam("reverse") Boolean reverse) {
LoggerContext loggerContext = (LoggerContext) LogManager.getContext();
@@ -396,6 +533,13 @@ public class AdminResource {
if (reverse != null && reverse)
logLines = Lists.reverse(logLines);
// Tail mode - return the last X lines (where X = limit)
if (tail != null && tail) {
if (limit != null && limit > 0) {
offset = logLines.size() - limit;
}
}
// offset out of bounds?
if (offset != null && (offset < 0 || offset >= logLines.size()))
return "";
@@ -416,7 +560,7 @@ public class AdminResource {
limit = Math.min(limit, logLines.size());
logLines.subList(limit - 1, logLines.size()).clear();
logLines.subList(limit, logLines.size()).clear();
return String.join("\n", logLines);
} catch (IOException e) {
@@ -588,10 +732,6 @@ public class AdminResource {
public String importRepository(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String filename) {
Security.checkApiCallAllowed(request);
// Hard-coded because it's too dangerous to allow user-supplied filenames in weaker security contexts
if (Settings.getInstance().getApiKey() == null)
filename = "qortal-backup/TradeBotStates.json";
try (final Repository repository = RepositoryManager.getRepository()) {
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
@@ -678,6 +818,64 @@ public class AdminResource {
}
}
@POST
@Path("/repository/archive/rebuild")
@Operation(
summary = "Rebuild archive",
description = "Rebuilds archive files, using the specified serialization version",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "number", example = "2"
)
)
),
responses = {
@ApiResponse(
description = "\"true\"",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String rebuildArchive(@HeaderParam(Security.API_KEY_HEADER) String apiKey, Integer serializationVersion) {
Security.checkApiCallAllowed(request);
// Default serialization version to value specified in settings
if (serializationVersion == null) {
serializationVersion = Settings.getInstance().getDefaultArchiveVersion();
}
try {
// We don't actually need to lock the blockchain here, but we'll do it anyway so that
// the node can focus on rebuilding rather than synchronizing / minting.
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
blockchainLock.lockInterruptibly();
try {
BlockArchiveRebuilder blockArchiveRebuilder = new BlockArchiveRebuilder(serializationVersion);
blockArchiveRebuilder.start();
return "true";
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
} finally {
blockchainLock.unlock();
}
} catch (InterruptedException e) {
// We couldn't lock blockchain to perform rebuild
return "false";
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
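
The rebuild endpoint above (and the archived-trades import below) takes the blockchain lock with lockInterruptibly(), so the wait can be aborted, and always releases it in a finally block. The same pattern in isolation, with a placeholder doWork():

import java.util.concurrent.locks.ReentrantLock;

public class LockPatternSketch {
    private static final ReentrantLock LOCK = new ReentrantLock();

    static String doWork() {
        // Placeholder for the real work, e.g. rebuilding the archive
        return "true";
    }

    public static String runExclusively() {
        try {
            // Blocks until the lock is free, but the wait can be interrupted (e.g. on shutdown)
            LOCK.lockInterruptibly();
            try {
                return doWork();
            } finally {
                LOCK.unlock();
            }
        } catch (InterruptedException e) {
            // We never acquired the lock, so there is nothing to unlock
            Thread.currentThread().interrupt();
            return "false";
        }
    }

    public static void main(String[] args) {
        System.out.println(runExclusively());
    }
}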
@DELETE
@Path("/repository")
@Operation(
@@ -708,6 +906,49 @@ public class AdminResource {
}
}
@POST
@Path("/repository/importarchivedtrades")
@Operation(
summary = "Imports archived trades from TradeBotStatesArchive.json",
description = "This can be used to recover trades that exist in the archive only, which may be needed if a<br />" +
"problem occurred during the proof-of-work computation stage of a buy request.",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public boolean importArchivedTrades(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
Security.checkApiCallAllowed(request);
try (final Repository repository = RepositoryManager.getRepository()) {
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
blockchainLock.lockInterruptibly();
try {
repository.importDataFromFile("qortal-backup/TradeBotStatesArchive.json");
repository.saveChanges();
return true;
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
} finally {
blockchainLock.unlock();
}
} catch (InterruptedException e) {
// We couldn't lock blockchain to perform import
return false;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@POST
@Path("/apikey/generate")

View File

@@ -1,4 +1,4 @@
package org.qortal.api.resource;
package org.qortal.api.restricted.resource;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
@@ -60,7 +60,7 @@ public class BootstrapResource {
bootstrap.validateBlockchain();
return bootstrap.create();
} catch (DataException | InterruptedException | IOException e) {
} catch (Exception e) {
LOGGER.info("Unable to create bootstrap", e);
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
}

View File

@@ -1,4 +1,4 @@
package org.qortal.api.resource;
package org.qortal.api.restricted.resource;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
@@ -8,7 +8,6 @@ import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.io.*;
import java.nio.file.Paths;
import java.util.Map;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
@@ -28,8 +27,8 @@ import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.controller.arbitrary.ArbitraryDataRenderManager;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
@@ -43,60 +42,6 @@ public class RenderResource {
@Context HttpServletResponse response;
@Context ServletContext context;
@POST
@Path("/preview")
@Operation(
summary = "Generate preview URL based on a user-supplied path and service",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string", example = "/Users/user/Documents/MyStaticWebsite"
)
)
),
responses = {
@ApiResponse(
description = "a temporary URL to preview the website",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
@SecurityRequirement(name = "apiKey")
public String preview(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String directoryPath) {
Security.checkApiCallAllowed(request);
Method method = Method.PUT;
Compression compression = Compression.ZIP;
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(Paths.get(directoryPath),
null, Service.WEBSITE, null, method, compression,
null, null, null, null);
try {
arbitraryDataWriter.save();
} catch (IOException | DataException | InterruptedException | MissingDataException e) {
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE);
} catch (RuntimeException e) {
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
}
ArbitraryDataFile arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile();
if (arbitraryDataFile != null) {
String digest58 = arbitraryDataFile.digest58();
if (digest58 != null) {
return "http://localhost:12393/render/hash/" + digest58 + "?secret=" + Base58.encode(arbitraryDataFile.getSecret());
}
}
return "Unable to generate preview URL";
}
@POST
@Path("/authorize/{resourceId}")
@SecurityRequirement(name = "apiKey")
@@ -140,8 +85,10 @@ public class RenderResource {
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature,
@QueryParam("theme") String theme) {
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, "/", null, "/render/signature", true, true, theme);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme);
}
@GET
@@ -149,8 +96,10 @@ public class RenderResource {
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath,
@QueryParam("theme") String theme) {
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, inPath,null, "/render/signature", true, true, theme);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme);
}
@GET
@@ -158,8 +107,10 @@ public class RenderResource {
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58,
@QueryParam("theme") String theme) {
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, "/", secret58, "/render/hash", true, false, theme);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme);
}
@GET
@@ -168,8 +119,10 @@ public class RenderResource {
public HttpServletResponse getPathByHash(@PathParam("hash") String hash58, @PathParam("path") String inPath,
@QueryParam("secret") String secret58,
@QueryParam("theme") String theme) {
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, inPath, secret58, "/render/hash", true, false, theme);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme);
}
@GET
@@ -178,10 +131,13 @@ public class RenderResource {
public HttpServletResponse getPathByName(@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("path") String inPath,
@QueryParam("identifier") String identifier,
@QueryParam("theme") String theme) {
Security.requirePriorAuthorization(request, name, service, null);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, name, service, null);
String prefix = String.format("/render/%s", service);
return this.get(name, ResourceIdType.NAME, service, inPath, null, prefix, true, true, theme);
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme);
}
@GET
@@ -189,19 +145,22 @@ public class RenderResource {
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByName(@PathParam("service") Service service,
@PathParam("name") String name,
@QueryParam("identifier") String identifier,
@QueryParam("theme") String theme) {
Security.requirePriorAuthorization(request, name, service, null);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorization(request, name, service, null);
String prefix = String.format("/render/%s", service);
return this.get(name, ResourceIdType.NAME, service, "/", null, prefix, true, true, theme);
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme);
}
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async, String theme) {
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String theme) {
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, identifier, inPath,
secret58, prefix, includeResourceIdInPrefix, async, "render", request, response, context);
if (theme != null) {
renderer.setTheme(theme);

View File

@@ -2,7 +2,9 @@ package org.qortal.api.websocket;
import java.io.IOException;
import java.io.StringWriter;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;
import org.eclipse.jetty.websocket.api.Session;
@@ -21,6 +23,8 @@ import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import static org.qortal.data.chat.ChatMessage.Encoding;
@WebSocket
@SuppressWarnings("serial")
public class ActiveChatsWebSocket extends ApiWebSocket {
@@ -62,7 +66,9 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
@OnWebSocketMessage
public void onWebSocketMessage(Session session, String message) {
/* ignored */
if (Objects.equals(message, "ping")) {
session.getRemote().sendStringByFuture("pong");
}
}
private void onNotify(Session session, ChatTransactionData chatTransactionData, String ourAddress, AtomicReference<String> previousOutput) {
@@ -75,7 +81,7 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
}
try (final Repository repository = RepositoryManager.getRepository()) {
ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress);
ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session));
StringWriter stringWriter = new StringWriter();
@@ -93,4 +99,12 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
}
}
private Encoding getTargetEncoding(Session session) {
// Default to Base58 if not specified, for backwards compatibility
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
List<String> encodingList = queryParams.get("encoding");
String encoding = (encodingList != null && encodingList.size() == 1) ? encodingList.get(0) : "BASE58";
return Encoding.valueOf(encoding);
}
}
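
The message handler above turns the previously ignored inbound text into an application-level keepalive: a client that sends "ping" gets "pong" back, and the encoding query parameter selects how chat content is encoded. A client sketch using the JDK's built-in WebSocket support; the port, the /websockets/chat/active/{address} path and the BASE64 encoding value are assumptions, not taken from the diff:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.WebSocket;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;

public class ActiveChatsPingSketch {
    public static void main(String[] args) throws Exception {
        String address = args[0]; // the Qortal address whose active chats to watch
        CountDownLatch pongReceived = new CountDownLatch(1);

        WebSocket.Listener listener = new WebSocket.Listener() {
            @Override
            public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
                System.out.println("Received: " + data);
                if ("pong".contentEquals(data)) {
                    pongReceived.countDown();
                }
                webSocket.request(1); // ask for the next message
                return null;
            }
        };

        // Path and port are placeholders; adjust them for a real node
        URI uri = URI.create("ws://localhost:12391/websockets/chat/active/" + address + "?encoding=BASE64");
        WebSocket webSocket = HttpClient.newHttpClient()
                .newWebSocketBuilder()
                .buildAsync(uri, listener)
                .join();

        webSocket.sendText("ping", true); // keepalive; the socket should answer with "pong"
        pongReceived.await();
        webSocket.sendClose(WebSocket.NORMAL_CLOSURE, "done").join();
    }
}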

View File

@@ -2,10 +2,7 @@ package org.qortal.api.websocket;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.*;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.WebSocketException;
@@ -22,6 +19,8 @@ import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import static org.qortal.data.chat.ChatMessage.Encoding;
@WebSocket
@SuppressWarnings("serial")
public class ChatMessagesWebSocket extends ApiWebSocket {
@@ -35,6 +34,16 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
@Override
public void onWebSocketConnect(Session session) {
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
Encoding encoding = getTargetEncoding(session);
List<String> limitList = queryParams.get("limit");
Integer limit = (limitList != null && limitList.size() == 1) ? Integer.parseInt(limitList.get(0)) : null;
List<String> offsetList = queryParams.get("offset");
Integer offset = (offsetList != null && offsetList.size() == 1) ? Integer.parseInt(offsetList.get(0)) : null;
List<String> reverseList = queryParams.get("reverse");
Boolean reverse = (reverseList != null && reverseList.size() == 1) ? Boolean.parseBoolean(reverseList.get(0)) : null;
List<String> txGroupIds = queryParams.get("txGroupId");
if (txGroupIds != null && txGroupIds.size() == 1) {
@@ -46,7 +55,12 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
null,
txGroupId,
null,
null, null, null);
null,
null,
null,
null,
encoding,
limit, offset, reverse);
sendMessages(session, chatMessages);
} catch (DataException e) {
@@ -69,11 +83,16 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
try (final Repository repository = RepositoryManager.getRepository()) {
List<ChatMessage> chatMessages = repository.getChatRepository().getMessagesMatchingCriteria(
null,
null,
null,
null,
null,
null,
involvingAddresses,
null, null, null);
null,
encoding,
limit, offset, reverse);
sendMessages(session, chatMessages);
} catch (DataException e) {
@@ -99,7 +118,9 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
@OnWebSocketMessage
public void onWebSocketMessage(Session session, String message) {
/* ignored */
if (Objects.equals(message, "ping")) {
session.getRemote().sendStringByFuture("pong");
}
}
private void onNotify(Session session, ChatTransactionData chatTransactionData, int txGroupId) {
@@ -147,7 +168,7 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
// Convert ChatTransactionData to ChatMessage
ChatMessage chatMessage;
try (final Repository repository = RepositoryManager.getRepository()) {
chatMessage = repository.getChatRepository().toChatMessage(chatTransactionData);
chatMessage = repository.getChatRepository().toChatMessage(chatTransactionData, getTargetEncoding(session));
} catch (DataException e) {
// No output this time?
return;
@@ -156,4 +177,12 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
sendMessages(session, Collections.singletonList(chatMessage));
}
private Encoding getTargetEncoding(Session session) {
// Default to Base58 if not specified, for backwards compatibility
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
List<String> encodingList = queryParams.get("encoding");
String encoding = (encodingList != null && encodingList.size() == 1) ? encodingList.get(0) : "BASE58";
return Encoding.valueOf(encoding);
}
}

View File

@@ -2,10 +2,7 @@ package org.qortal.api.websocket;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.stream.Collectors;
import org.eclipse.jetty.websocket.api.Session;
@@ -85,6 +82,7 @@ public class TradeBotWebSocket extends ApiWebSocket implements Listener {
@Override
public void onWebSocketConnect(Session session) {
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
final boolean excludeInitialData = queryParams.get("excludeInitialData") != null;
List<String> foreignBlockchains = queryParams.get("foreignBlockchain");
final String foreignBlockchain = foreignBlockchains == null ? null : foreignBlockchains.get(0);
@@ -98,15 +96,22 @@ public class TradeBotWebSocket extends ApiWebSocket implements Listener {
// save session's preferred blockchain (if any)
sessionBlockchain.put(session, foreignBlockchain);
// Send all known trade-bot entries
try (final Repository repository = RepositoryManager.getRepository()) {
List<TradeBotData> tradeBotEntries = repository.getCrossChainRepository().getAllTradeBotData();
// Optional filtering
if (foreignBlockchain != null)
tradeBotEntries = tradeBotEntries.stream()
.filter(tradeBotData -> tradeBotData.getForeignBlockchain().equals(foreignBlockchain))
.collect(Collectors.toList());
// Maybe send all known trade-bot entries
try (final Repository repository = RepositoryManager.getRepository()) {
List<TradeBotData> tradeBotEntries = new ArrayList<>();
// We might need to exclude the initial data from the response
if (!excludeInitialData) {
tradeBotEntries = repository.getCrossChainRepository().getAllTradeBotData();
// Optional filtering
if (foreignBlockchain != null)
tradeBotEntries = tradeBotEntries.stream()
.filter(tradeBotData -> tradeBotData.getForeignBlockchain().equals(foreignBlockchain))
.collect(Collectors.toList());
}
if (!sendEntries(session, tradeBotEntries)) {
session.close(4002, "websocket issue");

View File

@@ -24,6 +24,7 @@ import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
import org.qortal.api.model.CrossChainOfferSummary;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.controller.tradebot.TradeBot;
import org.qortal.crosschain.SupportedBlockchain;
import org.qortal.crosschain.ACCT;
import org.qortal.crosschain.AcctMode;
@@ -173,6 +174,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
public void onWebSocketConnect(Session session) {
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
final boolean includeHistoric = queryParams.get("includeHistoric") != null;
final boolean excludeInitialData = queryParams.get("excludeInitialData") != null;
List<String> foreignBlockchains = queryParams.get("foreignBlockchain");
final String foreignBlockchain = foreignBlockchains == null ? null : foreignBlockchains.get(0);
@@ -189,20 +191,23 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
List<CrossChainOfferSummary> crossChainOfferSummaries = new ArrayList<>();
synchronized (cachedInfoByBlockchain) {
Collection<CachedOfferInfo> cachedInfos;
// We might need to exclude the initial data from the response
if (!excludeInitialData) {
synchronized (cachedInfoByBlockchain) {
Collection<CachedOfferInfo> cachedInfos;
if (foreignBlockchain == null)
// No preferred blockchain, so iterate through all of them
cachedInfos = cachedInfoByBlockchain.values();
else
cachedInfos = Collections.singleton(cachedInfoByBlockchain.computeIfAbsent(foreignBlockchain, k -> new CachedOfferInfo()));
if (foreignBlockchain == null)
// No preferred blockchain, so iterate through all of them
cachedInfos = cachedInfoByBlockchain.values();
else
cachedInfos = Collections.singleton(cachedInfoByBlockchain.computeIfAbsent(foreignBlockchain, k -> new CachedOfferInfo()));
for (CachedOfferInfo cachedInfo : cachedInfos) {
crossChainOfferSummaries.addAll(cachedInfo.currentSummaries.values());
for (CachedOfferInfo cachedInfo : cachedInfos) {
crossChainOfferSummaries.addAll(cachedInfo.currentSummaries.values());
if (includeHistoric)
crossChainOfferSummaries.addAll(cachedInfo.historicSummaries.values());
if (includeHistoric)
crossChainOfferSummaries.addAll(cachedInfo.historicSummaries.values());
}
}
}
@@ -311,7 +316,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
throw new DataException("Couldn't fetch historic trades from repository");
for (ATStateData historicAtState : historicAtStates) {
CrossChainOfferSummary historicOfferSummary = produceSummary(repository, acct, historicAtState, null);
CrossChainOfferSummary historicOfferSummary = produceSummary(repository, acct, historicAtState, null, null);
if (!isHistoric.test(historicOfferSummary))
continue;
@@ -326,8 +331,10 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
}
}
private static CrossChainOfferSummary produceSummary(Repository repository, ACCT acct, ATStateData atState, Long timestamp) throws DataException {
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atState);
private static CrossChainOfferSummary produceSummary(Repository repository, ACCT acct, ATStateData atState, CrossChainTradeData crossChainTradeData, Long timestamp) throws DataException {
if (crossChainTradeData == null) {
crossChainTradeData = acct.populateTradeData(repository, atState);
}
long atStateTimestamp;
@@ -342,9 +349,16 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
private static List<CrossChainOfferSummary> produceSummaries(Repository repository, ACCT acct, List<ATStateData> atStates, Long timestamp) throws DataException {
List<CrossChainOfferSummary> offerSummaries = new ArrayList<>();
for (ATStateData atState : atStates) {
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atState);
for (ATStateData atState : atStates)
offerSummaries.add(produceSummary(repository, acct, atState, timestamp));
// Ignore trade if it has failed
if (TradeBot.getInstance().isFailedTrade(repository, crossChainTradeData)) {
continue;
}
offerSummaries.add(produceSummary(repository, acct, atState, crossChainTradeData, timestamp));
}
return offerSummaries;
}

View File

@@ -65,11 +65,15 @@ public class TradePresenceWebSocket extends ApiWebSocket implements Listener {
@Override
public void onWebSocketConnect(Session session) {
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
final boolean excludeInitialData = queryParams.get("excludeInitialData") != null;
List<TradePresenceData> tradePresences;
List<TradePresenceData> tradePresences = new ArrayList<>();
synchronized (currentEntries) {
tradePresences = List.copyOf(currentEntries.values());
// We might need to exclude the initial data from the response
if (!excludeInitialData) {
synchronized (currentEntries) {
tradePresences = List.copyOf(currentEntries.values());
}
}
if (!sendTradePresences(session, tradePresences)) {

View File

@@ -2,6 +2,7 @@ package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.exception.DataNotPublishedException;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataCache;
import org.qortal.arbitrary.misc.Service;
@@ -53,10 +54,6 @@ public class ArbitraryDataBuilder {
/**
* Process transactions, but do not build anything
* This is useful for checking the status of a given resource
*
* @throws DataException
* @throws IOException
* @throws MissingDataException
*/
public void process() throws DataException, IOException, MissingDataException {
this.fetchTransactions();
@@ -68,10 +65,6 @@ public class ArbitraryDataBuilder {
/**
* Build the latest state of a given resource
*
* @throws DataException
* @throws IOException
* @throws MissingDataException
*/
public void build() throws DataException, IOException, MissingDataException {
this.process();
@@ -88,7 +81,7 @@ public class ArbitraryDataBuilder {
if (latestPut == null) {
String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s",
this.name, this.service, this.identifierString());
throw new DataException(message);
throw new DataNotPublishedException(message);
}
this.latestPutTransaction = latestPut;

View File

@@ -4,6 +4,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.crypto.Crypto;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
@@ -15,7 +16,6 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.*;
import java.util.stream.Stream;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;
@@ -79,29 +79,38 @@ public class ArbitraryDataFile {
this.signature = signature;
}
public ArbitraryDataFile(byte[] fileContent, byte[] signature) throws DataException {
public ArbitraryDataFile(byte[] fileContent, byte[] signature, boolean useTemporaryFile) throws DataException {
if (fileContent == null) {
LOGGER.error("fileContent is null");
return;
}
this.chunks = new ArrayList<>();
this.hash58 = Base58.encode(Crypto.digest(fileContent));
this.signature = signature;
LOGGER.trace(String.format("File digest: %s, size: %d bytes", this.hash58, fileContent.length));
Path outputFilePath = getOutputFilePath(this.hash58, signature, true);
Path outputFilePath;
if (useTemporaryFile) {
try {
outputFilePath = Files.createTempFile("qortalRawData", null);
outputFilePath.toFile().deleteOnExit();
}
catch (IOException e) {
throw new DataException(String.format("Unable to write data with hash %s to temporary file: %s", this.hash58, e.getMessage()));
}
}
else {
outputFilePath = getOutputFilePath(this.hash58, signature, true);
}
File outputFile = outputFilePath.toFile();
try (FileOutputStream outputStream = new FileOutputStream(outputFile)) {
outputStream.write(fileContent);
this.filePath = outputFilePath;
// Verify hash
if (!this.hash58.equals(this.digest58())) {
LOGGER.error("Hash {} does not match file digest {}", this.hash58, this.digest58());
this.delete();
throw new DataException("Data file digest validation failed");
}
} catch (IOException e) {
throw new DataException("Unable to write data to file");
this.delete();
throw new DataException(String.format("Unable to write data with hash %s: %s", this.hash58, e.getMessage()));
}
}
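
The reworked constructor writes the raw content either to a temporary file (deleted on JVM exit) or to the normal output path, then re-hashes what was written so a truncated or corrupted write is caught and removed. A standalone sketch of that write-then-verify step using SHA-256 directly; the project's Crypto.digest helper is assumed to be equivalent:

import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.Arrays;

public class WriteThenVerifySketch {
    public static void main(String[] args) throws Exception {
        byte[] fileContent = "example payload".getBytes(StandardCharsets.UTF_8);

        // Hash the in-memory content first
        byte[] expectedDigest = MessageDigest.getInstance("SHA-256").digest(fileContent);

        // Write to a temporary file that the JVM cleans up on exit
        Path outputFilePath = Files.createTempFile("qortalRawData", null);
        outputFilePath.toFile().deleteOnExit();
        try (FileOutputStream outputStream = new FileOutputStream(outputFilePath.toFile())) {
            outputStream.write(fileContent);
        }

        // Re-read and re-hash what actually landed on disk
        byte[] writtenDigest = MessageDigest.getInstance("SHA-256").digest(Files.readAllBytes(outputFilePath));
        if (!Arrays.equals(expectedDigest, writtenDigest)) {
            Files.deleteIfExists(outputFilePath);
            throw new IllegalStateException("Data file digest validation failed");
        }
        System.out.println("Digest verified for " + outputFilePath);
    }
}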
@@ -116,6 +125,41 @@ public class ArbitraryDataFile {
return ArbitraryDataFile.fromHash58(Base58.encode(hash), signature);
}
public static ArbitraryDataFile fromRawData(byte[] data, byte[] signature) throws DataException {
if (data == null) {
return null;
}
return new ArbitraryDataFile(data, signature, true);
}
public static ArbitraryDataFile fromTransactionData(ArbitraryTransactionData transactionData) throws DataException {
ArbitraryDataFile arbitraryDataFile = null;
byte[] signature = transactionData.getSignature();
byte[] data = transactionData.getData();
if (data == null) {
return null;
}
// Create data file
switch (transactionData.getDataType()) {
case DATA_HASH:
arbitraryDataFile = ArbitraryDataFile.fromHash(data, signature);
break;
case RAW_DATA:
arbitraryDataFile = ArbitraryDataFile.fromRawData(data, signature);
break;
}
// Set metadata hash
if (arbitraryDataFile != null) {
arbitraryDataFile.setMetadataHash(transactionData.getMetadataHash());
}
return arbitraryDataFile;
}
public static ArbitraryDataFile fromPath(Path path, byte[] signature) {
if (path == null) {
return null;
@@ -265,6 +309,11 @@ public class ArbitraryDataFile {
this.chunks = new ArrayList<>();
if (file != null) {
if (file.exists() && file.length() <= chunkSize) {
// No need to split into chunks if we're already below the chunk size
return 0;
}
try (FileInputStream fileInputStream = new FileInputStream(file);
BufferedInputStream bis = new BufferedInputStream(fileInputStream)) {
@@ -393,12 +442,15 @@ public class ArbitraryDataFile {
return false;
}
public boolean deleteAll() {
public boolean deleteAll(boolean deleteMetadata) {
// Delete the complete file
boolean fileDeleted = this.delete();
// Delete the metadata file
boolean metadataDeleted = this.deleteMetadata();
// Delete the metadata file if requested
boolean metadataDeleted = false;
if (deleteMetadata) {
metadataDeleted = this.deleteMetadata();
}
// Delete the individual chunks
boolean chunksDeleted = this.deleteAllChunks();
@@ -478,6 +530,14 @@ public class ArbitraryDataFile {
// Read the metadata
List<byte[]> chunks = metadata.getChunks();
// If the chunks array is empty, then this resource has no chunks,
// so we must return false to avoid confusing the caller.
if (chunks.isEmpty()) {
return false;
}
// Otherwise, we need to check each chunk individually
for (byte[] chunkHash : chunks) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
if (!chunk.exists()) {
@@ -609,6 +669,22 @@ public class ArbitraryDataFile {
return this.chunks.size();
}
public int fileCount() {
int fileCount = this.chunkCount();
if (fileCount == 0) {
// Transactions without any chunks can already be treated as a complete file
fileCount++;
}
if (this.getMetadataHash() != null) {
// Add the metadata file
fileCount++;
}
return fileCount;
}
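
For example, a payload split into five chunks and published with a metadata file reports a fileCount() of 6 (five chunks plus one metadata file), while a small single-file resource with no chunks and no metadata reports 1.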
public List<ArbitraryDataFileChunk> getChunks() {
return this.chunks;
}

View File

@@ -18,7 +18,7 @@ public class ArbitraryDataFileChunk extends ArbitraryDataFile {
}
public ArbitraryDataFileChunk(byte[] fileContent, byte[] signature) throws DataException {
super(fileContent, signature);
super(fileContent, signature, false);
}
public static ArbitraryDataFileChunk fromHash58(String hash58, byte[] signature) throws DataException {

View File

@@ -4,11 +4,11 @@ import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.exception.DataNotPublishedException;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.crypto.AES;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
@@ -18,10 +18,7 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.settings.Settings;
import org.qortal.transform.Transformer;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.ZipUtils;
import org.qortal.utils.*;
import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
@@ -37,6 +34,9 @@ import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
public class ArbitraryDataReader {
@@ -59,6 +59,13 @@ public class ArbitraryDataReader {
private int layerCount;
private byte[] latestSignature;
// The resource being read
ArbitraryDataResource arbitraryDataResource = null;
// Track resources that are currently being loaded, to avoid duplicate concurrent builds
// TODO: all builds could be handled by the build queue (even synchronous ones), to avoid the need for this
private static Map<String, Long> inProgress = Collections.synchronizedMap(new HashMap<>());
public ArbitraryDataReader(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
// Ensure names are always lowercase
if (resourceIdType == ResourceIdType.NAME) {
@@ -115,6 +122,11 @@ public class ArbitraryDataReader {
return new ArbitraryDataBuildQueueItem(this.resourceId, this.resourceIdType, this.service, this.identifier);
}
private ArbitraryDataResource createArbitraryDataResource() {
return new ArbitraryDataResource(this.resourceId, this.resourceIdType, this.service, this.identifier);
}
/**
* loadAsynchronously
*
@@ -148,9 +160,6 @@ public class ArbitraryDataReader {
* If no exception is thrown, you can then use getFilePath() to access the data immediately after returning
*
* @param overwrite - set to true to force rebuild an existing cache
* @throws IOException
* @throws DataException
* @throws MissingDataException
*/
public void loadSynchronously(boolean overwrite) throws DataException, IOException, MissingDataException {
try {
@@ -162,6 +171,14 @@ public class ArbitraryDataReader {
return;
}
this.arbitraryDataResource = this.createArbitraryDataResource();
// Don't allow duplicate loads
if (!this.canStartLoading()) {
LOGGER.debug("Skipping duplicate load of {}", this.arbitraryDataResource);
return;
}
this.preExecute();
this.deleteExistingFiles();
this.fetch();
@@ -169,9 +186,18 @@ public class ArbitraryDataReader {
this.uncompress();
this.validate();
} catch (DataException e) {
} catch (DataNotPublishedException e) {
if (e.getMessage() != null) {
// Log the message only, to avoid spamming the logs with a full stack trace
LOGGER.debug("DataNotPublishedException when trying to load QDN resource: {}", e.getMessage());
}
this.deleteWorkingDirectory();
throw new DataException(e.getMessage());
throw e;
} catch (DataException e) {
LOGGER.info("DataException when trying to load QDN resource", e);
this.deleteWorkingDirectory();
throw e;
} finally {
this.postExecute();
@@ -180,6 +206,7 @@ public class ArbitraryDataReader {
private void preExecute() throws DataException {
ArbitraryDataBuildManager.getInstance().setBuildInProgress(true);
this.checkEnabled();
this.createWorkingDirectory();
this.createUncompressedDirectory();
@@ -187,6 +214,9 @@ public class ArbitraryDataReader {
private void postExecute() {
ArbitraryDataBuildManager.getInstance().setBuildInProgress(false);
this.arbitraryDataResource = this.createArbitraryDataResource();
ArbitraryDataReader.inProgress.remove(this.arbitraryDataResource.getUniqueKey());
}
private void checkEnabled() throws DataException {
@@ -195,6 +225,17 @@ public class ArbitraryDataReader {
}
}
private boolean canStartLoading() {
// Avoid duplicate builds if we're already loading this resource
String uniqueKey = this.arbitraryDataResource.getUniqueKey();
if (ArbitraryDataReader.inProgress.containsKey(uniqueKey)) {
return false;
}
ArbitraryDataReader.inProgress.put(uniqueKey, NTP.getTime());
return true;
}
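
canStartLoading() reserves a resource's unique key in a shared map so that only one load of the same resource runs at a time, and postExecute() releases it. The check followed by put is two separate operations; a sketch of the same guard using putIfAbsent, which performs the reservation atomically (an alternative formulation, not the diff's code):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class DuplicateLoadGuardSketch {
    // uniqueKey -> timestamp at which loading started
    private static final Map<String, Long> IN_PROGRESS = new ConcurrentHashMap<>();

    /** Returns true if the caller won the right to load this resource. */
    static boolean tryStartLoading(String uniqueKey) {
        // putIfAbsent is atomic: only one concurrent caller sees null and proceeds
        return IN_PROGRESS.putIfAbsent(uniqueKey, System.currentTimeMillis()) == null;
    }

    static void finishLoading(String uniqueKey) {
        IN_PROGRESS.remove(uniqueKey);
    }

    public static void main(String[] args) {
        String key = "WEBSITE:example-name:default"; // hypothetical unique key
        System.out.println(tryStartLoading(key)); // true - first loader wins
        System.out.println(tryStartLoading(key)); // false - duplicate load rejected
        finishLoading(key);
        System.out.println(tryStartLoading(key)); // true again once the first load has finished
    }
}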
private void createWorkingDirectory() throws DataException {
try {
Files.createDirectories(this.workingPath);
@@ -206,10 +247,14 @@ public class ArbitraryDataReader {
/**
* Working directory should only be deleted on failure, since it is currently used to
* serve a cached version of the resource for subsequent requests.
* @throws IOException
*/
private void deleteWorkingDirectory() throws IOException {
FilesystemUtils.safeDeleteDirectory(this.workingPath, true);
private void deleteWorkingDirectory() {
try {
FilesystemUtils.safeDeleteDirectory(this.workingPath, true);
} catch (IOException e) {
// Ignore failures as this isn't an essential step
LOGGER.info("Unable to delete working path {}: {}", this.workingPath, e.getMessage());
}
}
private void createUncompressedDirectory() throws DataException {
@@ -281,7 +326,7 @@ public class ArbitraryDataReader {
break;
default:
throw new DataException(String.format("Unknown resource ID type specified: %s", resourceIdType.toString()));
throw new DataException(String.format("Unknown resource ID type specified: %s", resourceIdType));
}
}
@@ -337,11 +382,6 @@ public class ArbitraryDataReader {
throw new DataException(String.format("Transaction data not found for signature %s", this.resourceId));
}
// Load hashes
byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();
// Load secret
byte[] secret = transactionData.getSecret();
if (secret != null) {
@@ -349,16 +389,17 @@ public class ArbitraryDataReader {
}
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
ArbitraryTransactionUtils.checkAndRelocateMiscFiles(transactionData);
arbitraryDataFile.setMetadataHash(metadataHash);
if (arbitraryDataFile == null) {
throw new DataException(String.format("arbitraryDataFile is null"));
}
if (!arbitraryDataFile.allFilesExist()) {
if (ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName())) {
if (ListUtils.isNameBlocked(transactionData.getName())) {
throw new DataException(
String.format("Unable to request missing data for file %s because the name is blocked", arbitraryDataFile));
}
else {
} else {
// Ask the arbitrary data manager to fetch data for this transaction
String message;
if (this.canRequestMissingFiles) {
@@ -369,8 +410,7 @@ public class ArbitraryDataReader {
} else {
message = String.format("Unable to reissue request for missing file %s for signature %s due to rate limit. Please try again later.", arbitraryDataFile, Base58.encode(transactionData.getSignature()));
}
}
else {
} else {
message = String.format("Missing data for file %s", arbitraryDataFile);
}
@@ -380,21 +420,25 @@ public class ArbitraryDataReader {
}
}
if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
// We have all the chunks but not the complete file, so join them
arbitraryDataFile.join();
// Data hashes need some extra processing
if (transactionData.getDataType() == DataType.DATA_HASH) {
if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
// We have all the chunks but not the complete file, so join them
arbitraryDataFile.join();
}
// If the complete file still doesn't exist then something went wrong
if (!arbitraryDataFile.exists()) {
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
}
// Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
// Delete the invalid file
arbitraryDataFile.delete();
throw new DataException("Unable to validate complete file hash");
}
}
// If the complete file still doesn't exist then something went wrong
if (!arbitraryDataFile.exists()) {
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
}
// Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), digest)) {
// Delete the invalid file
arbitraryDataFile.delete();
throw new DataException("Unable to validate complete file hash");
}
// Ensure the file's size matches the size reported by the transaction (throws a DataException if not)
arbitraryDataFile.validateFileSize(transactionData.getSize());
@@ -408,6 +452,7 @@ public class ArbitraryDataReader {
this.decryptUsingAlgo("AES/CBC/PKCS5Padding");
} catch (DataException e) {
LOGGER.info("Unable to decrypt using specific parameters: {}", e.getMessage());
// Something went wrong, so fall back to default AES params (necessary for legacy resource support)
this.decryptUsingAlgo("AES");
@@ -420,9 +465,11 @@ public class ArbitraryDataReader {
byte[] secret = this.secret58 != null ? Base58.decode(this.secret58) : null;
if (secret != null && secret.length == Transformer.AES256_LENGTH) {
try {
LOGGER.debug("Decrypting {} using algorithm {}...", this.arbitraryDataResource, algorithm);
Path unencryptedPath = Paths.get(this.workingPath.toString(), "zipped.zip");
SecretKey aesKey = new SecretKeySpec(secret, 0, secret.length, algorithm);
SecretKey aesKey = new SecretKeySpec(secret, 0, secret.length, "AES");
AES.decryptFile(algorithm, aesKey, this.filePath.toString(), unencryptedPath.toString());
LOGGER.debug("Finished decrypting {} using algorithm {}", this.arbitraryDataResource, algorithm);
// Replace filePath pointer with the decrypted file path
// Don't delete the original ArbitraryDataFile, as this is handled in the cleanup phase
@@ -430,7 +477,8 @@ public class ArbitraryDataReader {
} catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException | NoSuchPaddingException
| BadPaddingException | IllegalBlockSizeException | IOException | InvalidKeyException e) {
throw new DataException(String.format("Unable to decrypt file at path %s: %s", this.filePath, e.getMessage()));
LOGGER.info(String.format("Exception when decrypting %s using algorithm %s", this.arbitraryDataResource, algorithm), e);
throw new DataException(String.format("Unable to decrypt file at path %s using algorithm %s: %s", this.filePath, algorithm, e.getMessage()));
}
} else {
// Assume it is unencrypted. This will be the case when we have built a custom path by combining
@@ -456,7 +504,9 @@ public class ArbitraryDataReader {
// Handle each type of compression
if (compression == Compression.ZIP) {
LOGGER.debug("Unzipping {}...", this.arbitraryDataResource);
ZipUtils.unzip(this.filePath.toString(), this.uncompressedPath.getParent().toString());
LOGGER.debug("Finished unzipping {}", this.arbitraryDataResource);
}
else if (compression == Compression.NONE) {
Files.createDirectories(this.uncompressedPath);
@@ -477,7 +527,12 @@ public class ArbitraryDataReader {
// Delete original compressed file
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
if (Files.exists(this.filePath)) {
Files.delete(this.filePath);
try {
Files.delete(this.filePath);
} catch (IOException e) {
// Ignore failures as this isn't an essential step
LOGGER.info("Unable to delete file at path {}", this.filePath);
}
}
}
@@ -487,10 +542,12 @@ public class ArbitraryDataReader {
private void validate() throws IOException, DataException {
if (this.service.isValidationRequired()) {
LOGGER.debug("Validating {}...", this.arbitraryDataResource);
Service.ValidationResult result = this.service.validate(this.filePath);
if (result != Service.ValidationResult.OK) {
throw new DataException(String.format("Validation of %s failed: %s", this.service, result.toString()));
}
LOGGER.debug("Finished validating {}", this.arbitraryDataResource);
}
}
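The decryptUsingAlgo() changes above try "AES/CBC/PKCS5Padding" first and fall back to plain "AES" for legacy resources. A generic javax.crypto sketch of that fallback pattern follows; it is not the Qortal AES helper, and the IV-prepended payload layout and all-zero demo key are assumptions made purely for illustration:

import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.util.Arrays;

public class AesFallbackSketch {

    public static byte[] decrypt(byte[] key, byte[] payload) throws GeneralSecurityException {
        try {
            return decryptCbc(key, payload);    // preferred: explicit mode and padding
        } catch (GeneralSecurityException e) {
            return decryptLegacy(key, payload); // legacy resources used default "AES" parameters
        }
    }

    private static byte[] decryptCbc(byte[] key, byte[] payload) throws GeneralSecurityException {
        // Assumes the first 16 bytes of the payload are the IV (illustrative layout only)
        IvParameterSpec iv = new IvParameterSpec(Arrays.copyOfRange(payload, 0, 16));
        byte[] ciphertext = Arrays.copyOfRange(payload, 16, payload.length);
        Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
        cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(key, "AES"), iv);
        return cipher.doFinal(ciphertext);
    }

    private static byte[] decryptLegacy(byte[] key, byte[] payload) throws GeneralSecurityException {
        Cipher cipher = Cipher.getInstance("AES"); // provider default mode/padding
        cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(key, "AES"));
        return cipher.doFinal(payload);
    }

    public static void main(String[] args) throws GeneralSecurityException {
        byte[] key = new byte[32]; // demo-only all-zero key
        byte[] iv = new byte[16];
        Cipher enc = Cipher.getInstance("AES/CBC/PKCS5Padding");
        enc.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
        byte[] ciphertext = enc.doFinal("hello".getBytes(StandardCharsets.UTF_8));
        byte[] payload = new byte[16 + ciphertext.length];
        System.arraycopy(iv, 0, payload, 0, 16);
        System.arraycopy(ciphertext, 0, payload, 16, ciphertext.length);
        System.out.println(new String(decrypt(key, payload), StandardCharsets.UTF_8)); // prints "hello"
    }
}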

View File

@@ -2,6 +2,7 @@ package org.qortal.arbitrary;
import com.google.common.io.Resources;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.HTMLParser;
@@ -34,36 +35,40 @@ public class ArbitraryDataRenderer {
private final String resourceId;
private final ResourceIdType resourceIdType;
private final Service service;
private final String identifier;
private String theme = "light";
private String inPath;
private final String secret58;
private final String prefix;
private final boolean usePrefix;
private final boolean includeResourceIdInPrefix;
private final boolean async;
private final String qdnContext;
private final HttpServletRequest request;
private final HttpServletResponse response;
private final ServletContext context;
public ArbitraryDataRenderer(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async,
public ArbitraryDataRenderer(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String qdnContext,
HttpServletRequest request, HttpServletResponse response, ServletContext context) {
this.resourceId = resourceId;
this.resourceIdType = resourceIdType;
this.service = service;
this.identifier = identifier != null ? identifier : "default";
this.inPath = inPath;
this.secret58 = secret58;
this.prefix = prefix;
this.usePrefix = usePrefix;
this.includeResourceIdInPrefix = includeResourceIdInPrefix;
this.async = async;
this.qdnContext = qdnContext;
this.request = request;
this.response = response;
this.context = context;
}
public HttpServletResponse render() {
if (!inPath.startsWith(File.separator)) {
inPath = File.separator + inPath;
if (!inPath.startsWith("/")) {
inPath = "/" + inPath;
}
// Don't render data if QDN is disabled
@@ -71,14 +76,14 @@ public class ArbitraryDataRenderer {
return ArbitraryDataRenderer.getResponse(response, 500, "QDN is disabled in settings");
}
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(resourceId, resourceIdType, service, null);
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(resourceId, resourceIdType, service, identifier);
arbitraryDataReader.setSecret58(secret58); // Optional, used for loading encrypted file hashes only
try {
if (!arbitraryDataReader.isCachedDataAvailable()) {
// If async is requested, show a loading screen whilst build is in progress
if (async) {
arbitraryDataReader.loadAsynchronously(false, 10);
return this.getLoadingResponse(service, resourceId, theme);
return this.getLoadingResponse(service, resourceId, identifier, theme);
}
// Otherwise, loop until we have data
@@ -111,23 +116,59 @@ public class ArbitraryDataRenderer {
}
String unzippedPath = path.toString();
// Set path automatically for single file resources (except for apps, which handle routing differently)
String[] files = ArrayUtils.removeElement(new File(unzippedPath).list(), ".qortal");
if (files.length == 1 && this.service != Service.APP) {
// This is a single file resource
inPath = files[0];
}
try {
String filename = this.getFilename(unzippedPath, inPath);
String filePath = Paths.get(unzippedPath, filename).toString();
Path filePath = Paths.get(unzippedPath, filename);
boolean usingCustomRouting = false;
// If the file doesn't exist, we may need to route the request elsewhere, or cleanup
if (!Files.exists(filePath)) {
if (inPath.equals("/")) {
// Delete the unzipped folder if no index file was found
try {
FileUtils.deleteDirectory(new File(unzippedPath));
} catch (IOException e) {
LOGGER.debug("Unable to delete directory: {}", unzippedPath, e);
}
}
// If this is an app, then forward all unhandled requests to the index, to give the app the option to route it
if (this.service == Service.APP) {
// Locate index file
List<String> indexFiles = ArbitraryDataRenderer.indexFiles();
for (String indexFile : indexFiles) {
Path indexPath = Paths.get(unzippedPath, indexFile);
if (Files.exists(indexPath)) {
// Forward request to index file
filePath = indexPath;
filename = indexFile;
usingCustomRouting = true;
break;
}
}
}
}
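In other words, when an APP resource receives a request for a path that doesn't exist on disk, the renderer now forwards it to the first index file it can find, so the app's own client-side router gets a chance to handle the original path. A small illustrative sketch of that lookup (the index-file list and method names are placeholders for ArbitraryDataRenderer.indexFiles(), not the real implementation):

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class AppIndexRoutingSketch {
    // Hypothetical stand-in for ArbitraryDataRenderer.indexFiles()
    private static final List<String> INDEX_FILES = List.of("index.html", "index.htm");

    /** Returns the requested file if it exists, otherwise falls back to the first available index file. */
    public static Path resolve(Path unzippedRoot, String requestedFile) {
        Path requested = unzippedRoot.resolve(requestedFile);
        if (Files.exists(requested)) {
            return requested;
        }
        for (String indexFile : INDEX_FILES) {
            Path indexPath = unzippedRoot.resolve(indexFile);
            if (Files.exists(indexPath)) {
                return indexPath; // let the app's own router interpret the original in-path
            }
        }
        return requested; // caller will end up producing a not-found style response
    }

    public static void main(String[] args) {
        System.out.println(resolve(Path.of("."), "no-such-file.txt"));
    }
}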
if (HTMLParser.isHtmlFile(filename)) {
// HTML file - needs to be parsed
byte[] data = Files.readAllBytes(Paths.get(filePath)); // TODO: limit file size that can be read into memory
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, usePrefix, data);
byte[] data = Files.readAllBytes(filePath); // TODO: limit file size that can be read into memory
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting);
htmlParser.addAdditionalHeaderTags();
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' blob:");
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:;");
response.setContentType(context.getMimeType(filename));
response.setContentLength(htmlParser.getData().length);
response.getOutputStream().write(htmlParser.getData());
}
else {
// Regular file - can be streamed directly
File file = new File(filePath);
File file = filePath.toFile();
FileInputStream inputStream = new FileInputStream(file);
response.addHeader("Content-Security-Policy", "default-src 'self'");
response.setContentType(context.getMimeType(filename));
@@ -143,14 +184,6 @@ public class ArbitraryDataRenderer {
return response;
} catch (FileNotFoundException | NoSuchFileException e) {
LOGGER.info("Unable to serve file: {}", e.getMessage());
if (inPath.equals("/")) {
// Delete the unzipped folder if no index file was found
try {
FileUtils.deleteDirectory(new File(unzippedPath));
} catch (IOException ioException) {
LOGGER.debug("Unable to delete directory: {}", unzippedPath, e);
}
}
} catch (IOException e) {
LOGGER.info("Unable to serve file at path {}: {}", inPath, e.getMessage());
}
@@ -172,7 +205,7 @@ public class ArbitraryDataRenderer {
return userPath;
}
private HttpServletResponse getLoadingResponse(Service service, String name, String theme) {
private HttpServletResponse getLoadingResponse(Service service, String name, String identifier, String theme) {
String responseString = "";
URL url = Resources.getResource("loading/index.html");
try {
@@ -181,6 +214,7 @@ public class ArbitraryDataRenderer {
// Replace vars
responseString = responseString.replace("%%SERVICE%%", service.toString());
responseString = responseString.replace("%%NAME%%", name);
responseString = responseString.replace("%%IDENTIFIER%%", identifier);
responseString = responseString.replace("%%THEME%%", theme);
} catch (IOException e) {

View File

@@ -3,6 +3,7 @@ package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.exception.DataNotPublishedException;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;
@@ -10,13 +11,13 @@ import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.ListUtils;
import org.qortal.utils.NTP;
import java.io.IOException;
@@ -42,6 +43,7 @@ public class ArbitraryDataResource {
private int layerCount;
private Integer localChunkCount = null;
private Integer totalChunkCount = null;
private boolean exists = false;
public ArbitraryDataResource(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
this.resourceId = resourceId.toLowerCase();
@@ -60,6 +62,10 @@ public class ArbitraryDataResource {
// Avoid this for "quick" statuses, to speed things up
if (!quick) {
this.calculateChunkCounts();
if (!this.exists) {
return new ArbitraryResourceStatus(Status.NOT_PUBLISHED, this.localChunkCount, this.totalChunkCount);
}
}
if (resourceIdType != ResourceIdType.NAME) {
@@ -68,8 +74,7 @@ public class ArbitraryDataResource {
}
// Check if the name is blocked
if (ResourceListManager.getInstance()
.listContains("blockedNames", this.resourceId, false)) {
if (ListUtils.isNameBlocked(this.resourceId)) {
return new ArbitraryResourceStatus(Status.BLOCKED, this.localChunkCount, this.totalChunkCount);
}
@@ -134,21 +139,23 @@ public class ArbitraryDataResource {
return null;
}
public boolean delete() {
public boolean delete(boolean deleteMetadata) {
try {
this.fetchTransactions();
if (this.transactions == null) {
return false;
}
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
for (ArbitraryTransactionData transactionData : transactionDataList) {
byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
if (arbitraryDataFile == null) {
continue;
}
// Delete any chunks or complete files from each transaction
arbitraryDataFile.deleteAll();
arbitraryDataFile.deleteAll(deleteMetadata);
}
// Also delete cached data for the entire resource
@@ -192,6 +199,9 @@ public class ArbitraryDataResource {
try {
this.fetchTransactions();
if (this.transactions == null) {
return false;
}
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
@@ -211,6 +221,14 @@ public class ArbitraryDataResource {
private void calculateChunkCounts() {
try {
this.fetchTransactions();
if (this.transactions == null) {
this.exists = false;
this.localChunkCount = 0;
this.totalChunkCount = 0;
return;
}
this.exists = true;
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
int localChunkCount = 0;
@@ -230,6 +248,9 @@ public class ArbitraryDataResource {
private boolean isRateLimited() {
try {
this.fetchTransactions();
if (this.transactions == null) {
return true;
}
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
@@ -253,6 +274,10 @@ public class ArbitraryDataResource {
private boolean isDataPotentiallyAvailable() {
try {
this.fetchTransactions();
if (this.transactions == null) {
return false;
}
Long now = NTP.getTime();
if (now == null) {
return false;
@@ -284,6 +309,10 @@ public class ArbitraryDataResource {
private boolean isDownloading() {
try {
this.fetchTransactions();
if (this.transactions == null) {
return false;
}
Long now = NTP.getTime();
if (now == null) {
return false;
@@ -325,7 +354,7 @@ public class ArbitraryDataResource {
if (latestPut == null) {
String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s",
this.resourceId, this.service, this.identifierString());
throw new DataException(message);
throw new DataNotPublishedException(message);
}
this.latestPutTransaction = latestPut;
@@ -336,7 +365,10 @@ public class ArbitraryDataResource {
this.transactions = transactionDataList;
this.layerCount = transactionDataList.size();
} catch (DataException e) {
} catch (DataNotPublishedException e) {
// Ignore without logging
}
catch (DataException e) {
LOGGER.info(String.format("Repository error when fetching transactions for resource %s: %s", this, e.getMessage()));
}
}

View File

@@ -9,6 +9,7 @@ import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.crypto.AES;
import org.qortal.crypto.Crypto;
import org.qortal.data.PaymentData;
import org.qortal.data.transaction.ArbitraryTransactionData;
@@ -46,6 +47,7 @@ public class ArbitraryDataTransactionBuilder {
private static final double MAX_FILE_DIFF = 0.5f;
private final String publicKey58;
private final long fee;
private final Path path;
private final String name;
private Method method;
@@ -64,11 +66,12 @@ public class ArbitraryDataTransactionBuilder {
private ArbitraryTransactionData arbitraryTransactionData;
private ArbitraryDataFile arbitraryDataFile;
public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, Path path, String name,
public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, long fee, Path path, String name,
Method method, Service service, String identifier,
String title, String description, List<String> tags, Category category) {
this.repository = repository;
this.publicKey58 = publicKey58;
this.fee = fee;
this.path = path;
this.name = name;
this.method = method;
@@ -179,6 +182,7 @@ public class ArbitraryDataTransactionBuilder {
for (ModifiedPath path : metadata.getModifiedPaths()) {
if (path.getDiffType() != DiffType.COMPLETE_FILE) {
atLeastOnePatch = true;
break;
}
}
}
@@ -187,6 +191,14 @@ public class ArbitraryDataTransactionBuilder {
return Method.PUT;
}
// We can't use PATCH for on-chain data because this requires the .qortal directory, which can't be put on chain
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(this.path, false);
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
if (shouldUseOnChainData) {
LOGGER.info("Data size is small enough to go on chain - using PUT");
return Method.PUT;
}
// State is appropriate for a PATCH transaction
return Method.PATCH;
}
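The new shouldUseOnChainData check means small single-file resources skip compression and are embedded directly in the transaction, while everything else keeps the existing hash-plus-off-chain-chunks flow. A rough sketch of that decision follows; the 239-byte limit and the encrypted-size formula are assumptions standing in for ArbitraryTransaction.MAX_DATA_SIZE and AES.getEncryptedFileSize():

public class OnChainDataDecisionSketch {

    enum DataType { RAW_DATA, DATA_HASH }
    enum Compression { NONE, ZIP }

    static final long MAX_ON_CHAIN_BYTES = 239; // assumed value, for illustration only

    /** Assumed AES-CBC output size: PKCS5-padded ciphertext plus a 16-byte prepended IV. */
    static long encryptedSize(long plaintextLength) {
        return ((plaintextLength / 16) + 1) * 16 + 16;
    }

    static boolean shouldUseOnChainData(boolean isSingleFileResource, long plaintextLength) {
        return isSingleFileResource && encryptedSize(plaintextLength) <= MAX_ON_CHAIN_BYTES;
    }

    public static void main(String[] args) {
        boolean onChain = shouldUseOnChainData(true, 100);
        Compression compression = onChain ? Compression.NONE : Compression.ZIP;
        DataType dataType = onChain ? DataType.RAW_DATA : DataType.DATA_HASH;
        System.out.println(compression + " / " + dataType); // NONE / RAW_DATA
    }
}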
@@ -227,10 +239,12 @@ public class ArbitraryDataTransactionBuilder {
random.nextBytes(lastReference);
}
Compression compression = Compression.ZIP;
// Single file resources are handled differently, especially for very small data payloads, as these go on chain
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(path, false);
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
// FUTURE? Use zip compression for directories, or no compression for single files
// Compression compression = (path.toFile().isDirectory()) ? Compression.ZIP : Compression.NONE;
// Use zip compression if data isn't going on chain
Compression compression = shouldUseOnChainData ? Compression.NONE : Compression.ZIP;
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(path, name, service, identifier, method,
compression, title, description, tags, category);
@@ -248,45 +262,52 @@ public class ArbitraryDataTransactionBuilder {
throw new DataException("Arbitrary data file is null");
}
// Get chunks metadata file
// Get metadata file
ArbitraryDataFile metadataFile = arbitraryDataFile.getMetadataFile();
if (metadataFile == null && arbitraryDataFile.chunkCount() > 1) {
throw new DataException(String.format("Chunks metadata data file is null but there are %d chunks", arbitraryDataFile.chunkCount()));
}
String digest58 = arbitraryDataFile.digest58();
if (digest58 == null) {
LOGGER.error("Unable to calculate file digest");
throw new DataException("Unable to calculate file digest");
// Default to using a data hash, with data held off-chain
ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
byte[] data = arbitraryDataFile.digest();
// For small, single-chunk resources, we can store the data directly on chain
if (shouldUseOnChainData && arbitraryDataFile.getBytes().length <= ArbitraryTransaction.MAX_DATA_SIZE && arbitraryDataFile.chunkCount() == 0) {
// Within allowed on-chain data size
dataType = DataType.RAW_DATA;
data = arbitraryDataFile.getBytes();
}
final BaseTransactionData baseTransactionData = new BaseTransactionData(now, Group.NO_GROUP,
lastReference, creatorPublicKey, 0L, null);
lastReference, creatorPublicKey, fee, null);
final int size = (int) arbitraryDataFile.size();
final int version = 5;
final int nonce = 0;
byte[] secret = arbitraryDataFile.getSecret();
final ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
final byte[] digest = arbitraryDataFile.digest();
final byte[] metadataHash = (metadataFile != null) ? metadataFile.getHash() : null;
final List<PaymentData> payments = new ArrayList<>();
ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
version, service, nonce, size, name, identifier, method,
secret, compression, digest, dataType, metadataHash, payments);
version, service.value, nonce, size, name, identifier, method,
secret, compression, data, dataType, metadataHash, payments);
this.arbitraryTransactionData = transactionData;
} catch (DataException e) {
} catch (DataException | IOException e) {
if (arbitraryDataFile != null) {
arbitraryDataFile.deleteAll();
arbitraryDataFile.deleteAll(true);
}
throw(e);
throw new DataException(e);
}
}
private boolean isMetadataEqual(ArbitraryDataTransactionMetadata existingMetadata) {
if (existingMetadata == null) {
return !this.hasMetadata();
}
if (!Objects.equals(existingMetadata.getTitle(), this.title)) {
return false;
}
@@ -302,6 +323,10 @@ public class ArbitraryDataTransactionBuilder {
return true;
}
private boolean hasMetadata() {
return (this.title != null || this.description != null || this.category != null || this.tags != null);
}
public void computeNonce() throws DataException {
if (this.arbitraryTransactionData == null) {
throw new DataException("Arbitrary transaction data is required to compute nonce");
@@ -313,7 +338,7 @@ public class ArbitraryDataTransactionBuilder {
Transaction.ValidationResult result = transaction.isValidUnconfirmed();
if (result != Transaction.ValidationResult.OK) {
arbitraryDataFile.deleteAll();
arbitraryDataFile.deleteAll(true);
throw new DataException(String.format("Arbitrary transaction invalid: %s", result));
}
LOGGER.info("Transaction is valid");

View File

@@ -1,5 +1,7 @@
package org.qortal.arbitrary;
import com.j256.simplemagic.ContentInfo;
import com.j256.simplemagic.ContentInfoUtil;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -23,16 +25,15 @@ import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.net.FileNameMap;
import java.net.URLConnection;
import java.nio.file.*;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class ArbitraryDataWriter {
@@ -50,6 +51,8 @@ public class ArbitraryDataWriter {
private final String description;
private final List<String> tags;
private final Category category;
private List<String> files;
private String mimeType;
private int chunkSize = ArbitraryDataFile.CHUNK_SIZE;
@@ -80,12 +83,15 @@ public class ArbitraryDataWriter {
this.description = ArbitraryDataTransactionMetadata.limitDescription(description);
this.tags = ArbitraryDataTransactionMetadata.limitTags(tags);
this.category = category;
this.files = new ArrayList<>(); // Populated in buildFileList()
this.mimeType = null; // Populated in buildFileList()
}
public void save() throws IOException, DataException, InterruptedException, MissingDataException {
try {
this.preExecute();
this.validateService();
this.buildFileList();
this.process();
this.compress();
this.encrypt();
@@ -101,10 +107,9 @@ public class ArbitraryDataWriter {
private void preExecute() throws DataException {
this.checkEnabled();
// Enforce compression when uploading a directory
File file = new File(this.filePath.toString());
if (file.isDirectory() && compression == Compression.NONE) {
throw new DataException("Unable to upload a directory without compression");
// Enforce compression when uploading multiple files
if (!FilesystemUtils.isSingleFileResource(this.filePath, false) && compression == Compression.NONE) {
throw new DataException("Unable to publish multiple files without compression");
}
// Create temporary working directory
@@ -143,6 +148,48 @@ public class ArbitraryDataWriter {
}
}
private void buildFileList() throws IOException {
// Check if the path already points to a single file
boolean isSingleFile = this.filePath.toFile().isFile();
Path singleFilePath = null;
if (isSingleFile) {
this.files.add(this.filePath.getFileName().toString());
singleFilePath = this.filePath;
}
else {
// Multi file resources (or a single file in a directory) require a walk through the directory tree
try (Stream<Path> stream = Files.walk(this.filePath)) {
this.files = stream
.filter(Files::isRegularFile)
.map(p -> this.filePath.relativize(p).toString())
.filter(s -> !s.isEmpty())
.collect(Collectors.toList());
if (this.files.size() == 1) {
singleFilePath = Paths.get(this.filePath.toString(), this.files.get(0));
// Update filePath to point to the single file (instead of the directory containing the file)
this.filePath = singleFilePath;
}
}
}
if (singleFilePath != null) {
// Single file resource, so try and determine the MIME type
ContentInfoUtil util = new ContentInfoUtil();
ContentInfo info = util.findMatch(singleFilePath.toFile());
if (info != null) {
// Attempt to extract MIME type from file contents
this.mimeType = info.getMimeType();
}
else {
// Fall back to using the filename
FileNameMap fileNameMap = URLConnection.getFileNameMap();
this.mimeType = fileNameMap.getContentTypeFor(singleFilePath.toFile().getName());
}
}
}
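Single-file resources now record a MIME type in the transaction metadata: content-based detection via simple-magic first, with a filename-based lookup as the fallback. A compact standalone version of that detection, using the same two libraries the diff imports:

import com.j256.simplemagic.ContentInfo;
import com.j256.simplemagic.ContentInfoUtil;
import java.io.File;
import java.io.IOException;
import java.net.FileNameMap;
import java.net.URLConnection;

public class MimeTypeSketch {

    public static String detectMimeType(File file) throws IOException {
        ContentInfoUtil util = new ContentInfoUtil();
        ContentInfo info = util.findMatch(file); // inspects the file's leading bytes ("magic" numbers)
        if (info != null) {
            return info.getMimeType();
        }
        // Fall back to an extension-based lookup; may return null for unknown extensions
        FileNameMap fileNameMap = URLConnection.getFileNameMap();
        return fileNameMap.getContentTypeFor(file.getName());
    }

    public static void main(String[] args) throws IOException {
        if (args.length > 0) {
            System.out.println(detectMimeType(new File(args[0])));
        }
    }
}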
private void process() throws DataException, IOException, MissingDataException {
switch (this.method) {
@@ -269,9 +316,6 @@ public class ArbitraryDataWriter {
if (chunkCount > 0) {
LOGGER.info(String.format("Successfully split into %d chunk%s", chunkCount, (chunkCount == 1 ? "" : "s")));
}
else {
throw new DataException("Unable to split file into chunks");
}
}
private void createMetadataFile() throws IOException, DataException {
@@ -285,6 +329,8 @@ public class ArbitraryDataWriter {
metadata.setTags(this.tags);
metadata.setCategory(this.category);
metadata.setChunks(this.arbitraryDataFile.chunkHashList());
metadata.setFiles(this.files);
metadata.setMimeType(this.mimeType);
metadata.write();
// Create an ArbitraryDataFile from the JSON file (we don't have a signature yet)

View File

@@ -0,0 +1,22 @@
package org.qortal.arbitrary.exception;
import org.qortal.repository.DataException;
public class DataNotPublishedException extends DataException {
public DataNotPublishedException() {
}
public DataNotPublishedException(String message) {
super(message);
}
public DataNotPublishedException(String message, Throwable cause) {
super(message, cause);
}
public DataNotPublishedException(Throwable cause) {
super(cause);
}
}
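Because DataNotPublishedException extends DataException, callers that want the quieter handling must catch it before the generic DataException, exactly as the ArbitraryDataReader changes earlier in this diff do. A tiny usage example, assuming the two Qortal exception classes shown above are on the classpath:

import org.qortal.arbitrary.exception.DataNotPublishedException;
import org.qortal.repository.DataException;

public class CatchOrderingExample {

    static void load(boolean published) throws DataException {
        if (!published) {
            throw new DataNotPublishedException("not published");
        }
    }

    public static void main(String[] args) {
        try {
            load(false);
        } catch (DataNotPublishedException e) {
            System.out.println("quiet path: " + e.getMessage()); // expected case, debug-level logging only
        } catch (DataException e) {
            System.out.println("genuine failure: " + e.getMessage()); // unexpected, log in full
        }
    }
}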

View File

@@ -2,12 +2,14 @@ package org.qortal.arbitrary.metadata;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.json.JSONException;
import org.qortal.repository.DataException;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -34,7 +36,7 @@ public class ArbitraryDataMetadata {
this.filePath = filePath;
}
protected void readJson() throws DataException {
protected void readJson() throws DataException, JSONException {
// To be overridden
}
@@ -44,8 +46,13 @@ public class ArbitraryDataMetadata {
public void read() throws IOException, DataException {
this.loadJson();
this.readJson();
try {
this.loadJson();
this.readJson();
} catch (JSONException e) {
throw new DataException(String.format("Unable to read JSON at path %s: %s", this.filePath, e.getMessage()));
}
}
public void write() throws IOException, DataException {
@@ -58,6 +65,10 @@ public class ArbitraryDataMetadata {
writer.close();
}
public void delete() throws IOException {
Files.delete(this.filePath);
}
protected void loadJson() throws IOException {
File metadataFile = new File(this.filePath.toString());
@@ -65,7 +76,7 @@ public class ArbitraryDataMetadata {
throw new IOException(String.format("Metadata file doesn't exist: %s", this.filePath.toString()));
}
this.jsonString = new String(Files.readAllBytes(this.filePath));
this.jsonString = new String(Files.readAllBytes(this.filePath), StandardCharsets.UTF_8);
}

View File

@@ -1,5 +1,6 @@
package org.qortal.arbitrary.metadata;
import org.json.JSONException;
import org.json.JSONObject;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
@@ -22,7 +23,7 @@ public class ArbitraryDataMetadataCache extends ArbitraryDataQortalMetadata {
}
@Override
protected void readJson() throws DataException {
protected void readJson() throws DataException, JSONException {
if (this.jsonString == null) {
throw new DataException("Patch JSON string is null");
}

View File

@@ -3,6 +3,7 @@ package org.qortal.arbitrary.metadata;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.qortal.arbitrary.ArbitraryDataDiff.*;
import org.qortal.repository.DataException;
@@ -40,7 +41,7 @@ public class ArbitraryDataMetadataPatch extends ArbitraryDataQortalMetadata {
}
@Override
protected void readJson() throws DataException {
protected void readJson() throws DataException, JSONException {
if (this.jsonString == null) {
throw new DataException("Patch JSON string is null");
}

View File

@@ -2,12 +2,14 @@ package org.qortal.arbitrary.metadata;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.json.JSONException;
import org.qortal.repository.DataException;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -46,20 +48,6 @@ public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {
return null;
}
protected void readJson() throws DataException {
// To be overridden
}
protected void buildJson() {
// To be overridden
}
@Override
public void read() throws IOException, DataException {
this.loadJson();
this.readJson();
}
@Override
public void write() throws IOException, DataException {
@@ -82,7 +70,7 @@ public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {
throw new IOException(String.format("Patch file doesn't exist: %s", path.toString()));
}
this.jsonString = new String(Files.readAllBytes(path));
this.jsonString = new String(Files.readAllBytes(path), StandardCharsets.UTF_8);
}
@@ -94,9 +82,4 @@ public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {
}
}
public String getJsonString() {
return this.jsonString;
}
}

View File

@@ -1,11 +1,13 @@
package org.qortal.arbitrary.metadata;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.qortal.arbitrary.misc.Category;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
@@ -19,9 +21,11 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
private String description;
private List<String> tags;
private Category category;
private List<String> files;
private String mimeType;
private static int MAX_TITLE_LENGTH = 80;
private static int MAX_DESCRIPTION_LENGTH = 500;
private static int MAX_DESCRIPTION_LENGTH = 240;
private static int MAX_TAG_LENGTH = 20;
private static int MAX_TAGS_COUNT = 5;
@@ -31,7 +35,7 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
}
@Override
protected void readJson() throws DataException {
protected void readJson() throws DataException, JSONException {
if (this.jsonString == null) {
throw new DataException("Transaction metadata JSON string is null");
}
@@ -77,6 +81,24 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
}
this.chunks = chunksList;
}
List<String> filesList = new ArrayList<>();
if (metadata.has("files")) {
JSONArray files = metadata.getJSONArray("files");
if (files != null) {
for (int i=0; i<files.length(); i++) {
String tag = files.getString(i);
if (tag != null) {
filesList.add(tag);
}
}
}
this.files = filesList;
}
if (metadata.has("mimeType")) {
this.mimeType = metadata.getString("mimeType");
}
}
@Override
@@ -111,6 +133,18 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
}
outer.put("chunks", chunks);
JSONArray files = new JSONArray();
if (this.files != null) {
for (String file : this.files) {
files.put(file);
}
}
outer.put("files", files);
if (this.mimeType != null && !this.mimeType.isEmpty()) {
outer.put("mimeType", this.mimeType);
}
this.jsonString = outer.toString(2);
LOGGER.trace("Transaction metadata: {}", this.jsonString);
}
@@ -156,6 +190,22 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
return this.category;
}
public void setFiles(List<String> files) {
this.files = files;
}
public List<String> getFiles() {
return this.files;
}
public void setMimeType(String mimeType) {
this.mimeType = mimeType;
}
public String getMimeType() {
return this.mimeType;
}
public boolean containsChunk(byte[] chunk) {
for (byte[] c : this.chunks) {
if (Arrays.equals(c, chunk)) {
@@ -168,6 +218,25 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
// Static helper methods
public static String trimUTF8String(String string, int maxLength) {
byte[] inputBytes = string.getBytes(StandardCharsets.UTF_8);
int length = Math.min(inputBytes.length, maxLength);
byte[] outputBytes = new byte[length];
System.arraycopy(inputBytes, 0, outputBytes, 0, length);
String result = new String(outputBytes, StandardCharsets.UTF_8);
// check if last character is truncated
int lastIndex = result.length() - 1;
if (lastIndex > 0 && result.charAt(lastIndex) != string.charAt(lastIndex)) {
// last character is truncated so remove the last character
return result.substring(0, lastIndex);
}
return result;
}
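trimUTF8String() limits by encoded byte length rather than character count, and drops the final character if it was cut mid-sequence, so multi-byte characters are never left half-truncated. A quick standalone illustration of why plain byte truncation is not enough:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class Utf8TrimExample {
    public static void main(String[] args) {
        String title = "héllo"; // five characters, but six bytes in UTF-8 ('é' encodes to two bytes)
        System.out.println(title.getBytes(StandardCharsets.UTF_8).length); // 6

        // Naive truncation to 2 bytes splits 'é' and decodes to a replacement character:
        byte[] cut = Arrays.copyOf(title.getBytes(StandardCharsets.UTF_8), 2);
        System.out.println(new String(cut, StandardCharsets.UTF_8)); // "h" followed by U+FFFD

        // trimUTF8String(title, 2) instead detects the mangled last character and returns just "h"
    }
}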
public static String limitTitle(String title) {
if (title == null) {
return null;
@@ -176,7 +245,7 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
return null;
}
return title.substring(0, Math.min(title.length(), MAX_TITLE_LENGTH));
return trimUTF8String(title, MAX_TITLE_LENGTH);
}
public static String limitDescription(String description) {
@@ -187,7 +256,7 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
return null;
}
return description.substring(0, Math.min(description.length(), MAX_DESCRIPTION_LENGTH));
return trimUTF8String(description, MAX_DESCRIPTION_LENGTH);
}
public static List<String> limitTags(List<String> tags) {

View File

@@ -1,26 +1,66 @@
package org.qortal.arbitrary.misc;
import org.apache.commons.io.FilenameUtils;
import org.json.JSONObject;
import org.qortal.arbitrary.ArbitraryDataRenderer;
import org.qortal.transaction.Transaction;
import org.qortal.utils.FilesystemUtils;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.*;
import com.fasterxml.jackson.databind.ObjectMapper;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;
public enum Service {
AUTO_UPDATE(1, false, null, null),
ARBITRARY_DATA(100, false, null, null),
WEBSITE(200, true, null, null) {
AUTO_UPDATE(1, false, null, false, false, null),
ARBITRARY_DATA(100, false, null, false, false, null),
QCHAT_ATTACHMENT(120, true, 1024*1024L, true, false, null) {
@Override
public ValidationResult validate(Path path) {
public ValidationResult validate(Path path) throws IOException {
ValidationResult superclassResult = super.validate(path);
if (superclassResult != ValidationResult.OK) {
return superclassResult;
}
File[] files = path.toFile().listFiles();
// If already a single file, replace the list with one that contains that file only
if (files == null && path.toFile().isFile()) {
files = new File[] { path.toFile() };
}
// Now validate the file's extension
if (files != null && files[0] != null) {
final String extension = FilenameUtils.getExtension(files[0].getName()).toLowerCase();
// We must allow blank file extensions because these are used by data published from a plaintext or base64-encoded string
final List<String> allowedExtensions = Arrays.asList("zip", "pdf", "txt", "odt", "ods", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "");
if (extension == null || !allowedExtensions.contains(extension)) {
return ValidationResult.INVALID_FILE_EXTENSION;
}
}
return ValidationResult.OK;
}
},
QCHAT_ATTACHMENT_PRIVATE(121, true, 1024*1024L, true, true, null),
ATTACHMENT(130, false, 50*1024*1024L, true, false, null),
ATTACHMENT_PRIVATE(131, true, 50*1024*1024L, true, true, null),
FILE(140, false, null, true, false, null),
FILE_PRIVATE(141, true, null, true, true, null),
FILES(150, false, null, false, false, null),
CHAIN_DATA(160, true, 239L, true, false, null),
WEBSITE(200, true, null, false, false, null) {
@Override
public ValidationResult validate(Path path) throws IOException {
ValidationResult superclassResult = super.validate(path);
if (superclassResult != ValidationResult.OK) {
return superclassResult;
}
// Custom validation function to require an index HTML file in the root directory
List<String> fileNames = ArbitraryDataRenderer.indexFiles();
String[] files = path.toFile().list();
@@ -35,33 +75,125 @@ public enum Service {
return ValidationResult.MISSING_INDEX_FILE;
}
},
GIT_REPOSITORY(300, false, null, null),
IMAGE(400, true, 10*1024*1024L, null),
THUMBNAIL(410, true, 500*1024L, null),
VIDEO(500, false, null, null),
AUDIO(600, false, null, null),
BLOG(700, false, null, null),
BLOG_POST(777, false, null, null),
BLOG_COMMENT(778, false, null, null),
DOCUMENT(800, false, null, null),
LIST(900, true, null, null),
PLAYLIST(910, true, null, null),
APP(1000, false, null, null),
METADATA(1100, false, null, null),
QORTAL_METADATA(1111, true, 10*1024L, Arrays.asList("title", "description", "tags"));
GIT_REPOSITORY(300, false, null, false, false, null),
IMAGE(400, true, 10*1024*1024L, true, false, null),
IMAGE_PRIVATE(401, true, 10*1024*1024L, true, true, null),
THUMBNAIL(410, true, 500*1024L, true, false, null),
QCHAT_IMAGE(420, true, 500*1024L, true, false, null),
VIDEO(500, false, null, true, false, null),
VIDEO_PRIVATE(501, true, null, true, true, null),
AUDIO(600, false, null, true, false, null),
AUDIO_PRIVATE(601, true, null, true, true, null),
QCHAT_AUDIO(610, true, 10*1024*1024L, true, false, null),
QCHAT_VOICE(620, true, 10*1024*1024L, true, false, null),
VOICE(630, true, 10*1024*1024L, true, false, null),
VOICE_PRIVATE(631, true, 10*1024*1024L, true, true, null),
PODCAST(640, false, null, true, false, null),
BLOG(700, false, null, false, false, null),
BLOG_POST(777, false, null, true, false, null),
BLOG_COMMENT(778, true, 500*1024L, true, false, null),
DOCUMENT(800, false, null, true, false, null),
DOCUMENT_PRIVATE(801, true, null, true, true, null),
LIST(900, true, null, true, false, null),
PLAYLIST(910, true, null, true, false, null),
APP(1000, true, 50*1024*1024L, false, false, null),
METADATA(1100, false, null, true, false, null),
JSON(1110, true, 25*1024L, true, false, null) {
@Override
public ValidationResult validate(Path path) throws IOException {
ValidationResult superclassResult = super.validate(path);
if (superclassResult != ValidationResult.OK) {
return superclassResult;
}
// Require valid JSON
byte[] data = FilesystemUtils.getSingleFileContents(path, 25*1024);
String json = new String(data, StandardCharsets.UTF_8);
try {
objectMapper.readTree(json);
return ValidationResult.OK;
} catch (IOException e) {
return ValidationResult.INVALID_CONTENT;
}
}
},
GIF_REPOSITORY(1200, true, 25*1024*1024L, false, false, null) {
@Override
public ValidationResult validate(Path path) throws IOException {
ValidationResult superclassResult = super.validate(path);
if (superclassResult != ValidationResult.OK) {
return superclassResult;
}
// Custom validation function to require .gif files only, and at least 1
int gifCount = 0;
File[] files = path.toFile().listFiles();
// If already a single file, replace the list with one that contains that file only
if (files == null && path.toFile().isFile()) {
files = new File[] { path.toFile() };
}
if (files != null) {
for (File file : files) {
if (file.getName().equals(".qortal")) {
continue;
}
if (file.isDirectory()) {
return ValidationResult.DIRECTORIES_NOT_ALLOWED;
}
String extension = FilenameUtils.getExtension(file.getName()).toLowerCase();
if (!Objects.equals(extension, "gif")) {
return ValidationResult.INVALID_FILE_EXTENSION;
}
gifCount++;
}
}
if (gifCount == 0) {
return ValidationResult.MISSING_DATA;
}
return ValidationResult.OK;
}
},
STORE(1300, false, null, true, false, null),
PRODUCT(1310, false, null, true, false, null),
OFFER(1330, false, null, true, false, null),
COUPON(1340, false, null, true, false, null),
CODE(1400, false, null, true, false, null),
PLUGIN(1410, false, null, true, false, null),
EXTENSION(1420, false, null, true, false, null),
GAME(1500, false, null, false, false, null),
ITEM(1510, false, null, true, false, null),
NFT(1600, false, null, true, false, null),
DATABASE(1700, false, null, false, false, null),
SNAPSHOT(1710, false, null, false, false, null),
COMMENT(1800, true, 500*1024L, true, false, null),
CHAIN_COMMENT(1810, true, 239L, true, false, null),
MAIL(1900, true, 1024*1024L, true, false, null),
MAIL_PRIVATE(1901, true, 1024*1024L, true, true, null),
MESSAGE(1910, true, 1024*1024L, true, false, null),
MESSAGE_PRIVATE(1911, true, 1024*1024L, true, true, null);
public final int value;
private final boolean requiresValidation;
private final Long maxSize;
private final boolean single;
private final boolean isPrivate;
private final List<String> requiredKeys;
private static final Map<Integer, Service> map = stream(Service.values())
.collect(toMap(service -> service.value, service -> service));
Service(int value, boolean requiresValidation, Long maxSize, List<String> requiredKeys) {
// For JSON validation
private static final ObjectMapper objectMapper = new ObjectMapper();
private static final String encryptedDataPrefix = "qortalEncryptedData";
private static final String encryptedGroupDataPrefix = "qortalGroupEncryptedData";
Service(int value, boolean requiresValidation, Long maxSize, boolean single, boolean isPrivate, List<String> requiredKeys) {
this.value = value;
this.requiresValidation = requiresValidation;
this.maxSize = maxSize;
this.single = single;
this.isPrivate = isPrivate;
this.requiredKeys = requiredKeys;
}
@@ -70,7 +202,9 @@ public enum Service {
return ValidationResult.OK;
}
byte[] data = FilesystemUtils.getSingleFileContents(path);
// Load the first 25KB of data. This only needs to be long enough to check the prefix
// and also to allow for possible additional future validation of smaller files.
byte[] data = FilesystemUtils.getSingleFileContents(path, 25*1024);
long size = FilesystemUtils.getDirectorySize(path);
// Validate max size if needed
@@ -80,6 +214,22 @@ public enum Service {
}
}
// Validate file count if needed
if (this.single && data == null) {
return ValidationResult.INVALID_FILE_COUNT;
}
// Validate private data for single file resources
if (this.single) {
String dataString = new String(data, StandardCharsets.UTF_8);
if (this.isPrivate && !dataString.startsWith(encryptedDataPrefix) && !dataString.startsWith(encryptedGroupDataPrefix)) {
return ValidationResult.DATA_NOT_ENCRYPTED;
}
if (!this.isPrivate && (dataString.startsWith(encryptedDataPrefix) || dataString.startsWith(encryptedGroupDataPrefix))) {
return ValidationResult.DATA_ENCRYPTED;
}
}
// Validate required keys if needed
if (this.requiredKeys != null) {
if (data == null) {
@@ -98,7 +248,12 @@ public enum Service {
}
public boolean isValidationRequired() {
return this.requiresValidation;
// We must always validate single file resources, to ensure they are actually a single file
return this.requiresValidation || this.single;
}
public boolean isPrivate() {
return this.isPrivate;
}
public static Service valueOf(int value) {
@@ -106,15 +261,53 @@ public enum Service {
}
public static JSONObject toJsonObject(byte[] data) {
String dataString = new String(data);
String dataString = new String(data, StandardCharsets.UTF_8);
return new JSONObject(dataString);
}
public static List<Service> publicServices() {
List<Service> privateServices = new ArrayList<>();
for (Service service : Service.values()) {
if (!service.isPrivate) {
privateServices.add(service);
}
}
return privateServices;
}
/**
* Fetch a list of Service objects that require encrypted data.
*
* These can ultimately be used to help inform the cleanup manager
* on the best order to delete files when the node runs out of space.
* Public data should be given priority over private data (unless
* this node is part of a data market contract for that data - this
* isn't developed yet).
*
* @return a list of Service objects that require encrypted data.
*/
public static List<Service> privateServices() {
List<Service> privateServices = new ArrayList<>();
for (Service service : Service.values()) {
if (service.isPrivate) {
privateServices.add(service);
}
}
return privateServices;
}
public enum ValidationResult {
OK(1),
MISSING_KEYS(2),
EXCEEDS_SIZE_LIMIT(3),
MISSING_INDEX_FILE(4);
MISSING_INDEX_FILE(4),
DIRECTORIES_NOT_ALLOWED(5),
INVALID_FILE_EXTENSION(6),
MISSING_DATA(7),
INVALID_FILE_COUNT(8),
INVALID_CONTENT(9),
DATA_NOT_ENCRYPTED(10),
DATA_ENCRYPTED(11);
public final int value;
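The static map built from stream(Service.values()) presumably backs the integer overload of valueOf() further down this file: the index is built once when the enum class is initialised and gives constant-time lookups by on-chain service number. A minimal sketch of the same pattern with placeholder values:

import java.util.Map;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;

public class EnumLookupSketch {

    public enum Kind {
        WEBSITE(200), APP(1000), JSON(1110);

        public final int value;

        Kind(int value) {
            this.value = value;
        }

        // Built once, after all constants exist; keys are the integer service values
        private static final Map<Integer, Kind> map =
                stream(Kind.values()).collect(toMap(k -> k.value, k -> k));

        public static Kind valueOf(int value) {
            return map.get(value);
        }
    }

    public static void main(String[] args) {
        System.out.println(Kind.valueOf(1000)); // APP
    }
}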

View File

@@ -3,9 +3,12 @@ package org.qortal.block;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.charset.StandardCharsets;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.*;
@@ -24,6 +27,7 @@ import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.block.BlockChain.AccountLevelShareBin;
import org.qortal.controller.OnlineAccountsManager;
import org.qortal.crypto.Crypto;
import org.qortal.crypto.Qortal25519Extras;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.EligibleQoraHolderData;
@@ -85,7 +89,8 @@ public class Block {
ONLINE_ACCOUNT_UNKNOWN(71),
ONLINE_ACCOUNT_SIGNATURES_MISSING(72),
ONLINE_ACCOUNT_SIGNATURES_MALFORMED(73),
ONLINE_ACCOUNT_SIGNATURE_INCORRECT(74);
ONLINE_ACCOUNT_SIGNATURE_INCORRECT(74),
ONLINE_ACCOUNT_NONCE_INCORRECT(75);
public final int value;
@@ -118,6 +123,8 @@ public class Block {
/** Remote/imported/loaded AT states */
protected List<ATStateData> atStates;
/** Remote hash of AT states - in lieu of full AT state data in {@code atStates} */
protected byte[] atStatesHash;
/** Locally-generated AT states */
protected List<ATStateData> ourAtStates;
/** Locally-generated AT fees */
@@ -129,7 +136,7 @@ public class Block {
}
/** Lazy-instantiated expanded info on block's online accounts. */
private static class ExpandedAccount {
public static class ExpandedAccount {
private final RewardShareData rewardShareData;
private final int sharePercent;
private final boolean isRecipientAlsoMinter;
@@ -162,6 +169,13 @@ public class Block {
}
}
public Account getMintingAccount() {
return this.mintingAccount;
}
public Account getRecipientAccount() {
return this.recipientAccount;
}
/**
* Returns share bin for expanded account.
* <p>
@@ -178,8 +192,11 @@ public class Block {
if (accountLevel <= 0)
return null; // level 0 isn't included in any share bins
// Select the correct set of share bins based on block height
final BlockChain blockChain = BlockChain.getInstance();
final AccountLevelShareBin[] shareBinsByLevel = blockChain.getShareBinsByAccountLevel();
final AccountLevelShareBin[] shareBinsByLevel = (blockHeight >= blockChain.getSharesByLevelV2Height()) ?
blockChain.getShareBinsByAccountLevelV2() : blockChain.getShareBinsByAccountLevelV1();
if (accountLevel > shareBinsByLevel.length)
return null;
@@ -192,6 +209,11 @@ public class Block {
}
public boolean hasShareBin(AccountLevelShareBin shareBin, int blockHeight) {
AccountLevelShareBin ourShareBin = this.getShareBin(blockHeight);
return ourShareBin != null && shareBin.id == ourShareBin.id;
}
public long distribute(long accountAmount, Map<String, Long> balanceChanges) {
if (this.isRecipientAlsoMinter) {
// minter & recipient the same - simpler case
@@ -216,11 +238,10 @@ public class Block {
return accountAmount;
}
}
/** Always use getExpandedAccounts() to access this, as it's lazy-instantiated. */
private List<ExpandedAccount> cachedExpandedAccounts = null;
/** Opportunistic cache of this block's valid online accounts. Only created by call to isValid(). */
private List<OnlineAccountData> cachedValidOnlineAccounts = null;
/** Opportunistic cache of this block's valid online reward-shares. Only created by call to isValid(). */
private List<RewardShareData> cachedOnlineRewardShares = null;
@@ -255,7 +276,7 @@ public class Block {
* Constructs new Block using passed transaction and AT states.
* <p>
* This constructor typically used when receiving a serialized block over the network.
*
*
* @param repository
* @param blockData
* @param transactions
@@ -281,6 +302,35 @@ public class Block {
this.blockData.setTotalFees(totalFees);
}
/**
* Constructs new Block using passed transaction and minimal AT state info.
* <p>
* This constructor typically used when receiving a serialized block over the network.
*
* @param repository
* @param blockData
* @param transactions
* @param atStatesHash
*/
public Block(Repository repository, BlockData blockData, List<TransactionData> transactions, byte[] atStatesHash) {
this(repository, blockData);
this.transactions = new ArrayList<>();
long totalFees = 0;
// We have to sum fees too
for (TransactionData transactionData : transactions) {
this.transactions.add(Transaction.fromData(repository, transactionData));
totalFees += transactionData.getFee();
}
this.atStatesHash = atStatesHash;
totalFees += this.blockData.getATFees();
this.blockData.setTotalFees(totalFees);
}
/**
* Constructs new Block with empty transaction list, using passed minter account.
*
@@ -313,18 +363,36 @@ public class Block {
int version = parentBlock.getNextBlockVersion();
byte[] reference = parentBlockData.getSignature();
// Fetch our list of online accounts
List<OnlineAccountData> onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts();
if (onlineAccounts.isEmpty()) {
LOGGER.error("No online accounts - not even our own?");
// Qortal: minter is always a reward-share, so find actual minter and get their effective minting level
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey());
if (minterLevel == 0) {
LOGGER.error("Minter effective level returned zero?");
return null;
}
// Find newest online accounts timestamp
long onlineAccountsTimestamp = 0;
for (OnlineAccountData onlineAccountData : onlineAccounts) {
if (onlineAccountData.getTimestamp() > onlineAccountsTimestamp)
onlineAccountsTimestamp = onlineAccountData.getTimestamp();
int height = parentBlockData.getHeight() + 1;
long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel);
long onlineAccountsTimestamp = OnlineAccountsManager.getCurrentOnlineAccountTimestamp();
// Fetch our list of online accounts, removing any that are missing a nonce
List<OnlineAccountData> onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(onlineAccountsTimestamp);
onlineAccounts.removeIf(a -> a.getNonce() == null || a.getNonce() < 0);
// After feature trigger, remove any online accounts that are level 0
if (height >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
onlineAccounts.removeIf(a -> {
try {
return Account.getRewardShareEffectiveMintingLevel(repository, a.getPublicKey()) == 0;
} catch (DataException e) {
// Something went wrong, so remove the account
return true;
}
});
}
if (onlineAccounts.isEmpty()) {
LOGGER.debug("No online accounts - not even our own?");
return null;
}
// Load sorted list of reward share public keys into memory, so that the indexes can be obtained.
@@ -335,10 +403,6 @@ public class Block {
// Map using index into sorted list of reward-shares as key
Map<Integer, OnlineAccountData> indexedOnlineAccounts = new HashMap<>();
for (OnlineAccountData onlineAccountData : onlineAccounts) {
// Disregard online accounts with different timestamps
if (onlineAccountData.getTimestamp() != onlineAccountsTimestamp)
continue;
Integer accountIndex = getRewardShareIndex(onlineAccountData.getPublicKey(), allRewardSharePublicKeys);
if (accountIndex == null)
// Online account (reward-share) with current timestamp but reward-share cancelled
@@ -355,29 +419,43 @@ public class Block {
byte[] encodedOnlineAccounts = BlockTransformer.encodeOnlineAccounts(onlineAccountsSet);
int onlineAccountsCount = onlineAccountsSet.size();
// Concatenate online account timestamp signatures (in correct order)
byte[] onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH];
for (int i = 0; i < onlineAccountsCount; ++i) {
Integer accountIndex = accountIndexes.get(i);
OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex);
System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH);
// Collate all signatures
Collection<byte[]> signaturesToAggregate = indexedOnlineAccounts.values()
.stream()
.map(OnlineAccountData::getSignature)
.collect(Collectors.toList());
// Aggregated, single signature
byte[] onlineAccountsSignatures = Qortal25519Extras.aggregateSignatures(signaturesToAggregate);
// Add nonces to the end of the online accounts signatures
try {
// Create ordered list of nonce values
List<Integer> nonces = new ArrayList<>();
for (int i = 0; i < onlineAccountsCount; ++i) {
Integer accountIndex = accountIndexes.get(i);
OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex);
nonces.add(onlineAccountData.getNonce());
}
// Encode the nonces to a byte array
byte[] encodedNonces = BlockTransformer.encodeOnlineAccountNonces(nonces);
// Append the encoded nonces to the encoded online account signatures
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
outputStream.write(onlineAccountsSignatures);
outputStream.write(encodedNonces);
onlineAccountsSignatures = outputStream.toByteArray();
}
catch (TransformationException | IOException e) {
return null;
}
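The payload produced above is simply the aggregated signature followed by each account's nonce encoded as a fixed-width integer, in account-index order. A minimal standalone sketch of that layout, assuming 4-byte big-endian nonces (the real encoding lives in BlockTransformer.encodeOnlineAccountNonces):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

public class OnlineAccountsPayloadSketch {
    /** Aggregated signature first, then one fixed-width nonce per online account, in account-index order. */
    public static byte[] buildPayload(byte[] aggregatedSignature, List<Integer> noncesInIndexOrder) throws IOException {
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        outputStream.write(aggregatedSignature);
        for (Integer nonce : noncesInIndexOrder)
            outputStream.write(ByteBuffer.allocate(Integer.BYTES).putInt(nonce).array());
        return outputStream.toByteArray();
    }
}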
byte[] minterSignature = minter.sign(BlockTransformer.getBytesForMinterSignature(parentBlockData,
minter.getPublicKey(), encodedOnlineAccounts));
// Qortal: minter is always a reward-share, so find actual minter and get their effective minting level
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey());
if (minterLevel == 0) {
LOGGER.error("Minter effective level returned zero?");
return null;
}
long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel);
int transactionCount = 0;
byte[] transactionsSignature = null;
int height = parentBlockData.getHeight() + 1;
int atCount = 0;
long atFees = 0;
@@ -579,6 +657,10 @@ public class Block {
return this.atStates;
}
public byte[] getAtStatesHash() {
return this.atStatesHash;
}
/**
* Return expanded info on block's online accounts.
* <p>
@@ -971,6 +1053,15 @@ public class Block {
if (onlineRewardShares == null)
return ValidationResult.ONLINE_ACCOUNT_UNKNOWN;
// After feature trigger, require all online account minters to be greater than level 0
if (this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
for (ExpandedAccount account : expandedAccounts) {
if (account.getMintingAccount().getEffectiveMintingLevel() == 0)
return ValidationResult.ONLINE_ACCOUNTS_INVALID;
}
}
// If block is past a certain age then we simply assume the signatures were correct
long signatureRequirementThreshold = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMinLifetime();
if (this.blockData.getTimestamp() < signatureRequirementThreshold)
@@ -979,49 +1070,64 @@ public class Block {
if (this.blockData.getOnlineAccountsSignatures() == null || this.blockData.getOnlineAccountsSignatures().length == 0)
return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MISSING;
if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH)
final int signaturesLength = Transformer.SIGNATURE_LENGTH;
final int noncesLength = onlineRewardShares.size() * Transformer.INT_LENGTH;
// We expect nonces to be appended to the online accounts signatures
if (this.blockData.getOnlineAccountsSignatures().length != signaturesLength + noncesLength)
return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED;
// Check signatures
long onlineTimestamp = this.blockData.getOnlineAccountsTimestamp();
byte[] onlineTimestampBytes = Longs.toByteArray(onlineTimestamp);
// If this block is much older than current online timestamp, then there's no point checking current online accounts
List<OnlineAccountData> currentOnlineAccounts = onlineTimestamp < NTP.getTime() - OnlineAccountsManager.ONLINE_TIMESTAMP_MODULUS
? null
: OnlineAccountsManager.getInstance().getOnlineAccounts();
List<OnlineAccountData> latestBlocksOnlineAccounts = OnlineAccountsManager.getInstance().getLatestBlocksOnlineAccounts();
byte[] encodedOnlineAccountSignatures = this.blockData.getOnlineAccountsSignatures();
// Extract online accounts' timestamp signatures from block data
List<byte[]> onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures());
// Split online account signatures into signature(s) + nonces, then validate the nonces
byte[] extractedSignatures = BlockTransformer.extract(encodedOnlineAccountSignatures, 0, signaturesLength);
byte[] extractedNonces = BlockTransformer.extract(encodedOnlineAccountSignatures, signaturesLength, onlineRewardShares.size() * Transformer.INT_LENGTH);
encodedOnlineAccountSignatures = extractedSignatures;
// We'll build up a list of online accounts to hand over to Controller if block is added to chain
// and this will become latestBlocksOnlineAccounts (above) to reduce CPU load when we process next block...
List<OnlineAccountData> ourOnlineAccounts = new ArrayList<>();
List<Integer> nonces = BlockTransformer.decodeOnlineAccountNonces(extractedNonces);
for (int i = 0; i < onlineAccountsSignatures.size(); ++i) {
byte[] signature = onlineAccountsSignatures.get(i);
// Build block's view of online accounts (without signatures, as we don't need them here)
Set<OnlineAccountData> onlineAccounts = new HashSet<>();
for (int i = 0; i < onlineRewardShares.size(); ++i) {
Integer nonce = nonces.get(i);
byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey();
OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey);
ourOnlineAccounts.add(onlineAccountData);
// If signature is still current then no need to perform Ed25519 verify
if (currentOnlineAccounts != null && currentOnlineAccounts.remove(onlineAccountData))
// remove() returned true, so online account still current
// and one less entry in currentOnlineAccounts to check next time
continue;
// If signature was okay in latest block then no need to perform Ed25519 verify
if (latestBlocksOnlineAccounts != null && latestBlocksOnlineAccounts.contains(onlineAccountData))
continue;
if (!Crypto.verify(publicKey, signature, onlineTimestampBytes))
return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT;
OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, null, publicKey, nonce);
onlineAccounts.add(onlineAccountData);
}
// Remove those already validated & cached by online accounts manager - no need to re-validate them
OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp);
// Validate the rest
for (OnlineAccountData onlineAccount : onlineAccounts)
if (!OnlineAccountsManager.getInstance().verifyMemoryPoW(onlineAccount, null))
return ValidationResult.ONLINE_ACCOUNT_NONCE_INCORRECT;
// Cache the valid online accounts as they will likely be needed for the next block
OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp);
// Extract online accounts' timestamp signatures from block data. Only one signature if aggregated.
List<byte[]> onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(encodedOnlineAccountSignatures);
// Aggregate all public keys
Collection<byte[]> publicKeys = onlineRewardShares.stream()
.map(RewardShareData::getRewardSharePublicKey)
.collect(Collectors.toList());
byte[] aggregatePublicKey = Qortal25519Extras.aggregatePublicKeys(publicKeys);
byte[] aggregateSignature = onlineAccountsSignatures.get(0);
// One-step verification of aggregate signature using aggregate public key
if (!Qortal25519Extras.verifyAggregated(aggregatePublicKey, aggregateSignature, onlineTimestampBytes))
return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT;
// All online accounts valid, so save our list of online accounts for potential later use
this.cachedValidOnlineAccounts = ourOnlineAccounts;
this.cachedOnlineRewardShares = onlineRewardShares;
return ValidationResult.OK;
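For validation the same layout is read back: the payload length must be exactly one aggregated signature plus one fixed-width nonce per online reward-share, and the two parts are then split apart before the nonces are PoW-checked and the single signature is verified against the aggregate public key. A standalone sketch of the length check and split (the constants mirror Transformer.SIGNATURE_LENGTH and Transformer.INT_LENGTH, which is an assumption here):

import java.util.Arrays;

public class OnlineAccountsPayloadSplitSketch {
    private static final int SIGNATURE_LENGTH = 64; // assumed value of Transformer.SIGNATURE_LENGTH
    private static final int INT_LENGTH = 4;        // assumed value of Transformer.INT_LENGTH

    /** Returns {aggregatedSignature, encodedNonces}, or null if the payload has the wrong length. */
    public static byte[][] split(byte[] payload, int onlineRewardShareCount) {
        int expectedLength = SIGNATURE_LENGTH + onlineRewardShareCount * INT_LENGTH;
        if (payload == null || payload.length != expectedLength)
            return null;

        byte[] aggregatedSignature = Arrays.copyOfRange(payload, 0, SIGNATURE_LENGTH);
        byte[] encodedNonces = Arrays.copyOfRange(payload, SIGNATURE_LENGTH, expectedLength);
        return new byte[][] { aggregatedSignature, encodedNonces };
    }
}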
@@ -1168,6 +1274,7 @@ public class Block {
}
}
} catch (DataException e) {
LOGGER.info("DataException during transaction validation", e);
return ValidationResult.TRANSACTION_INVALID;
} finally {
// Rollback repository changes made by test-processing transactions above
@@ -1194,7 +1301,7 @@ public class Block {
*/
private ValidationResult areAtsValid() throws DataException {
// Locally generated AT states should be valid so no need to re-execute them
if (this.ourAtStates == this.getATStates()) // Note object reference compare
if (this.ourAtStates != null && this.ourAtStates == this.atStates) // Note object reference compare
return ValidationResult.OK;
// Generate local AT states for comparison
@@ -1208,8 +1315,33 @@ public class Block {
if (this.ourAtFees != this.blockData.getATFees())
return ValidationResult.AT_STATES_MISMATCH;
// Note: this.atStates fully loaded thanks to this.getATStates() call above
for (int s = 0; s < this.atStates.size(); ++s) {
// If we have a single AT states hash then compare that in preference
if (this.atStatesHash != null) {
int atBytesLength = blockData.getATCount() * BlockTransformer.AT_ENTRY_LENGTH;
ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength);
try {
for (ATStateData atStateData : this.ourAtStates) {
atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
atHashBytes.write(atStateData.getStateHash());
atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
}
} catch (IOException e) {
throw new DataException("Couldn't validate AT states hash due to serialization issue?", e);
}
byte[] ourAtStatesHash = Crypto.digest(atHashBytes.toByteArray());
if (!Arrays.equals(ourAtStatesHash, this.atStatesHash))
return ValidationResult.AT_STATES_MISMATCH;
// Use our AT state data from now on
this.atStates = this.ourAtStates;
return ValidationResult.OK;
}
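The single hash above commits to every AT's address, state hash and fees in order, so one digest comparison can replace the per-AT field comparison further down. A self-contained sketch of that construction, assuming SHA-256 and an 8-byte big-endian fee encoding (the node uses its own Crypto.digest and Longs.toByteArray helpers):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;

public class AtStatesHashSketch {
    public static class AtStateEntry {
        final String atAddress;
        final byte[] stateHash;
        final long fees;

        public AtStateEntry(String atAddress, byte[] stateHash, long fees) {
            this.atAddress = atAddress;
            this.stateHash = stateHash;
            this.fees = fees;
        }
    }

    public static byte[] hashAtStates(List<AtStateEntry> entries) throws IOException, NoSuchAlgorithmException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        for (AtStateEntry entry : entries) {
            buffer.write(entry.atAddress.getBytes(StandardCharsets.UTF_8));               // AT address
            buffer.write(entry.stateHash);                                                // per-AT state hash
            buffer.write(ByteBuffer.allocate(Long.BYTES).putLong(entry.fees).array());    // fees, big-endian
        }
        return MessageDigest.getInstance("SHA-256").digest(buffer.toByteArray());
    }
}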
// Note: this.atStates fully loaded thanks to this.getATStates() call:
this.getATStates();
for (int s = 0; s < this.ourAtStates.size(); ++s) {
ATStateData ourAtState = this.ourAtStates.get(s);
ATStateData theirAtState = this.atStates.get(s);
@@ -1335,6 +1467,9 @@ public class Block {
if (this.blockData.getHeight() == 212937)
// Apply fix for block 212937
Block212937.processFix(this);
else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height())
SelfSponsorshipAlgoV1Block.processAccountPenalties(this);
}
// We're about to (test-)process a batch of transactions,
@@ -1367,9 +1502,6 @@ public class Block {
postBlockTidy();
// Give Controller our cached, valid online accounts data (if any) to help reduce CPU load for next block
OnlineAccountsManager.getInstance().pushLatestBlocksOnlineAccounts(this.cachedValidOnlineAccounts);
// Log some debugging info relating to the block weight calculation
this.logDebugInfo();
}
@@ -1394,19 +1526,23 @@ public class Block {
// Batch update in repository
repository.getAccountRepository().modifyMintedBlockCounts(allUniqueExpandedAccounts.stream().map(AccountData::getAddress).collect(Collectors.toList()), +1);
// Keep track of level bumps in case we need to apply to other entries
Map<String, Integer> bumpedAccounts = new HashMap<>();
// Local changes and also checks for level bump
for (AccountData accountData : allUniqueExpandedAccounts) {
// Adjust count locally (in Java)
accountData.setBlocksMinted(accountData.getBlocksMinted() + 1);
LOGGER.trace(() -> String.format("Block minter %s up to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment();
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
if (newLevel > accountData.getLevel()) {
// Account has increased in level!
accountData.setLevel(newLevel);
bumpedAccounts.put(accountData.getAddress(), newLevel);
repository.getAccountRepository().setLevel(accountData);
LOGGER.trace(() -> String.format("Block minter %s bumped to level %d", accountData.getAddress(), accountData.getLevel()));
}
@@ -1414,6 +1550,25 @@ public class Block {
break;
}
}
// Also bump other entries if need be
if (!bumpedAccounts.isEmpty()) {
for (ExpandedAccount expandedAccount : expandedAccounts) {
Integer newLevel = bumpedAccounts.get(expandedAccount.mintingAccountData.getAddress());
if (newLevel != null && expandedAccount.mintingAccountData.getLevel() != newLevel) {
expandedAccount.mintingAccountData.setLevel(newLevel);
LOGGER.trace("Also bumped {} to level {}", expandedAccount.mintingAccountData.getAddress(), newLevel);
}
if (!expandedAccount.isRecipientAlsoMinter) {
newLevel = bumpedAccounts.get(expandedAccount.recipientAccountData.getAddress());
if (newLevel != null && expandedAccount.recipientAccountData.getLevel() != newLevel) {
expandedAccount.recipientAccountData.setLevel(newLevel);
LOGGER.trace("Also bumped {} to level {}", expandedAccount.recipientAccountData.getAddress(), newLevel);
}
}
}
}
}
protected void processBlockRewards() throws DataException {
@@ -1531,12 +1686,14 @@ public class Block {
transactionData.getSignature());
this.repository.getBlockRepository().save(blockTransactionData);
// Update transaction's height in repository
// Update transaction's height in repository and local transactionData
transactionRepository.updateBlockHeight(transactionData.getSignature(), this.blockData.getHeight());
// Update local transactionData's height too
transaction.getTransactionData().setBlockHeight(this.blockData.getHeight());
// Update transaction's sequence in repository and local transactionData
transactionRepository.updateBlockSequence(transactionData.getSignature(), sequence);
transaction.getTransactionData().setBlockSequence(sequence);
// No longer unconfirmed
transactionRepository.confirmTransaction(transactionData.getSignature());
@@ -1573,6 +1730,9 @@ public class Block {
// Revert fix for block 212937
Block212937.orphanFix(this);
else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height())
SelfSponsorshipAlgoV1Block.orphanAccountPenalties(this);
// Block rewards, including transaction fees, removed after transactions undone
orphanBlockRewards();
@@ -1585,9 +1745,6 @@ public class Block {
this.blockData.setHeight(null);
postBlockTidy();
// Remove any cached, valid online accounts data from Controller
OnlineAccountsManager.getInstance().popLatestBlocksOnlineAccounts();
}
protected void orphanTransactionsFromBlock() throws DataException {
@@ -1623,6 +1780,9 @@ public class Block {
// Unset height
transactionRepository.updateBlockHeight(transactionData.getSignature(), null);
// Unset sequence
transactionRepository.updateBlockSequence(transactionData.getSignature(), null);
}
transactionRepository.deleteParticipants(transactionData);
@@ -1704,7 +1864,7 @@ public class Block {
accountData.setBlocksMinted(accountData.getBlocksMinted() - 1);
LOGGER.trace(() -> String.format("Block minter %s down to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment();
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
@@ -1824,13 +1984,72 @@ public class Block {
final List<ExpandedAccount> onlineFounderAccounts = expandedAccounts.stream().filter(expandedAccount -> expandedAccount.isMinterFounder).collect(Collectors.toList());
final boolean haveFounders = !onlineFounderAccounts.isEmpty();
// Select the correct set of share bins based on block height
List<AccountLevelShareBin> accountLevelShareBinsForBlock = (this.blockData.getHeight() >= BlockChain.getInstance().getSharesByLevelV2Height()) ?
BlockChain.getInstance().getAccountLevelShareBinsV2() : BlockChain.getInstance().getAccountLevelShareBinsV1();
// Determine reward candidates based on account level
List<AccountLevelShareBin> accountLevelShareBins = BlockChain.getInstance().getAccountLevelShareBins();
for (int binIndex = 0; binIndex < accountLevelShareBins.size(); ++binIndex) {
// Find all accounts in share bin. getShareBin() returns null for minter accounts that are also founders, so they are effectively filtered out.
// This needs a deep copy, so the shares can be modified when tiers aren't activated yet
List<AccountLevelShareBin> accountLevelShareBins = new ArrayList<>();
for (AccountLevelShareBin accountLevelShareBin : accountLevelShareBinsForBlock) {
accountLevelShareBins.add((AccountLevelShareBin) accountLevelShareBin.clone());
}
Map<Integer, List<ExpandedAccount>> accountsForShareBin = new HashMap<>();
// We might need to combine some share bins if they haven't reached the minimum number of minters yet
for (int binIndex = accountLevelShareBins.size()-1; binIndex >= 0; --binIndex) {
AccountLevelShareBin accountLevelShareBin = accountLevelShareBins.get(binIndex);
// Object reference compare is OK as all references are read-only from blockchain config.
List<ExpandedAccount> binnedAccounts = expandedAccounts.stream().filter(accountInfo -> accountInfo.getShareBin(this.blockData.getHeight()) == accountLevelShareBin).collect(Collectors.toList());
// Find all accounts in share bin. getShareBin() returns null for minter accounts that are also founders, so they are effectively filtered out.
List<ExpandedAccount> binnedAccounts = expandedAccounts.stream().filter(accountInfo -> accountInfo.hasShareBin(accountLevelShareBin, this.blockData.getHeight())).collect(Collectors.toList());
// Add any accounts that have been moved down from a higher tier
List<ExpandedAccount> existingBinnedAccounts = accountsForShareBin.get(binIndex);
if (existingBinnedAccounts != null)
binnedAccounts.addAll(existingBinnedAccounts);
// Logic below may only apply to higher levels, and only for share bins with a specific range of online accounts
if (accountLevelShareBin.levels.get(0) < BlockChain.getInstance().getShareBinActivationMinLevel() ||
binnedAccounts.isEmpty() || binnedAccounts.size() >= BlockChain.getInstance().getMinAccountsToActivateShareBin()) {
// Add all accounts for this share bin to the accountsForShareBin list
accountsForShareBin.put(binIndex, binnedAccounts);
continue;
}
// Share bin contains at least one minter, but fewer than the minimum. We treat this share bin
// as not activated yet. In these cases, the rewards and minters are combined and paid out to the previous
// share bin, to prevent a single or handful of accounts receiving the entire rewards for a share bin.
//
// Example:
//
// - Share bin for levels 5 and 6 has 100 minters
// - Share bin for levels 7 and 8 has 10 minters
//
// This is below the minimum of 30, so share bins are reconstructed as follows:
//
// - Share bin for levels 5 and 6 now contains 110 minters
// - Share bin for levels 7 and 8 now contains 0 minters
// - Share bin for levels 5 and 6 now pays out rewards for levels 5, 6, 7, and 8
// - Share bin for levels 7 and 8 pays zero rewards
//
// This process is iterative, so will combine several tiers if needed.
// Designate this share bin as empty
accountsForShareBin.put(binIndex, new ArrayList<>());
// Move the accounts originally intended for this share bin to the previous one
accountsForShareBin.put(binIndex - 1, binnedAccounts);
// Move the block reward from this share bin to the previous one
AccountLevelShareBin previousShareBin = accountLevelShareBins.get(binIndex - 1);
previousShareBin.share += accountLevelShareBin.share;
accountLevelShareBin.share = 0L;
}
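The worked example in the comments above reduces to a small loop over plain data: walk the bins from the highest tier down, and whenever a bin is non-empty but under-populated, fold its minters and its reward share into the bin below. A standalone sketch of that pass, with the minimum-minter count and activation level passed in as parameters (the real values come from the blockchain config):

import java.util.ArrayList;
import java.util.List;

public class ShareBinCombineSketch {
    static class Bin {
        final int lowestLevel;
        long share;                          // fixed-point share of the block reward
        List<String> minters = new ArrayList<>();

        Bin(int lowestLevel, long share) {
            this.lowestLevel = lowestLevel;
            this.share = share;
        }
    }

    /** Walk bins from the highest tier down, folding under-populated high-level bins into the bin below. */
    public static void combineUnderPopulatedBins(List<Bin> bins, int activationMinLevel, int minMintersToActivate) {
        for (int binIndex = bins.size() - 1; binIndex > 0; --binIndex) {
            Bin bin = bins.get(binIndex);

            // Low tiers, empty bins and sufficiently populated bins are left alone
            if (bin.lowestLevel < activationMinLevel || bin.minters.isEmpty() || bin.minters.size() >= minMintersToActivate)
                continue;

            // Under-populated: move both the minters and the reward share down one tier.
            // Because the loop keeps moving downwards, several tiers can be combined in turn.
            Bin previousBin = bins.get(binIndex - 1);
            previousBin.minters.addAll(bin.minters);
            previousBin.share += bin.share;
            bin.minters.clear();
            bin.share = 0L;
        }
    }
}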
// Now loop through (potentially modified) share bins and determine the reward candidates
for (int binIndex = 0; binIndex < accountLevelShareBins.size(); ++binIndex) {
AccountLevelShareBin accountLevelShareBin = accountLevelShareBins.get(binIndex);
List<ExpandedAccount> binnedAccounts = accountsForShareBin.get(binIndex);
// No online accounts in this bin? Skip to next one
if (binnedAccounts.isEmpty())
@@ -1848,7 +2067,7 @@ public class Block {
// Fetch list of legacy QORA holders who haven't reached their cap of QORT reward.
List<EligibleQoraHolderData> qoraHolders = this.repository.getAccountRepository().getEligibleLegacyQoraHolders(isProcessingNotOrphaning ? null : this.blockData.getHeight());
final boolean haveQoraHolders = !qoraHolders.isEmpty();
final long qoraHoldersShare = BlockChain.getInstance().getQoraHoldersShare();
final long qoraHoldersShare = BlockChain.getInstance().getQoraHoldersShareAtHeight(this.blockData.getHeight());
// Perform account-level-based reward scaling if appropriate
if (!haveFounders) {

View File

@@ -68,14 +68,28 @@ public class BlockChain {
atFindNextTransactionFix,
newBlockSigHeight,
shareBinFix,
sharesByLevelV2Height,
rewardShareLimitTimestamp,
calcChainWeightTimestamp,
transactionV5Timestamp;
transactionV5Timestamp,
transactionV6Timestamp,
disableReferenceTimestamp,
increaseOnlineAccountsDifficultyTimestamp,
onlineAccountMinterLevelValidationHeight,
selfSponsorshipAlgoV1Height,
feeValidationFixTimestamp,
chatReferenceTimestamp,
arbitraryOptionalFeeTimestamp;
}
// Custom transaction fees
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private long nameRegistrationUnitFee;
private long nameRegistrationUnitFeeTimestamp;
/** Unit fees by transaction timestamp */
public static class UnitFeesByTimestamp {
public long timestamp;
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long fee;
}
private List<UnitFeesByTimestamp> nameRegistrationUnitFees;
/** Map of which blockchain features are enabled when (height/timestamp) */
@XmlJavaTypeAdapter(StringLongMapXmlAdapter.class)
@@ -87,6 +101,13 @@ public class BlockChain {
/** Whether only one registered name is allowed per account. */
private boolean oneNamePerAccount = false;
/** Checkpoints */
public static class Checkpoint {
public int height;
public String signature;
}
private List<Checkpoint> checkpoints;
/** Block rewards by block height */
public static class RewardByHeight {
public int height;
@@ -96,23 +117,48 @@ public class BlockChain {
private List<RewardByHeight> rewardsByHeight;
/** Share of block reward/fees by account level */
public static class AccountLevelShareBin {
public static class AccountLevelShareBin implements Cloneable {
public int id;
public List<Integer> levels;
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long share;
}
private List<AccountLevelShareBin> sharesByLevel;
/** Generated lookup of share-bin by account level */
private AccountLevelShareBin[] shareBinsByLevel;
/** Share of block reward/fees to legacy QORA coin holders */
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private Long qoraHoldersShare;
public Object clone() {
AccountLevelShareBin shareBinCopy = new AccountLevelShareBin();
List<Integer> levelsCopy = new ArrayList<>();
for (Integer level : this.levels) {
levelsCopy.add(level);
}
shareBinCopy.id = this.id;
shareBinCopy.levels = levelsCopy;
shareBinCopy.share = this.share;
return shareBinCopy;
}
}
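clone() above is a manual deep copy because the share values are mutated per block when under-populated bins are combined, and the levels list must not leak changes back into the cached blockchain config. A generic sketch of the same copy-before-mutate pattern (field names are illustrative only):

import java.util.ArrayList;
import java.util.List;

public class CopyBeforeMutateSketch {
    static class ShareBin {
        int id;
        List<Integer> levels = new ArrayList<>();
        long share;

        ShareBin copy() {
            ShareBin copy = new ShareBin();
            copy.id = this.id;
            copy.levels = new ArrayList<>(this.levels); // copy the list so the cached original cannot be modified
            copy.share = this.share;
            return copy;
        }
    }

    public static List<ShareBin> workingCopy(List<ShareBin> configuredBins) {
        List<ShareBin> copies = new ArrayList<>();
        for (ShareBin bin : configuredBins)
            copies.add(bin.copy());
        return copies;
    }
}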
private List<AccountLevelShareBin> sharesByLevelV1;
private List<AccountLevelShareBin> sharesByLevelV2;
/** Generated lookup of share-bin by account level */
private AccountLevelShareBin[] shareBinsByLevelV1;
private AccountLevelShareBin[] shareBinsByLevelV2;
/** Share of block reward/fees to legacy QORA coin holders, by block height */
public static class ShareByHeight {
public int height;
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long share;
}
private List<ShareByHeight> qoraHoldersShareByHeight;
/** How many legacy QORA per 1 QORT of block reward. */
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private Long qoraPerQortReward;
/** Minimum number of accounts before a share bin is considered activated */
private int minAccountsToActivateShareBin;
/** Minimum level at which share bin activation takes place; lower levels allow fewer than minAccountsToActivateShareBin minters */
private int shareBinActivationMinLevel;
/**
* Number of minted blocks required to reach next level from previous.
* <p>
@@ -150,7 +196,7 @@ public class BlockChain {
private int minAccountLevelToMint;
private int minAccountLevelForBlockSubmissions;
private int minAccountLevelToRewardShare;
private int maxRewardSharesPerMintingAccount;
private int maxRewardSharesPerFounderMintingAccount;
private int founderEffectiveMintingLevel;
/** Minimum time to retain online account signatures (ms) for block validity checks. */
@@ -158,6 +204,20 @@ public class BlockChain {
/** Maximum time to retain online account signatures (ms) for block validity checks, to allow for clock variance. */
private long onlineAccountSignaturesMaxLifetime;
/** Feature trigger timestamp for ONLINE_ACCOUNTS_MODULUS time interval increase. Can't use
* featureTriggers because unit tests need to set this value via Reflection. */
private long onlineAccountsModulusV2Timestamp;
/** Snapshot timestamp for self sponsorship algo V1 */
private long selfSponsorshipAlgoV1SnapshotTimestamp;
/** Max reward shares by block height */
public static class MaxRewardSharesByTimestamp {
public long timestamp;
public int maxShares;
}
private List<MaxRewardSharesByTimestamp> maxRewardSharesByTimestamp;
/** Settings relating to CIYAM AT feature. */
public static class CiyamAtSettings {
/** Fee per step/op-code executed. */
@@ -306,14 +366,14 @@ public class BlockChain {
return this.maxBlockSize;
}
// Custom transaction fees
public long getNameRegistrationUnitFee() {
return this.nameRegistrationUnitFee;
// Online accounts
public long getOnlineAccountsModulusV2Timestamp() {
return this.onlineAccountsModulusV2Timestamp;
}
public long getNameRegistrationUnitFeeTimestamp() {
// FUTURE: we could use a separate structure to indicate fee adjustments for different transaction types
return this.nameRegistrationUnitFeeTimestamp;
// Self sponsorship algo
public long getSelfSponsorshipAlgoV1SnapshotTimestamp() {
return this.selfSponsorshipAlgoV1SnapshotTimestamp;
}
/** Returns true if approval-needing transaction types require a txGroupId other than NO_GROUP. */
@@ -329,16 +389,28 @@ public class BlockChain {
return this.oneNamePerAccount;
}
public List<Checkpoint> getCheckpoints() {
return this.checkpoints;
}
public List<RewardByHeight> getBlockRewardsByHeight() {
return this.rewardsByHeight;
}
public List<AccountLevelShareBin> getAccountLevelShareBins() {
return this.sharesByLevel;
public List<AccountLevelShareBin> getAccountLevelShareBinsV1() {
return this.sharesByLevelV1;
}
public AccountLevelShareBin[] getShareBinsByAccountLevel() {
return this.shareBinsByLevel;
public List<AccountLevelShareBin> getAccountLevelShareBinsV2() {
return this.sharesByLevelV2;
}
public AccountLevelShareBin[] getShareBinsByAccountLevelV1() {
return this.shareBinsByLevelV1;
}
public AccountLevelShareBin[] getShareBinsByAccountLevelV2() {
return this.shareBinsByLevelV2;
}
public List<Integer> getBlocksNeededByLevel() {
@@ -349,14 +421,18 @@ public class BlockChain {
return this.cumulativeBlocksByLevel;
}
public long getQoraHoldersShare() {
return this.qoraHoldersShare;
}
public long getQoraPerQortReward() {
return this.qoraPerQortReward;
}
public int getMinAccountsToActivateShareBin() {
return this.minAccountsToActivateShareBin;
}
public int getShareBinActivationMinLevel() {
return this.shareBinActivationMinLevel;
}
public int getMinAccountLevelToMint() {
return this.minAccountLevelToMint;
}
@@ -369,8 +445,8 @@ public class BlockChain {
return this.minAccountLevelToRewardShare;
}
public int getMaxRewardSharesPerMintingAccount() {
return this.maxRewardSharesPerMintingAccount;
public int getMaxRewardSharesPerFounderMintingAccount() {
return this.maxRewardSharesPerFounderMintingAccount;
}
public int getFounderEffectiveMintingLevel() {
@@ -403,6 +479,14 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.shareBinFix.name()).intValue();
}
public int getSharesByLevelV2Height() {
return this.featureTriggers.get(FeatureTrigger.sharesByLevelV2Height.name()).intValue();
}
public long getRewardShareLimitTimestamp() {
return this.featureTriggers.get(FeatureTrigger.rewardShareLimitTimestamp.name()).longValue();
}
public long getCalcChainWeightTimestamp() {
return this.featureTriggers.get(FeatureTrigger.calcChainWeightTimestamp.name()).longValue();
}
@@ -411,6 +495,39 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.transactionV5Timestamp.name()).longValue();
}
public long getTransactionV6Timestamp() {
return this.featureTriggers.get(FeatureTrigger.transactionV6Timestamp.name()).longValue();
}
public long getDisableReferenceTimestamp() {
return this.featureTriggers.get(FeatureTrigger.disableReferenceTimestamp.name()).longValue();
}
public long getIncreaseOnlineAccountsDifficultyTimestamp() {
return this.featureTriggers.get(FeatureTrigger.increaseOnlineAccountsDifficultyTimestamp.name()).longValue();
}
public int getSelfSponsorshipAlgoV1Height() {
return this.featureTriggers.get(FeatureTrigger.selfSponsorshipAlgoV1Height.name()).intValue();
}
public long getOnlineAccountMinterLevelValidationHeight() {
return this.featureTriggers.get(FeatureTrigger.onlineAccountMinterLevelValidationHeight.name()).intValue();
}
public long getFeeValidationFixTimestamp() {
return this.featureTriggers.get(FeatureTrigger.feeValidationFixTimestamp.name()).longValue();
}
public long getChatReferenceTimestamp() {
return this.featureTriggers.get(FeatureTrigger.chatReferenceTimestamp.name()).longValue();
}
public long getArbitraryOptionalFeeTimestamp() {
return this.featureTriggers.get(FeatureTrigger.arbitraryOptionalFeeTimestamp.name()).longValue();
}
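All of the getters above read from a single String-to-Long featureTriggers map parsed from the blockchain config; height-based triggers are typically narrowed to int, while timestamp-based ones stay long. A small sketch of that convention, with invented trigger names and values:

import java.util.HashMap;
import java.util.Map;

public class FeatureTriggerSketch {
    private final Map<String, Long> featureTriggers = new HashMap<>();

    public FeatureTriggerSketch() {
        // Invented trigger names and values, purely for illustration
        featureTriggers.put("exampleHeightTrigger", 1_000_000L);            // activates at a block height
        featureTriggers.put("exampleTimestampTrigger", 1_700_000_000_000L); // activates at a millisecond timestamp
    }

    public int getExampleHeightTrigger() {
        return featureTriggers.get("exampleHeightTrigger").intValue();
    }

    public long getExampleTimestampTrigger() {
        return featureTriggers.get("exampleTimestampTrigger").longValue();
    }

    public boolean isActiveAtHeight(int blockHeight) {
        return blockHeight >= getExampleHeightTrigger();
    }
}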
// More complex getters for aspects that change by height or timestamp
public long getRewardAtHeight(int ourHeight) {
@@ -430,6 +547,32 @@ public class BlockChain {
throw new IllegalStateException(String.format("No block timing info available for height %d", ourHeight));
}
public long getNameRegistrationUnitFeeAtTimestamp(long ourTimestamp) {
for (int i = nameRegistrationUnitFees.size() - 1; i >= 0; --i)
if (nameRegistrationUnitFees.get(i).timestamp <= ourTimestamp)
return nameRegistrationUnitFees.get(i).fee;
// Default to system-wide unit fee
return this.getUnitFee();
}
public int getMaxRewardSharesAtTimestamp(long ourTimestamp) {
for (int i = maxRewardSharesByTimestamp.size() - 1; i >= 0; --i)
if (maxRewardSharesByTimestamp.get(i).timestamp <= ourTimestamp)
return maxRewardSharesByTimestamp.get(i).maxShares;
return 0;
}
public long getQoraHoldersShareAtHeight(int ourHeight) {
// Scan through for QORA share at our height
for (int i = qoraHoldersShareByHeight.size() - 1; i >= 0; --i)
if (qoraHoldersShareByHeight.get(i).height <= ourHeight)
return qoraHoldersShareByHeight.get(i).share;
return 0;
}
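getNameRegistrationUnitFeeAtTimestamp, getMaxRewardSharesAtTimestamp and getQoraHoldersShareAtHeight all use the same lookup: scan a list of (threshold, value) entries from newest to oldest and return the first whose threshold is at or below the query height or timestamp. A generic sketch of that pattern (names here are illustrative, not from the codebase):

import java.util.List;

public class ThresholdLookupSketch {
    static class Entry {
        final long threshold; // height or timestamp at which this value takes effect
        final long value;     // fee, share, max-shares, etc.

        Entry(long threshold, long value) {
            this.threshold = threshold;
            this.value = value;
        }
    }

    /** Entries must be sorted by ascending threshold; returns defaultValue if no entry applies yet. */
    public static long valueAt(List<Entry> entries, long query, long defaultValue) {
        for (int i = entries.size() - 1; i >= 0; --i)
            if (entries.get(i).threshold <= query)
                return entries.get(i).value;
        return defaultValue;
    }
}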
/** Validate blockchain config read from JSON */
private void validateConfig() {
if (this.genesisInfo == null)
@@ -438,11 +581,14 @@ public class BlockChain {
if (this.rewardsByHeight == null)
Settings.throwValidationError("No \"rewardsByHeight\" entry found in blockchain config");
if (this.sharesByLevel == null)
Settings.throwValidationError("No \"sharesByLevel\" entry found in blockchain config");
if (this.sharesByLevelV1 == null)
Settings.throwValidationError("No \"sharesByLevelV1\" entry found in blockchain config");
if (this.qoraHoldersShare == null)
Settings.throwValidationError("No \"qoraHoldersShare\" entry found in blockchain config");
if (this.sharesByLevelV2 == null)
Settings.throwValidationError("No \"sharesByLevelV2\" entry found in blockchain config");
if (this.qoraHoldersShareByHeight == null)
Settings.throwValidationError("No \"qoraHoldersShareByHeight\" entry found in blockchain config");
if (this.qoraPerQortReward == null)
Settings.throwValidationError("No \"qoraPerQortReward\" entry found in blockchain config");
@@ -479,13 +625,22 @@ public class BlockChain {
if (!this.featureTriggers.containsKey(featureTrigger.name()))
Settings.throwValidationError(String.format("Missing feature trigger \"%s\" in blockchain config", featureTrigger.name()));
// Check block reward share bounds
long totalShare = this.qoraHoldersShare;
// Check block reward share bounds (V1)
long totalShareV1 = this.qoraHoldersShareByHeight.get(0).share;
// Add share percents for account-level-based rewards
for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevel)
totalShare += accountLevelShareBin.share;
for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV1)
totalShareV1 += accountLevelShareBin.share;
if (totalShare < 0 || totalShare > 1_00000000L)
if (totalShareV1 < 0 || totalShareV1 > 1_00000000L)
Settings.throwValidationError("Total non-founder share out of bounds (0<x<1e8)");
// Check block reward share bounds (V2)
long totalShareV2 = this.qoraHoldersShareByHeight.get(1).share;
// Add share percents for account-level-based rewards
for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV2)
totalShareV2 += accountLevelShareBin.share;
if (totalShareV2 < 0 || totalShareV2 > 1_00000000L)
Settings.throwValidationError("Total non-founder share out of bounds (0<x<1e8)");
}
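Share values are fixed-point amounts with eight decimal places, so 1_00000000 represents 100%; the validation simply sums the QORA-holder share and every account-level bin and checks the total stays within that range. A sketch of the same arithmetic over invented share values:

import java.util.Arrays;
import java.util.List;

public class ShareBoundsSketch {
    private static final long ONE_HUNDRED_PERCENT = 1_00000000L; // fixed-point, 8 decimal places

    public static boolean totalShareInBounds(long qoraHoldersShare, List<Long> accountLevelShares) {
        long total = qoraHoldersShare;
        for (long share : accountLevelShares)
            total += share;
        return total >= 0 && total <= ONE_HUNDRED_PERCENT;
    }

    public static void main(String[] args) {
        // e.g. 20% to QORA holders plus 5%, 10% and 15% level bins leaves room for the founder share
        System.out.println(totalShareInBounds(20_000000L, Arrays.asList(5_000000L, 10_000000L, 15_000000L))); // true
        System.out.println(totalShareInBounds(80_000000L, Arrays.asList(30_000000L))); // false: exceeds 100%
    }
}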
@@ -501,23 +656,34 @@ public class BlockChain {
cumulativeBlocks += this.blocksNeededByLevel.get(level);
}
// Generate lookup-array for account-level share bins
AccountLevelShareBin lastAccountLevelShareBin = this.sharesByLevel.get(this.sharesByLevel.size() - 1);
final int lastLevel = lastAccountLevelShareBin.levels.get(lastAccountLevelShareBin.levels.size() - 1);
this.shareBinsByLevel = new AccountLevelShareBin[lastLevel];
for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevel)
// Generate lookup-array for account-level share bins (V1)
AccountLevelShareBin lastAccountLevelShareBinV1 = this.sharesByLevelV1.get(this.sharesByLevelV1.size() - 1);
final int lastLevelV1 = lastAccountLevelShareBinV1.levels.get(lastAccountLevelShareBinV1.levels.size() - 1);
this.shareBinsByLevelV1 = new AccountLevelShareBin[lastLevelV1];
for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV1)
for (int level : accountLevelShareBin.levels)
// level 1 stored at index 0, level 2 stored at index 1, etc.
// level 0 not allowed
this.shareBinsByLevel[level - 1] = accountLevelShareBin;
this.shareBinsByLevelV1[level - 1] = accountLevelShareBin;
// Generate lookup-array for account-level share bins (V2)
AccountLevelShareBin lastAccountLevelShareBinV2 = this.sharesByLevelV2.get(this.sharesByLevelV2.size() - 1);
final int lastLevelV2 = lastAccountLevelShareBinV2.levels.get(lastAccountLevelShareBinV2.levels.size() - 1);
this.shareBinsByLevelV2 = new AccountLevelShareBin[lastLevelV2];
for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV2)
for (int level : accountLevelShareBin.levels)
// level 1 stored at index 0, level 2 stored at index 1, etc.
// level 0 not allowed
this.shareBinsByLevelV2[level - 1] = accountLevelShareBin;
// Convert collections to unmodifiable form
this.rewardsByHeight = Collections.unmodifiableList(this.rewardsByHeight);
this.sharesByLevel = Collections.unmodifiableList(this.sharesByLevel);
this.sharesByLevelV1 = Collections.unmodifiableList(this.sharesByLevelV1);
this.sharesByLevelV2 = Collections.unmodifiableList(this.sharesByLevelV2);
this.blocksNeededByLevel = Collections.unmodifiableList(this.blocksNeededByLevel);
this.cumulativeBlocksByLevel = Collections.unmodifiableList(this.cumulativeBlocksByLevel);
this.blockTimingsByHeight = Collections.unmodifiableList(this.blockTimingsByHeight);
this.qoraHoldersShareByHeight = Collections.unmodifiableList(this.qoraHoldersShareByHeight);
}
/**
@@ -529,6 +695,7 @@ public class BlockChain {
boolean isTopOnly = Settings.getInstance().isTopOnly();
boolean archiveEnabled = Settings.getInstance().isArchiveEnabled();
boolean isLite = Settings.getInstance().isLite();
boolean canBootstrap = Settings.getInstance().getBootstrap();
boolean needsArchiveRebuild = false;
BlockData chainTip;
@@ -549,22 +716,44 @@ public class BlockChain {
}
}
}
// Validate checkpoints
// Limited to topOnly nodes for now, in order to reduce risk, and to solve a real-world problem with divergent topOnly nodes
// TODO: remove the isTopOnly conditional below once this feature has had more testing time
if (isTopOnly && !isLite) {
List<Checkpoint> checkpoints = BlockChain.getInstance().getCheckpoints();
for (Checkpoint checkpoint : checkpoints) {
BlockData blockData = repository.getBlockRepository().fromHeight(checkpoint.height);
if (blockData == null) {
// Try the archive
blockData = repository.getBlockArchiveRepository().fromHeight(checkpoint.height);
}
if (blockData == null) {
LOGGER.trace("Couldn't find block for height {}", checkpoint.height);
// This is likely due to the block being pruned, so is safe to ignore.
// Continue, as there might be other blocks we can check more definitively.
continue;
}
byte[] signature = Base58.decode(checkpoint.signature);
if (!Arrays.equals(signature, blockData.getSignature())) {
LOGGER.info("Error: block at height {} with signature {} doesn't match checkpoint sig: {}. Bootstrapping...", checkpoint.height, Base58.encode(blockData.getSignature()), checkpoint.signature);
needsArchiveRebuild = true;
break;
}
LOGGER.info("Block at height {} matches checkpoint signature", blockData.getHeight());
}
}
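The checkpoint pass above only ever forces a rebuild on a definite mismatch; a block that cannot be found (for example because it was pruned) is skipped rather than treated as a failure. A reduced sketch of that decision, with the Base58 decoding already done (the node uses org.qortal.utils.Base58 for that step):

import java.util.Arrays;
import java.util.Map;

public class CheckpointScanSketch {
    /**
     * expectedSignatures: checkpoint height -> decoded signature bytes from config.
     * storedSignatures:   height -> signature of the block we actually hold (absent if pruned).
     * Returns true only on a definite mismatch, i.e. the chain should be rebuilt.
     */
    public static boolean needsRebuild(Map<Integer, byte[]> expectedSignatures, Map<Integer, byte[]> storedSignatures) {
        for (Map.Entry<Integer, byte[]> checkpoint : expectedSignatures.entrySet()) {
            byte[] storedSignature = storedSignatures.get(checkpoint.getKey());
            if (storedSignature == null)
                continue; // block pruned or missing: can't verify, so don't force a rebuild

            if (!Arrays.equals(checkpoint.getValue(), storedSignature))
                return true; // stored block contradicts a trusted checkpoint
        }
        return false;
    }
}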
}
boolean hasBlocks = (chainTip != null && chainTip.getHeight() > 1);
// Check first block is Genesis Block
if (!isGenesisBlockValid() || needsArchiveRebuild) {
try {
rebuildBlockchain();
if (isTopOnly && hasBlocks) {
// Top-only mode is enabled and we have blocks, so it's possible that the genesis block has been pruned
// It's best not to validate it, and there's no real need to
} else {
// Check first block is Genesis Block
if (!isGenesisBlockValid() || needsArchiveRebuild) {
try {
rebuildBlockchain();
} catch (InterruptedException e) {
throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage()));
}
} catch (InterruptedException e) {
throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage()));
}
}
@@ -573,9 +762,7 @@ public class BlockChain {
try (final Repository repository = RepositoryManager.getRepository()) {
repository.checkConsistency();
// Set the number of blocks to validate based on the pruned state of the chain
// If pruned, subtract an extra 10 to allow room for error
int blocksToValidate = (isTopOnly || archiveEnabled) ? Settings.getInstance().getPruneBlockLimit() - 10 : 1440;
int blocksToValidate = Math.min(Settings.getInstance().getPruneBlockLimit() - 10, 1440);
int startHeight = Math.max(repository.getBlockRepository().getBlockchainHeight() - blocksToValidate, 1);
BlockData detachedBlockData = repository.getBlockRepository().getDetachedBlockSignature(startHeight);
@@ -684,6 +871,9 @@ public class BlockChain {
BlockData orphanBlockData = repository.getBlockRepository().fromHeight(height);
while (height > targetHeight) {
if (Controller.isStopping()) {
return false;
}
LOGGER.info(String.format("Forcably orphaning block %d", height));
Block block = new Block(repository, orphanBlockData);

View File

@@ -0,0 +1,133 @@
package org.qortal.block;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.account.SelfSponsorshipAlgoV1;
import org.qortal.api.model.AccountPenaltyStats;
import org.qortal.crypto.Crypto;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.AccountPenaltyData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.utils.Base58;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.stream.Collectors;
/**
* Self Sponsorship AlgoV1 Block
* <p>
* Selected block for the initial run on the "self sponsorship detection algorithm"
*/
public final class SelfSponsorshipAlgoV1Block {
private static final Logger LOGGER = LogManager.getLogger(SelfSponsorshipAlgoV1Block.class);
private SelfSponsorshipAlgoV1Block() {
/* Do not instantiate */
}
public static void processAccountPenalties(Block block) throws DataException {
LOGGER.info("Running algo for block processing - this will take a while...");
logPenaltyStats(block.repository);
long startTime = System.currentTimeMillis();
Set<AccountPenaltyData> penalties = getAccountPenalties(block.repository, -5000000);
block.repository.getAccountRepository().updateBlocksMintedPenalties(penalties);
long totalTime = System.currentTimeMillis() - startTime;
String hash = getHash(penalties.stream().map(p -> p.getAddress()).collect(Collectors.toList()));
LOGGER.info("{} penalty addresses processed (hash: {}). Total time taken: {} seconds", penalties.size(), hash, (int)(totalTime / 1000.0f));
logPenaltyStats(block.repository);
int updatedCount = updateAccountLevels(block.repository, penalties);
LOGGER.info("Account levels updated for {} penalty addresses", updatedCount);
}
public static void orphanAccountPenalties(Block block) throws DataException {
LOGGER.info("Running algo for block orphaning - this will take a while...");
logPenaltyStats(block.repository);
long startTime = System.currentTimeMillis();
Set<AccountPenaltyData> penalties = getAccountPenalties(block.repository, 5000000);
block.repository.getAccountRepository().updateBlocksMintedPenalties(penalties);
long totalTime = System.currentTimeMillis() - startTime;
String hash = getHash(penalties.stream().map(p -> p.getAddress()).collect(Collectors.toList()));
LOGGER.info("{} penalty addresses orphaned (hash: {}). Total time taken: {} seconds", penalties.size(), hash, (int)(totalTime / 1000.0f));
logPenaltyStats(block.repository);
int updatedCount = updateAccountLevels(block.repository, penalties);
LOGGER.info("Account levels updated for {} penalty addresses", updatedCount);
}
public static Set<AccountPenaltyData> getAccountPenalties(Repository repository, int penalty) throws DataException {
final long snapshotTimestamp = BlockChain.getInstance().getSelfSponsorshipAlgoV1SnapshotTimestamp();
Set<AccountPenaltyData> penalties = new LinkedHashSet<>();
List<String> addresses = repository.getTransactionRepository().getConfirmedRewardShareCreatorsExcludingSelfShares();
for (String address : addresses) {
//System.out.println(String.format("address: %s", address));
SelfSponsorshipAlgoV1 selfSponsorshipAlgoV1 = new SelfSponsorshipAlgoV1(repository, address, snapshotTimestamp, false);
selfSponsorshipAlgoV1.run();
//System.out.println(String.format("Penalty addresses: %d", selfSponsorshipAlgoV1.getPenaltyAddresses().size()));
for (String penaltyAddress : selfSponsorshipAlgoV1.getPenaltyAddresses()) {
penalties.add(new AccountPenaltyData(penaltyAddress, penalty));
}
}
return penalties;
}
private static int updateAccountLevels(Repository repository, Set<AccountPenaltyData> accountPenalties) throws DataException {
final List<Integer> cumulativeBlocksByLevel = BlockChain.getInstance().getCumulativeBlocksByLevel();
final int maximumLevel = cumulativeBlocksByLevel.size() - 1;
int updatedCount = 0;
for (AccountPenaltyData penaltyData : accountPenalties) {
AccountData accountData = repository.getAccountRepository().getAccount(penaltyData.getAddress());
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
// Shortcut for penalties
if (effectiveBlocksMinted < 0) {
accountData.setLevel(0);
repository.getAccountRepository().setLevel(accountData);
updatedCount++;
LOGGER.trace(() -> String.format("Block minter %s dropped to level %d", accountData.getAddress(), accountData.getLevel()));
continue;
}
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel) {
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
accountData.setLevel(newLevel);
repository.getAccountRepository().setLevel(accountData);
updatedCount++;
LOGGER.trace(() -> String.format("Block minter %s increased to level %d", accountData.getAddress(), accountData.getLevel()));
break;
}
}
}
return updatedCount;
}
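updateAccountLevels recomputes each penalised account's level from scratch: effective blocks minted (now including the negative penalty) is compared against the cumulative threshold table, scanning from the highest level down, with a shortcut straight to level 0 when the total goes negative. A compact sketch of that calculation with placeholder thresholds:

import java.util.Arrays;
import java.util.List;

public class LevelFromBlocksSketch {
    public static int levelFor(int blocksMinted, int adjustment, int penalty, List<Integer> cumulativeBlocksByLevel) {
        int effectiveBlocksMinted = blocksMinted + adjustment + penalty;
        if (effectiveBlocksMinted < 0)
            return 0; // heavy penalty: straight to level 0

        for (int level = cumulativeBlocksByLevel.size() - 1; level >= 0; --level)
            if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(level))
                return level;
        return 0;
    }

    public static void main(String[] args) {
        List<Integer> thresholds = Arrays.asList(0, 20, 70, 250, 650); // placeholder thresholds, not the real config
        System.out.println(levelFor(100, 0, 0, thresholds));           // 2 with these placeholders
        System.out.println(levelFor(100, 0, -5000000, thresholds));    // 0: the penalty dominates
    }
}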
private static void logPenaltyStats(Repository repository) {
try {
LOGGER.info(getPenaltyStats(repository));
} catch (DataException e) {}
}
private static AccountPenaltyStats getPenaltyStats(Repository repository) throws DataException {
List<AccountData> accounts = repository.getAccountRepository().getPenaltyAccounts();
return AccountPenaltyStats.fromAccounts(accounts);
}
public static String getHash(List<String> penaltyAddresses) {
if (penaltyAddresses == null || penaltyAddresses.isEmpty()) {
return null;
}
Collections.sort(penaltyAddresses);
return Base58.encode(Crypto.digest(StringUtils.join(penaltyAddresses).getBytes(StandardCharsets.UTF_8)));
}
}
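getHash above gives an order-independent fingerprint of the penalty set: the addresses are sorted, concatenated and digested, so two nodes that computed the same set will log the same hash. A hedged usage sketch, with SHA-256 plus hex output standing in for the node's Crypto.digest and Base58 encoding:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class PenaltyHashSketch {
    public static String fingerprint(List<String> penaltyAddresses) throws NoSuchAlgorithmException {
        if (penaltyAddresses == null || penaltyAddresses.isEmpty())
            return null;

        Collections.sort(penaltyAddresses); // sorting makes the result independent of input order
        String joined = String.join("", penaltyAddresses);
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(joined.getBytes(StandardCharsets.UTF_8));

        StringBuilder hex = new StringBuilder();
        for (byte b : digest)
            hex.append(String.format("%02x", b));
        return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // Same fingerprint regardless of the order the addresses arrive in
        System.out.println(fingerprint(Arrays.asList("Qaddr2", "Qaddr1")));
        System.out.println(fingerprint(Arrays.asList("Qaddr1", "Qaddr2")));
    }
}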

View File

@@ -15,6 +15,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -40,6 +41,7 @@ public class AutoUpdate extends Thread {
public static final String JAR_FILENAME = "qortal.jar";
public static final String NEW_JAR_FILENAME = "new-" + JAR_FILENAME;
public static final String AGENTLIB_JVM_HOLDER_ARG = "-DQORTAL_agentlib=";
private static final Logger LOGGER = LogManager.getLogger(AutoUpdate.class);
private static final long CHECK_INTERVAL = 20 * 60 * 1000L; // ms
@@ -243,6 +245,11 @@ public class AutoUpdate extends Thread {
// JVM arguments
javaCmd.addAll(ManagementFactory.getRuntimeMXBean().getInputArguments());
// Disable, but retain, any -agentlib JVM arg as sub-process might fail if it tries to reuse same port
javaCmd = javaCmd.stream()
.map(arg -> arg.replace("-agentlib", AGENTLIB_JVM_HOLDER_ARG))
.collect(Collectors.toList());
// Remove JNI options as they won't be supported by command-line 'java'
// These are typically added by the AdvancedInstaller Java launcher EXE
javaCmd.removeAll(Arrays.asList("abort", "exit", "vfprintf"));
@@ -261,10 +268,19 @@ public class AutoUpdate extends Thread {
Translator.INSTANCE.translate("SysTray", "APPLYING_UPDATE_AND_RESTARTING"),
MessageType.INFO);
new ProcessBuilder(javaCmd).start();
ProcessBuilder processBuilder = new ProcessBuilder(javaCmd);
// New process will inherit our stdout and stderr
processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
Process process = processBuilder.start();
// Nothing to pipe to new process, so close output stream (process's stdin)
process.getOutputStream().close();
return true; // applying update OK
} catch (IOException e) {
} catch (Exception e) {
LOGGER.error(String.format("Failed to apply update: %s", e.getMessage()));
try {
@@ -277,4 +293,77 @@ public class AutoUpdate extends Thread {
}
}
public static boolean attemptRestart() {
LOGGER.info(String.format("Restarting node..."));
// Give repository a chance to backup in case things go badly wrong (if enabled)
if (Settings.getInstance().getRepositoryBackupInterval() > 0) {
try {
// Timeout if the database isn't ready for backing up after 60 seconds
long timeout = 60 * 1000L;
RepositoryManager.backup(true, "backup", timeout);
} catch (TimeoutException e) {
LOGGER.info("Attempt to backup repository failed due to timeout: {}", e.getMessage());
// Continue with the node restart anyway...
}
}
// Call ApplyUpdate to end this process (unlocking current JAR so it can be replaced)
String javaHome = System.getProperty("java.home");
LOGGER.debug(String.format("Java home: %s", javaHome));
Path javaBinary = Paths.get(javaHome, "bin", "java");
LOGGER.debug(String.format("Java binary: %s", javaBinary));
try {
List<String> javaCmd = new ArrayList<>();
// Java runtime binary itself
javaCmd.add(javaBinary.toString());
// JVM arguments
javaCmd.addAll(ManagementFactory.getRuntimeMXBean().getInputArguments());
// Disable, but retain, any -agentlib JVM arg as sub-process might fail if it tries to reuse same port
javaCmd = javaCmd.stream()
.map(arg -> arg.replace("-agentlib", AGENTLIB_JVM_HOLDER_ARG))
.collect(Collectors.toList());
// Remove JNI options as they won't be supported by command-line 'java'
// These are typically added by the AdvancedInstaller Java launcher EXE
javaCmd.removeAll(Arrays.asList("abort", "exit", "vfprintf"));
// Call ApplyUpdate using JAR
javaCmd.addAll(Arrays.asList("-cp", JAR_FILENAME, ApplyUpdate.class.getCanonicalName()));
// Add command-line args saved from start-up
String[] savedArgs = Controller.getInstance().getSavedArgs();
if (savedArgs != null)
javaCmd.addAll(Arrays.asList(savedArgs));
LOGGER.info(String.format("Restarting node with: %s", String.join(" ", javaCmd)));
SysTray.getInstance().showMessage(Translator.INSTANCE.translate("SysTray", "AUTO_UPDATE"), //TODO
Translator.INSTANCE.translate("SysTray", "APPLYING_UPDATE_AND_RESTARTING"), //TODO
MessageType.INFO);
ProcessBuilder processBuilder = new ProcessBuilder(javaCmd);
// New process will inherit our stdout and stderr
processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
Process process = processBuilder.start();
// Nothing to pipe to new process, so close output stream (process's stdin)
process.getOutputStream().close();
return true; // restarting node OK
} catch (Exception e) {
LOGGER.error(String.format("Failed to restart node: %s", e.getMessage()));
return true; // repo was okay, even if applying update failed
}
}
}
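Both the update path and attemptRestart above follow the same relaunch recipe: rebuild a java command line from the current runtime, start the child with inherited stdout/stderr, and close its stdin because nothing will be piped to it. A condensed sketch of that recipe (the jar and main-class names are parameters here, not the project's constants):

import java.lang.management.ManagementFactory;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

public class RelaunchSketch {
    public static Process relaunch(String jarFilename, String mainClassName, String[] extraArgs) throws Exception {
        Path javaBinary = Paths.get(System.getProperty("java.home"), "bin", "java");

        List<String> command = new ArrayList<>();
        command.add(javaBinary.toString());
        command.addAll(ManagementFactory.getRuntimeMXBean().getInputArguments()); // reuse current JVM args
        command.add("-cp");
        command.add(jarFilename);
        command.add(mainClassName);
        if (extraArgs != null)
            for (String arg : extraArgs)
                command.add(arg);

        ProcessBuilder processBuilder = new ProcessBuilder(command);
        processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT); // child shares our stdout
        processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);  // and our stderr
        Process process = processBuilder.start();
        process.getOutputStream().close(); // nothing to pipe to the child's stdin
        return process;
    }
}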

View File

@@ -35,6 +35,8 @@ import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import static org.junit.Assert.assertNotNull;
// Minting new blocks
public class BlockMinter extends Thread {
@@ -61,8 +63,12 @@ public class BlockMinter extends Thread {
public void run() {
Thread.currentThread().setName("BlockMinter");
try (final Repository repository = RepositoryManager.getRepository()) {
if (Settings.getInstance().getWipeUnconfirmedOnStart()) {
if (Settings.getInstance().isTopOnly() || Settings.getInstance().isLite()) {
// Top only and lite nodes do not sign blocks
return;
}
if (Settings.getInstance().getWipeUnconfirmedOnStart()) {
try (final Repository repository = RepositoryManager.getRepository()) {
// Wipe existing unconfirmed transactions
List<TransactionData> unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions();
@@ -72,356 +78,381 @@ public class BlockMinter extends Thread {
}
repository.saveChanges();
} catch (DataException e) {
LOGGER.warn("Repository issue trying to wipe unconfirmed transactions on start-up: {}", e.getMessage());
// Fall-through to normal behaviour in case we can recover
}
}
BlockData previousBlockData = null;
// Vars to keep track of blocks that were skipped due to chain weight
byte[] parentSignatureForLastLowWeightBlock = null;
Long timeOfLastLowWeightBlock = null;
List<Block> newBlocks = new ArrayList<>();
final boolean isSingleNodeTestnet = Settings.getInstance().isSingleNodeTestnet();
try (final Repository repository = RepositoryManager.getRepository()) {
// Going to need this a lot...
BlockRepository blockRepository = repository.getBlockRepository();
BlockData previousBlockData = null;
// Vars to keep track of blocks that were skipped due to chain weight
byte[] parentSignatureForLastLowWeightBlock = null;
Long timeOfLastLowWeightBlock = null;
List<Block> newBlocks = new ArrayList<>();
// Flags for tracking change in whether minting is possible,
// so we can notify Controller, and further update SysTray, etc.
boolean isMintingPossible = false;
boolean wasMintingPossible = isMintingPossible;
while (running) {
repository.discardChanges(); // Free repository locks, if any
if (isMintingPossible != wasMintingPossible)
Controller.getInstance().onMintingPossibleChange(isMintingPossible);
wasMintingPossible = isMintingPossible;
// Sleep for a while
Thread.sleep(1000);
isMintingPossible = false;
final Long now = NTP.getTime();
if (now == null)
continue;
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null)
continue;
// No online accounts? (e.g. during startup)
if (OnlineAccountsManager.getInstance().getOnlineAccounts().isEmpty())
continue;
List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
// No minting accounts?
if (mintingAccountsData.isEmpty())
continue;
// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
// Note that minting accounts are actually reward-shares in Qortal
Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
while (madi.hasNext()) {
MintingAccountData mintingAccountData = madi.next();
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
if (rewardShareData == null) {
// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
madi.remove();
continue;
}
Account mintingAccount = new Account(repository, rewardShareData.getMinter());
if (!mintingAccount.canMint()) {
// Minting-account component of reward-share can no longer mint - disregard
madi.remove();
continue;
}
// Optional (non-validated) prevention of block submissions below a defined level.
// This is an unvalidated version of BlockChain.minAccountLevelToMint
// and exists only to reduce block candidates by default.
int level = mintingAccount.getEffectiveMintingLevel();
if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
madi.remove();
continue;
}
}
// Needs a mutable copy of the unmodifiableList
List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
BlockData lastBlockData = blockRepository.getLastBlock();
// Disregard peers that have "misbehaved" recently
peers.removeIf(Controller.hasMisbehaved);
// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
if (Synchronizer.getInstance().getRecoveryMode() == false)
peers.removeIf(Controller.hasNoRecentBlock);
// Don't mint if we don't have enough up-to-date peers; otherwise, where would the transactions/consensus come from?
if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
continue;
// If we are stuck on an invalid block, we should allow an alternative to be minted
boolean recoverInvalidBlock = false;
if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
// We've had at least one invalid block
long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
// Last valid block was more than 10 mins ago, but we've had an invalid block since then
// Assume that the chain has stalled because there is no alternative valid candidate
// Enter recovery mode to allow alternative, valid candidates to be minted
recoverInvalidBlock = true;
}
}
}
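The nested checks above boil down to one condition: enter recovery mode only when no valid block has arrived for longer than the timeout while an invalid block has arrived within that window. A compact sketch of that condition:

public class InvalidBlockRecoverySketch {
    /** Mirrors the decision above; times are NTP milliseconds, timeout e.g. 10 minutes. */
    public static boolean shouldEnterRecovery(long now, long lastValidBlockTime, Long lastInvalidBlockTime, long recoveryTimeoutMs) {
        if (lastInvalidBlockTime == null)
            return false; // no invalid block seen, nothing to recover from

        long sinceLastValid = now - lastValidBlockTime;
        long sinceLastInvalid = now - lastInvalidBlockTime;
        return sinceLastValid > recoveryTimeoutMs && sinceLastInvalid < recoveryTimeoutMs;
    }
}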
// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false)
continue;
// There are enough peers with a recent block and our latest block is recent
// so go ahead and mint a block if possible.
isMintingPossible = true;
// Check blockchain hasn't changed
if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
previousBlockData = lastBlockData;
newBlocks.clear();
// Reduce log timeout
logTimeout = 10 * 1000L;
// Last low weight block is no longer valid
parentSignatureForLastLowWeightBlock = null;
}
// Discard accounts we have already built blocks with
mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
// Do we need to build any potential new blocks?
List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
// We might need to sit the next block out, if one of our minting accounts signed the previous one
final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
if (mintedLastBlock) {
LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
continue;
}
if (parentSignatureForLastLowWeightBlock != null) {
// The last iteration found a higher weight block in the network, so sleep for a while
// to allow us to sync the higher weight chain. We are sleeping here rather than when
// detected as we don't want to hold the blockchain lock open.
LOGGER.info("Sleeping for 10 seconds...");
Thread.sleep(10 * 1000L);
}
for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
// First block does the AT heavy-lifting
if (newBlocks.isEmpty()) {
Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
if (newBlock == null) {
// For some reason we can't mint right now
moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block"));
continue;
}
newBlocks.add(newBlock);
} else {
// The blocks for other minters require less effort...
Block newBlock = newBlocks.get(0).remint(mintingAccount);
if (newBlock == null) {
// For some reason we can't mint right now
moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
continue;
}
newBlocks.add(newBlock);
}
}
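The loop above follows a mint-once / remint-many pattern: only the first candidate pays the AT-processing cost in Block.mint(), and candidates for the remaining keys are derived from it with remint(). A condensed, hedged restatement of that idea (the helper method and its name are hypothetical):

private static List<Block> buildCandidates(Repository repository, BlockData parent,
        List<PrivateKeyAccount> minters) throws DataException {
    List<Block> candidates = new ArrayList<>();
    for (PrivateKeyAccount minter : minters) {
        Block candidate = candidates.isEmpty()
                ? Block.mint(repository, parent, minter)   // full mint, including AT heavy-lifting
                : candidates.get(0).remint(minter);        // reuse the first candidate's work
        if (candidate != null)
            candidates.add(candidate);
    }
    return candidates;
}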
// No potential block candidates?
if (newBlocks.isEmpty())
continue;
// Make sure we're the only thread modifying the blockchain
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
continue;
}
boolean newBlockMinted = false;
Block newBlock = null;
try {
// Clear repository session state so we have latest view of data
// Free up any repository locks
repository.discardChanges();
// Now that we have blockchain lock, do final check that chain hasn't changed
BlockData latestBlockData = blockRepository.getLastBlock();
if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
continue;
final Long now = NTP.getTime();
if (now == null)
continue;
List<Block> goodBlocks = new ArrayList<>();
for (Block testBlock : newBlocks) {
// Is new block's timestamp valid yet?
// We do a separate check as some timestamp checks are skipped for testchains
if (testBlock.isTimestampValid() != ValidationResult.OK)
continue;
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null)
continue;
testBlock.preProcess();
// No online accounts for current timestamp? (e.g. during startup)
if (!OnlineAccountsManager.getInstance().hasOnlineAccounts())
continue;
// Is new block valid yet? (Before adding unconfirmed transactions)
ValidationResult result = testBlock.isValid();
if (result != ValidationResult.OK) {
moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
continue;
}
goodBlocks.add(testBlock);
}
if (goodBlocks.isEmpty())
continue;
// Pick best block
final int parentHeight = previousBlockData.getHeight();
final byte[] parentBlockSignature = previousBlockData.getSignature();
BigInteger bestWeight = null;
for (int bi = 0; bi < goodBlocks.size(); ++bi) {
BlockData blockData = goodBlocks.get(bi).getBlockData();
BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
blockSummaryData.setMinterLevel(minterLevel);
BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
newBlock = goodBlocks.get(bi);
bestWeight = blockWeight;
}
}
try {
if (this.higherWeightChainExists(repository, bestWeight)) {
// Check if the base block has updated since the last time we were here
if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
// We've switched to a different chain, so reset the timer
timeOfLastLowWeightBlock = NTP.getTime();
}
parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
// If less than 30 seconds has passed since first detection of the higher weight chain,
// we should skip our block submission to give us the opportunity to sync to the better chain
if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
continue;
} else {
// More than 30 seconds have passed, so we should submit our block candidate anyway.
LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
}
} else {
LOGGER.debug("No higher weight chain found in peers");
}
} catch (DataException e) {
LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
}
// Discard any uncommitted changes as a result of the higher weight chain detection
repository.discardChanges();
// No potential block candidates?
if (newBlocks.isEmpty())
continue;
// Clear variables that track low weight blocks
parentSignatureForLastLowWeightBlock = null;
timeOfLastLowWeightBlock = null;
// Add unconfirmed transactions
addUnconfirmedTransactions(repository, newBlock);
// Sign to create block's signature
newBlock.sign();
// Is newBlock still valid?
ValidationResult validationResult = newBlock.isValid();
if (validationResult != ValidationResult.OK) {
// No longer valid? Report and discard
LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
// Rebuild block candidates, just to be sure
newBlocks.clear();
continue;
}
// Make sure we're the only thread modifying the blockchain
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
continue;
}
// Add to blockchain - something else will notice and broadcast new block to network
boolean newBlockMinted = false;
Block newBlock = null;
try {
// Clear repository session state so we have latest view of data
repository.discardChanges();
// Now that we have blockchain lock, do final check that chain hasn't changed
BlockData latestBlockData = blockRepository.getLastBlock();
if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
continue;
newBlock.process();
repository.saveChanges();
LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
if (rewardShareData != null) {
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
newBlock.getBlockData().getHeight(),
Base58.encode(newBlock.getBlockData().getSignature()),
Base58.encode(newBlock.getParent().getSignature()),
rewardShareData.getMinter(),
rewardShareData.getRecipient()));
} else {
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
newBlock.getBlockData().getHeight(),
Base58.encode(newBlock.getBlockData().getSignature()),
Base58.encode(newBlock.getParent().getSignature()),
newBlock.getMinter().getAddress()));
}
List<Block> goodBlocks = new ArrayList<>();
boolean wasInvalidBlockDiscarded = false;
Iterator<Block> newBlocksIterator = newBlocks.iterator();
while (newBlocksIterator.hasNext()) {
Block testBlock = newBlocksIterator.next();
// Is new block's timestamp valid yet?
// We do a separate check as some timestamp checks are skipped for testchains
if (testBlock.isTimestampValid() != ValidationResult.OK)
continue;
testBlock.preProcess();
// Is new block valid yet? (Before adding unconfirmed transactions)
ValidationResult result = testBlock.isValid();
if (result != ValidationResult.OK) {
moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
newBlocksIterator.remove();
wasInvalidBlockDiscarded = true;
/*
* Bail out fast so that we loop around from the top again.
* This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
* via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
* Particularly useful if some aspect of Blocks changes due to a timestamp-based feature-trigger (see BlockChain class).
*/
break;
}
goodBlocks.add(testBlock);
}
if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
continue;
// Notify network after we've released the blockchain lock
newBlockMinted = true;
// Notify Controller
repository.discardChanges(); // clear transaction status to prevent deadlocks
Controller.getInstance().onNewBlock(newBlock.getBlockData());
} catch (DataException e) {
// Unable to process block - report and discard
LOGGER.error("Unable to process newly minted block?", e);
newBlocks.clear();
// Pick best block
final int parentHeight = previousBlockData.getHeight();
final byte[] parentBlockSignature = previousBlockData.getSignature();
BigInteger bestWeight = null;
for (int bi = 0; bi < goodBlocks.size(); ++bi) {
BlockData blockData = goodBlocks.get(bi).getBlockData();
BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
blockSummaryData.setMinterLevel(minterLevel);
BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
newBlock = goodBlocks.get(bi);
bestWeight = blockWeight;
}
}
try {
if (this.higherWeightChainExists(repository, bestWeight)) {
// Check if the base block has updated since the last time we were here
if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
// We've switched to a different chain, so reset the timer
timeOfLastLowWeightBlock = NTP.getTime();
}
parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
// If less than 30 seconds has passed since first detection of the higher weight chain,
// we should skip our block submission to give us the opportunity to sync to the better chain
if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
continue;
} else {
// More than 30 seconds have passed, so we should submit our block candidate anyway.
LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
}
} else {
LOGGER.debug("No higher weight chain found in peers");
}
} catch (DataException e) {
LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
}
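The 30-second deferral above boils down to a small timer keyed on the parent block's signature; a minimal sketch of just that state machine, with assumed field names mirroring the code:

public final class LowWeightBlockTimer {
    private static final long WAIT_MS = 30 * 1000L; // matches the 30-second window described above

    private byte[] parentSignatureForLastLowWeightBlock = null;
    private Long timeOfLastLowWeightBlock = null;

    /** Returns true if we should hold off signing this round because a higher weight chain was seen recently. */
    public boolean shouldSkipSigning(byte[] currentParentSignature, long now) {
        if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null
                || !java.util.Arrays.equals(parentSignatureForLastLowWeightBlock, currentParentSignature)) {
            // Base block changed since the last detection, so restart the timer
            timeOfLastLowWeightBlock = now;
        }
        parentSignatureForLastLowWeightBlock = currentParentSignature;
        return now - timeOfLastLowWeightBlock < WAIT_MS;
    }
}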
// Discard any uncommitted changes as a result of the higher weight chain detection
repository.discardChanges();
// Clear variables that track low weight blocks
parentSignatureForLastLowWeightBlock = null;
timeOfLastLowWeightBlock = null;
// Add unconfirmed transactions
addUnconfirmedTransactions(repository, newBlock);
// Sign to create block's signature
newBlock.sign();
// Is newBlock still valid?
ValidationResult validationResult = newBlock.isValid();
if (validationResult != ValidationResult.OK) {
// No longer valid? Report and discard
LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
// Rebuild block candidates, just to be sure
newBlocks.clear();
continue;
}
// Add to blockchain - something else will notice and broadcast new block to network
try {
newBlock.process();
repository.saveChanges();
LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
if (rewardShareData != null) {
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
newBlock.getBlockData().getHeight(),
Base58.encode(newBlock.getBlockData().getSignature()),
Base58.encode(newBlock.getParent().getSignature()),
rewardShareData.getMinter(),
rewardShareData.getRecipient()));
} else {
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
newBlock.getBlockData().getHeight(),
Base58.encode(newBlock.getBlockData().getSignature()),
Base58.encode(newBlock.getParent().getSignature()),
newBlock.getMinter().getAddress()));
}
// Notify network after we've released the blockchain lock
newBlockMinted = true;
// Notify Controller
repository.discardChanges(); // clear transaction status to prevent deadlocks
Controller.getInstance().onNewBlock(newBlock.getBlockData());
} catch (DataException e) {
// Unable to process block - report and discard
LOGGER.error("Unable to process newly minted block?", e);
newBlocks.clear();
} catch (ArithmeticException e) {
// Unable to process block - report and discard
LOGGER.error("Unable to process newly minted block?", e);
newBlocks.clear();
}
} finally {
blockchainLock.unlock();
}
} finally {
blockchainLock.unlock();
}
if (newBlockMinted) {
// Broadcast our new chain to network
BlockData newBlockData = newBlock.getBlockData();
Network network = Network.getInstance();
network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData));
}
if (newBlockMinted) {
// Broadcast our new chain to network
Network.getInstance().broadcastOurChain();
}
} catch (InterruptedException e) {
// We've been interrupted - time to exit
return;
}
}
} catch (DataException e) {
LOGGER.warn("Repository issue while running block minter", e);
LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
} catch (InterruptedException e) {
// We've been interrupted - time to exit
return;
}
}
@@ -483,6 +514,21 @@ public class BlockMinter extends Thread {
PrivateKeyAccount mintingAccount = mintingAndOnlineAccounts[0];
Block block = mintTestingBlockRetainingTimestamps(repository, mintingAccount);
assertNotNull("Minted block must not be null", block);
return block;
}
public static Block mintTestingBlockUnvalidated(Repository repository, PrivateKeyAccount... mintingAndOnlineAccounts) throws DataException {
if (!BlockChain.getInstance().isTestChain())
throw new DataException("Ignoring attempt to mint testing block for non-test chain!");
// Ensure mintingAccount is 'online' so blocks can be minted
OnlineAccountsManager.getInstance().ensureTestingAccountsOnline(mintingAndOnlineAccounts);
PrivateKeyAccount mintingAccount = mintingAndOnlineAccounts[0];
return mintTestingBlockRetainingTimestamps(repository, mintingAccount);
}
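For context, a test might drive these helpers roughly as follows; the test class, fixture method and height assertions are hypothetical, and only the BlockMinter.mintTestingBlock(...) call mirrors the methods above.

public class MintTestingBlockExampleTest {
    @org.junit.Test
    public void mintsOneBlockOnTestChain() throws Exception {
        try (Repository repository = RepositoryManager.getRepository()) {
            PrivateKeyAccount rewardShareAccount = testRewardShareAccount(repository); // fixture not shown

            int heightBefore = repository.getBlockRepository().getBlockchainHeight();
            Block block = BlockMinter.mintTestingBlock(repository, rewardShareAccount);

            org.junit.Assert.assertNotNull(block);
            org.junit.Assert.assertEquals(heightBefore + 1, repository.getBlockRepository().getBlockchainHeight());
        }
    }

    // Placeholder - a real test would obtain this from its own account setup
    private PrivateKeyAccount testRewardShareAccount(Repository repository) {
        throw new UnsupportedOperationException("depends on the test harness's account setup");
    }
}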
@@ -490,6 +536,8 @@ public class BlockMinter extends Thread {
BlockData previousBlockData = repository.getBlockRepository().getLastBlock();
Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
if (newBlock == null)
return null;
// Make sure we're the only thread modifying the blockchain
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
@@ -552,18 +600,23 @@ public class BlockMinter extends Thread {
// This peer has common block data
CommonBlockData commonBlockData = peer.getCommonBlockData();
BlockSummaryData commonBlockSummaryData = commonBlockData.getCommonBlockSummary();
if (commonBlockData.getChainWeight() != null) {
if (commonBlockData.getChainWeight() != null && peer.getCommonBlockData().getBlockSummariesAfterCommonBlock() != null) {
// The synchronizer has calculated this peer's chain weight
BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock());
BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight);
BigInteger peerChainWeight = commonBlockData.getChainWeight();
if (peerChainWeight.compareTo(ourChainWeight) >= 0) {
// This peer has a higher weight chain than ours
LOGGER.debug("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
return true;
if (!Synchronizer.getInstance().containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) {
// .. and it doesn't hold any invalid blocks
BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock());
BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight);
BigInteger peerChainWeight = commonBlockData.getChainWeight();
if (peerChainWeight.compareTo(ourChainWeight) >= 0) {
// This peer has a higher weight chain than ours
LOGGER.info("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
return true;
} else {
LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
}
} else {
LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
LOGGER.debug("Peer {} has an invalid block", peer);
}
} else {
LOGGER.debug("Peer {} has no chain weight", peer);


@@ -29,9 +29,11 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
import org.qortal.account.Account;
import org.qortal.api.ApiService;
import org.qortal.api.DomainMapService;
import org.qortal.api.GatewayService;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
@@ -39,9 +41,11 @@ import org.qortal.controller.arbitrary.*;
import org.qortal.controller.repository.PruneManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.tradebot.TradeBot;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.block.BlockData;
import org.qortal.data.block.BlockSummaryData;
import org.qortal.data.network.PeerChainTipData;
import org.qortal.data.naming.NameData;
import org.qortal.data.network.PeerData;
import org.qortal.data.transaction.ChatTransactionData;
import org.qortal.data.transaction.TransactionData;
@@ -58,6 +62,7 @@ import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.transform.TransformationException;
import org.qortal.utils.*;
public class Controller extends Thread {
@@ -108,6 +113,7 @@ public class Controller extends Thread {
private long repositoryBackupTimestamp = startTime; // ms
private long repositoryMaintenanceTimestamp = startTime; // ms
private long repositoryCheckpointTimestamp = startTime; // ms
private long prunePeersTimestamp = startTime; // ms
private long ntpCheckTimestamp = startTime; // ms
private long deleteExpiredTimestamp = startTime + DELETE_EXPIRED_INTERVAL; // ms
@@ -178,6 +184,52 @@ public class Controller extends Thread {
}
public GetArbitraryMetadataMessageStats getArbitraryMetadataMessageStats = new GetArbitraryMetadataMessageStats();
public static class GetAccountMessageStats {
public AtomicLong requests = new AtomicLong();
public AtomicLong cacheHits = new AtomicLong();
public AtomicLong unknownAccounts = new AtomicLong();
public GetAccountMessageStats() {
}
}
public GetAccountMessageStats getAccountMessageStats = new GetAccountMessageStats();
public static class GetAccountBalanceMessageStats {
public AtomicLong requests = new AtomicLong();
public AtomicLong unknownAccounts = new AtomicLong();
public GetAccountBalanceMessageStats() {
}
}
public GetAccountBalanceMessageStats getAccountBalanceMessageStats = new GetAccountBalanceMessageStats();
public static class GetAccountTransactionsMessageStats {
public AtomicLong requests = new AtomicLong();
public AtomicLong unknownAccounts = new AtomicLong();
public GetAccountTransactionsMessageStats() {
}
}
public GetAccountTransactionsMessageStats getAccountTransactionsMessageStats = new GetAccountTransactionsMessageStats();
public static class GetAccountNamesMessageStats {
public AtomicLong requests = new AtomicLong();
public AtomicLong unknownAccounts = new AtomicLong();
public GetAccountNamesMessageStats() {
}
}
public GetAccountNamesMessageStats getAccountNamesMessageStats = new GetAccountNamesMessageStats();
public static class GetNameMessageStats {
public AtomicLong requests = new AtomicLong();
public AtomicLong unknownAccounts = new AtomicLong();
public GetNameMessageStats() {
}
}
public GetNameMessageStats getNameMessageStats = new GetNameMessageStats();
public AtomicLong latestBlocksCacheRefills = new AtomicLong();
public StatsSnapshot() {
@@ -265,6 +317,10 @@ public class Controller extends Thread {
}
}
public static long uptime() {
return System.currentTimeMillis() - Controller.startTime;
}
/** Returns highest block, or null if it's not available. */
public BlockData getChainTip() {
synchronized (this.latestBlocks) {
@@ -346,12 +402,11 @@ public class Controller extends Thread {
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
try (final Repository repository = RepositoryManager.getRepository()) {
RepositoryManager.archive(repository);
RepositoryManager.prune(repository);
RepositoryManager.rebuildTransactionSequences(repository);
}
} catch (DataException e) {
// If exception has no cause then repository is in use by some other process.
if (e.getCause() == null) {
// If exception has no cause or message then repository is in use by some other process.
if (e.getCause() == null && e.getMessage() == null) {
LOGGER.info("Repository in use by another process?");
Gui.getInstance().fatalError("Repository issue", "Repository in use by another process?");
} else {
@@ -362,23 +417,40 @@ public class Controller extends Thread {
return; // Not System.exit() so that GUI can display error
}
// Rebuild Names table and check database integrity (if enabled)
NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck();
namesDatabaseIntegrityCheck.rebuildAllNames();
if (Settings.getInstance().isNamesIntegrityCheckEnabled()) {
namesDatabaseIntegrityCheck.runIntegrityCheck();
// If we have a non-lite node, we need to perform some startup actions
if (!Settings.getInstance().isLite()) {
// Rebuild Names table and check database integrity (if enabled)
NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck();
namesDatabaseIntegrityCheck.rebuildAllNames();
if (Settings.getInstance().isNamesIntegrityCheckEnabled()) {
namesDatabaseIntegrityCheck.runIntegrityCheck();
}
LOGGER.info("Validating blockchain");
try {
BlockChain.validate();
Controller.getInstance().refillLatestBlocksCache();
LOGGER.info(String.format("Our chain height at start-up: %d", Controller.getInstance().getChainHeight()));
} catch (DataException e) {
LOGGER.error("Couldn't validate blockchain", e);
Gui.getInstance().fatalError("Blockchain validation issue", e);
return; // Not System.exit() so that GUI can display error
}
}
LOGGER.info("Validating blockchain");
try {
BlockChain.validate();
Controller.getInstance().refillLatestBlocksCache();
LOGGER.info(String.format("Our chain height at start-up: %d", Controller.getInstance().getChainHeight()));
try (Repository repository = RepositoryManager.getRepository()) {
if (RepositoryManager.needsTransactionSequenceRebuild(repository)) {
// Don't allow the node to start if transaction sequences haven't been built yet
// This is needed to handle a case when bootstrapping
LOGGER.error("Database upgrade needed. Please restart the core to complete the upgrade process.");
Gui.getInstance().fatalError("Database upgrade needed", "Please restart the core to complete the upgrade process.");
return;
}
} catch (DataException e) {
LOGGER.error("Couldn't validate blockchain", e);
Gui.getInstance().fatalError("Blockchain validation issue", e);
return; // Not System.exit() so that GUI can display error
LOGGER.error("Error checking transaction sequences in repository", e);
return;
}
// Import current trade bot states and minting accounts if they exist
@@ -441,6 +513,9 @@ public class Controller extends Thread {
AutoUpdate.getInstance().start();
}
LOGGER.info("Starting wallets");
PirateChainWalletController.getInstance().start();
LOGGER.info(String.format("Starting API on port %d", Settings.getInstance().getApiPort()));
try {
ApiService apiService = ApiService.getInstance();
@@ -497,6 +572,7 @@ public class Controller extends Thread {
final long repositoryBackupInterval = Settings.getInstance().getRepositoryBackupInterval();
final long repositoryCheckpointInterval = Settings.getInstance().getRepositoryCheckpointInterval();
long repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval();
final long prunePeersInterval = 5 * 60 * 1000L; // Every 5 minutes
// Start executor service for trimming or pruning
PruneManager.getInstance().start();
@@ -573,15 +649,20 @@ public class Controller extends Thread {
MessageType.INFO);
LOGGER.info("Starting scheduled repository maintenance. This can take a while...");
try (final Repository repository = RepositoryManager.getRepository()) {
int attempts = 0;
while (attempts <= 5) {
try (final Repository repository = RepositoryManager.getRepository()) {
attempts++;
// Timeout if the database isn't ready for maintenance after 60 seconds
long timeout = 60 * 1000L;
repository.performPeriodicMaintenance(timeout);
// Timeout if the database isn't ready for maintenance after 60 seconds
long timeout = 60 * 1000L;
repository.performPeriodicMaintenance(timeout);
LOGGER.info("Scheduled repository maintenance completed");
} catch (DataException | TimeoutException e) {
LOGGER.error("Scheduled repository maintenance failed", e);
LOGGER.info("Scheduled repository maintenance completed");
break;
} catch (DataException | TimeoutException e) {
LOGGER.info("Scheduled repository maintenance failed. Retrying up to 5 times...", e);
}
}
// Get a new random interval
@@ -589,10 +670,15 @@ public class Controller extends Thread {
}
// Prune stuck/slow/old peers
try {
Network.getInstance().prunePeers();
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage()));
}
if (now >= prunePeersTimestamp + prunePeersInterval) {
prunePeersTimestamp = now + prunePeersInterval;
try {
LOGGER.debug("Pruning peers...");
Network.getInstance().prunePeers();
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage()));
}
}
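The change above gates peer pruning with the same "timestamp plus interval" pattern used by the other periodic jobs in this loop; a tiny generic sketch of that pattern (hypothetical helper, not project code):

public final class IntervalGate {
    private final long intervalMs;
    private long nextRunTimestamp;

    public IntervalGate(long intervalMs, long now) {
        this.intervalMs = intervalMs;
        this.nextRunTimestamp = now + intervalMs;
    }

    /** Returns true at most once per interval; the caller runs its task when it does. */
    public boolean isDue(long now) {
        if (now < nextRunTimestamp)
            return false;
        nextRunTimestamp = now + intervalMs; // schedule the next run relative to now, as above
        return true;
    }
}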
// Delete expired transactions
@@ -657,25 +743,25 @@ public class Controller extends Thread {
public static final Predicate<Peer> hasNoRecentBlock = peer -> {
final Long minLatestBlockTimestamp = getMinimumLatestBlockTimestamp();
final PeerChainTipData peerChainTipData = peer.getChainTipData();
return peerChainTipData == null || peerChainTipData.getLastBlockTimestamp() == null || peerChainTipData.getLastBlockTimestamp() < minLatestBlockTimestamp;
final BlockSummaryData peerChainTipData = peer.getChainTipData();
return peerChainTipData == null || peerChainTipData.getTimestamp() == null || peerChainTipData.getTimestamp() < minLatestBlockTimestamp;
};
public static final Predicate<Peer> hasNoOrSameBlock = peer -> {
final BlockData latestBlockData = getInstance().getChainTip();
final PeerChainTipData peerChainTipData = peer.getChainTipData();
return peerChainTipData == null || peerChainTipData.getLastBlockSignature() == null || Arrays.equals(latestBlockData.getSignature(), peerChainTipData.getLastBlockSignature());
final BlockSummaryData peerChainTipData = peer.getChainTipData();
return peerChainTipData == null || peerChainTipData.getSignature() == null || Arrays.equals(latestBlockData.getSignature(), peerChainTipData.getSignature());
};
public static final Predicate<Peer> hasOnlyGenesisBlock = peer -> {
final PeerChainTipData peerChainTipData = peer.getChainTipData();
return peerChainTipData == null || peerChainTipData.getLastHeight() == null || peerChainTipData.getLastHeight() == 1;
final BlockSummaryData peerChainTipData = peer.getChainTipData();
return peerChainTipData == null || peerChainTipData.getHeight() == 1;
};
public static final Predicate<Peer> hasInferiorChainTip = peer -> {
final PeerChainTipData peerChainTipData = peer.getChainTipData();
final BlockSummaryData peerChainTipData = peer.getChainTipData();
final List<ByteArray> inferiorChainTips = Synchronizer.getInstance().inferiorChainSignatures;
return peerChainTipData == null || peerChainTipData.getLastBlockSignature() == null || inferiorChainTips.contains(new ByteArray(peerChainTipData.getLastBlockSignature()));
return peerChainTipData == null || peerChainTipData.getSignature() == null || inferiorChainTips.contains(ByteArray.wrap(peerChainTipData.getSignature()));
};
public static final Predicate<Peer> hasOldVersion = peer -> {
@@ -683,6 +769,28 @@ public class Controller extends Thread {
return peer.isAtLeastVersion(minPeerVersion) == false;
};
public static final Predicate<Peer> hasInvalidSigner = peer -> {
final BlockSummaryData peerChainTipData = peer.getChainTipData();
if (peerChainTipData == null)
return true;
try (Repository repository = RepositoryManager.getRepository()) {
return Account.getRewardShareEffectiveMintingLevel(repository, peerChainTipData.getMinterPublicKey()) == 0;
} catch (DataException e) {
return true;
}
};
public static final Predicate<Peer> wasRecentlyTooDivergent = peer -> {
Long now = NTP.getTime();
Long peerLastTooDivergentTime = peer.getLastTooDivergentTime();
if (now == null || peerLastTooDivergentTime == null)
return false;
// Exclude any peers that were TOO_DIVERGENT in the last 5 mins
return (now - peerLastTooDivergentTime < 5 * 60 * 1000L);
};
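These Predicate<Peer> fields are meant to be applied with removeIf when building candidate peer lists (BlockMinter does exactly this earlier in the diff); a short sketch of how they can be used and composed - the peer-list variable here is hypothetical:

List<Peer> candidatePeers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
candidatePeers.removeIf(Controller.hasMisbehaved);
candidatePeers.removeIf(Controller.hasNoRecentBlock);
// Predicates compose, so several filters can run in a single pass:
candidatePeers.removeIf(Controller.hasOldVersion.or(Controller.wasRecentlyTooDivergent));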
private long getRandomRepositoryMaintenanceInterval() {
final long minInterval = Settings.getInstance().getRepositoryMaintenanceMinInterval();
final long maxInterval = Settings.getInstance().getRepositoryMaintenanceMaxInterval();
@@ -727,19 +835,24 @@ public class Controller extends Thread {
String actionText;
// Use a more tolerant latest block timestamp in the isUpToDate() calls below to reduce misleading statuses.
// Any block in the last 30 minutes is considered "up to date" for the purposes of displaying statuses.
final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L);
// Any block in the last 2 hours is considered "up to date" for the purposes of displaying statuses.
// This also aligns with the time interval required for continued online account submission.
final Long minLatestBlockTimestamp = NTP.getTime() - (2 * 60 * 60 * 1000L);
// Only show sync percent if it's less than 100, to avoid confusion
final Integer syncPercent = Synchronizer.getInstance().getSyncPercent();
final boolean isSyncing = (syncPercent != null && syncPercent < 100);
synchronized (Synchronizer.getInstance().syncLock) {
if (this.isMintingPossible) {
actionText = Translator.INSTANCE.translate("SysTray", "MINTING_ENABLED");
SysTray.getInstance().setTrayIcon(2);
}
if (Settings.getInstance().isLite()) {
actionText = Translator.INSTANCE.translate("SysTray", "LITE_NODE");
SysTray.getInstance().setTrayIcon(4);
}
else if (numberOfPeers < Settings.getInstance().getMinBlockchainPeers()) {
actionText = Translator.INSTANCE.translate("SysTray", "CONNECTING");
SysTray.getInstance().setTrayIcon(3);
}
else if (!this.isUpToDate(minLatestBlockTimestamp) && Synchronizer.getInstance().isSynchronizing()) {
else if (!this.isUpToDate(minLatestBlockTimestamp) && isSyncing) {
actionText = String.format("%s - %d%%", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN"), Synchronizer.getInstance().getSyncPercent());
SysTray.getInstance().setTrayIcon(3);
}
@@ -747,13 +860,27 @@ public class Controller extends Thread {
actionText = String.format("%s", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN"));
SysTray.getInstance().setTrayIcon(3);
}
else if (OnlineAccountsManager.getInstance().hasActiveOnlineAccountSignatures()) {
actionText = Translator.INSTANCE.translate("SysTray", "MINTING_ENABLED");
SysTray.getInstance().setTrayIcon(2);
}
else {
actionText = Translator.INSTANCE.translate("SysTray", "MINTING_DISABLED");
SysTray.getInstance().setTrayIcon(4);
}
}
String tooltip = String.format("%s - %d %s - %s %d", actionText, numberOfPeers, connectionsText, heightText, height) + "\n" + String.format("%s: %s", Translator.INSTANCE.translate("SysTray", "BUILD_VERSION"), this.buildVersion);
String tooltip = String.format("%s - %d %s", actionText, numberOfPeers, connectionsText);
if (!Settings.getInstance().isLite()) {
tooltip = tooltip.concat(String.format(" - %s %d", heightText, height));
final Integer blocksRemaining = Synchronizer.getInstance().getBlocksRemaining();
if (blocksRemaining != null && blocksRemaining > 0) {
String blocksRemainingText = Translator.INSTANCE.translate("SysTray", "BLOCKS_REMAINING");
tooltip = tooltip.concat(String.format(" - %d %s", blocksRemaining, blocksRemainingText));
}
}
tooltip = tooltip.concat(String.format("\n%s: %s", Translator.INSTANCE.translate("SysTray", "BUILD_VERSION"), this.buildVersion));
SysTray.getInstance().setToolTipText(tooltip);
this.callbackExecutor.execute(() -> {
@@ -810,6 +937,9 @@ public class Controller extends Thread {
LOGGER.info("Shutting down API");
ApiService.getInstance().stop();
LOGGER.info("Shutting down wallets");
PirateChainWalletController.getInstance().shutdown();
if (Settings.getInstance().isAutoUpdateEnabled()) {
LOGGER.info("Shutting down auto-update");
AutoUpdate.getInstance().shutdown();
@@ -910,14 +1040,18 @@ public class Controller extends Thread {
// Callbacks for/from network
public void doNetworkBroadcast() {
if (Settings.getInstance().isLite()) {
// Lite nodes have nothing to broadcast
return;
}
Network network = Network.getInstance();
// Send (if outbound) / Request peer lists
network.broadcast(peer -> peer.isOutbound() ? network.buildPeersMessage(peer) : new GetPeersMessage());
// Send our current height
BlockData latestBlockData = getChainTip();
network.broadcast(peer -> network.buildHeightMessage(peer, latestBlockData));
network.broadcastOurChain();
// Request unconfirmed transaction signatures, but only if we're up-to-date.
// If we're NOT up-to-date then priority is synchronizing first
@@ -1124,6 +1258,10 @@ public class Controller extends Thread {
onNetworkHeightV2Message(peer, message);
break;
case BLOCK_SUMMARIES_V2:
onNetworkBlockSummariesV2Message(peer, message);
break;
case GET_TRANSACTION:
TransactionImporter.getInstance().onNetworkGetTransactionMessage(peer, message);
break;
@@ -1140,20 +1278,12 @@ public class Controller extends Thread {
TransactionImporter.getInstance().onNetworkTransactionSignaturesMessage(peer, message);
break;
case GET_ONLINE_ACCOUNTS:
OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsMessage(peer, message);
break;
case GET_ONLINE_ACCOUNTS_V3:
OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV3Message(peer, message);
break;
case ONLINE_ACCOUNTS:
OnlineAccountsManager.getInstance().onNetworkOnlineAccountsMessage(peer, message);
break;
case GET_ONLINE_ACCOUNTS_V2:
OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV2Message(peer, message);
break;
case ONLINE_ACCOUNTS_V2:
OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV2Message(peer, message);
break;
case ONLINE_ACCOUNTS_V3:
OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV3Message(peer, message);
break;
case GET_ARBITRARY_DATA:
@@ -1192,6 +1322,26 @@ public class Controller extends Thread {
TradeBot.getInstance().onTradePresencesMessage(peer, message);
break;
case GET_ACCOUNT:
onNetworkGetAccountMessage(peer, message);
break;
case GET_ACCOUNT_BALANCE:
onNetworkGetAccountBalanceMessage(peer, message);
break;
case GET_ACCOUNT_TRANSACTIONS:
onNetworkGetAccountTransactionsMessage(peer, message);
break;
case GET_ACCOUNT_NAMES:
onNetworkGetAccountNamesMessage(peer, message);
break;
case GET_NAME:
onNetworkGetNameMessage(peer, message);
break;
default:
LOGGER.debug(() -> String.format("Unhandled %s message [ID %d] from peer %s", message.getType().name(), message.getId(), peer));
break;
@@ -1203,7 +1353,7 @@ public class Controller extends Thread {
byte[] signature = getBlockMessage.getSignature();
this.stats.getBlockMessageStats.requests.incrementAndGet();
ByteArray signatureAsByteArray = new ByteArray(signature);
ByteArray signatureAsByteArray = ByteArray.wrap(signature);
CachedBlockMessage cachedBlockMessage = this.blockMessageCache.get(signatureAsByteArray);
int blockCacheSize = Settings.getInstance().getBlockCacheSize();
@@ -1213,7 +1363,7 @@ public class Controller extends Thread {
this.stats.getBlockMessageStats.cacheHits.incrementAndGet();
// We need to duplicate it to prevent multiple threads setting ID on the same message
CachedBlockMessage clonedBlockMessage = cachedBlockMessage.cloneWithNewId(message.getId());
CachedBlockMessage clonedBlockMessage = Message.cloneWithNewId(cachedBlockMessage, message.getId());
if (!peer.sendMessage(clonedBlockMessage))
peer.disconnect("failed to send block");
@@ -1234,9 +1384,24 @@ public class Controller extends Thread {
// If we have no block data, we should check the archive in case it's there
if (blockData == null) {
if (Settings.getInstance().isArchiveEnabled()) {
byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
if (bytes != null) {
CachedBlockMessage blockMessage = new CachedBlockMessage(bytes);
Triple<byte[], Integer, Integer> serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
if (serializedBlock != null) {
byte[] bytes = serializedBlock.getA();
Integer serializationVersion = serializedBlock.getB();
Message blockMessage;
switch (serializationVersion) {
case 1:
blockMessage = new CachedBlockMessage(bytes);
break;
case 2:
blockMessage = new CachedBlockV2Message(bytes);
break;
default:
return;
}
blockMessage.setId(message.getId());
// This call also causes the other needed data to be pulled in from repository
@@ -1259,8 +1424,10 @@ public class Controller extends Thread {
// Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout
LOGGER.debug(() -> String.format("Sending 'block unknown' response to peer %s for GET_BLOCK request for unknown block %s", peer, Base58.encode(signature)));
// We'll send empty block summaries message as it's very short
Message blockUnknownMessage = new BlockSummariesMessage(Collections.emptyList());
// Send generic 'unknown' message as it's very short
Message blockUnknownMessage = peer.getPeersVersion() >= GenericUnknownMessage.MINIMUM_PEER_VERSION
? new GenericUnknownMessage()
: new BlockSummariesMessage(Collections.emptyList());
blockUnknownMessage.setId(message.getId());
if (!peer.sendMessage(blockUnknownMessage))
peer.disconnect("failed to send block-unknown response");
@@ -1269,10 +1436,21 @@ public class Controller extends Thread {
Block block = new Block(repository, blockData);
// V2 support
if (peer.getPeersVersion() >= BlockV2Message.MIN_PEER_VERSION) {
Message blockMessage = new BlockV2Message(block);
blockMessage.setId(message.getId());
if (!peer.sendMessage(blockMessage)) {
peer.disconnect("failed to send block");
// Don't fall-through to caching because failure to send might be from failure to build message
return;
}
return;
}
CachedBlockMessage blockMessage = new CachedBlockMessage(block);
blockMessage.setId(message.getId());
// This call also causes the other needed data to be pulled in from repository
if (!peer.sendMessage(blockMessage)) {
peer.disconnect("failed to send block");
// Don't fall-through to caching because failure to send might be from failure to build message
@@ -1283,10 +1461,12 @@ public class Controller extends Thread {
if (getChainHeight() - blockData.getHeight() <= blockCacheSize) {
this.stats.getBlockMessageStats.cacheFills.incrementAndGet();
this.blockMessageCache.put(new ByteArray(blockData.getSignature()), blockMessage);
this.blockMessageCache.put(ByteArray.wrap(blockData.getSignature()), blockMessage);
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while send block %s to peer %s", Base58.encode(signature), peer), e);
LOGGER.error(String.format("Repository issue while sending block %s to peer %s", Base58.encode(signature), peer), e);
} catch (TransformationException e) {
LOGGER.error(String.format("Serialization issue while sending block %s to peer %s", Base58.encode(signature), peer), e);
}
}
@@ -1296,11 +1476,15 @@ public class Controller extends Thread {
this.stats.getBlockSummariesStats.requests.incrementAndGet();
// If peer's parent signature matches our latest block signature
// then we can short-circuit with an empty response
// then we have no blocks after that and can short-circuit with an empty response
BlockData chainTip = getChainTip();
if (chainTip != null && Arrays.equals(parentSignature, chainTip.getSignature())) {
Message blockSummariesMessage = new BlockSummariesMessage(Collections.emptyList());
Message blockSummariesMessage = peer.getPeersVersion() >= BlockSummariesV2Message.MINIMUM_PEER_VERSION
? new BlockSummariesV2Message(Collections.emptyList())
: new BlockSummariesMessage(Collections.emptyList());
blockSummariesMessage.setId(message.getId());
if (!peer.sendMessage(blockSummariesMessage))
peer.disconnect("failed to send block summaries");
@@ -1356,7 +1540,9 @@ public class Controller extends Thread {
this.stats.getBlockSummariesStats.fullyFromCache.incrementAndGet();
}
Message blockSummariesMessage = new BlockSummariesMessage(blockSummaries);
Message blockSummariesMessage = peer.getPeersVersion() >= BlockSummariesV2Message.MINIMUM_PEER_VERSION
? new BlockSummariesV2Message(blockSummaries)
: new BlockSummariesMessage(blockSummaries);
blockSummariesMessage.setId(message.getId());
if (!peer.sendMessage(blockSummariesMessage))
peer.disconnect("failed to send block summaries");
@@ -1427,20 +1613,250 @@ public class Controller extends Thread {
private void onNetworkHeightV2Message(Peer peer, Message message) {
HeightV2Message heightV2Message = (HeightV2Message) message;
// If peer is inbound and we've not updated their height
// then this is probably their initial HEIGHT_V2 message
// so they need a corresponding HEIGHT_V2 message from us
if (!peer.isOutbound() && (peer.getChainTipData() == null || peer.getChainTipData().getLastHeight() == null))
peer.sendMessage(Network.getInstance().buildHeightMessage(peer, getChainTip()));
if (!Settings.getInstance().isLite()) {
// If peer is inbound and we've not updated their height
// then this is probably their initial HEIGHT_V2 message
// so they need a corresponding HEIGHT_V2 message from us
if (!peer.isOutbound() && peer.getChainTipData() == null) {
Message responseMessage = Network.getInstance().buildHeightOrChainTipInfo(peer);
if (responseMessage == null || !peer.sendMessage(responseMessage)) {
peer.disconnect("failed to send our chain tip info");
return;
}
}
}
// Update peer chain tip data
PeerChainTipData newChainTipData = new PeerChainTipData(heightV2Message.getHeight(), heightV2Message.getSignature(), heightV2Message.getTimestamp(), heightV2Message.getMinterPublicKey());
BlockSummaryData newChainTipData = new BlockSummaryData(heightV2Message.getHeight(), heightV2Message.getSignature(), heightV2Message.getMinterPublicKey(), heightV2Message.getTimestamp());
peer.setChainTipData(newChainTipData);
// Potentially synchronize
Synchronizer.getInstance().requestSync();
}
private void onNetworkBlockSummariesV2Message(Peer peer, Message message) {
BlockSummariesV2Message blockSummariesV2Message = (BlockSummariesV2Message) message;
if (!Settings.getInstance().isLite()) {
// If peer is inbound and we've not updated their height
// then this is probably their initial BLOCK_SUMMARIES_V2 message
// so they need a corresponding BLOCK_SUMMARIES_V2 message from us
if (!peer.isOutbound() && peer.getChainTipData() == null) {
Message responseMessage = Network.getInstance().buildHeightOrChainTipInfo(peer);
if (responseMessage == null || !peer.sendMessage(responseMessage)) {
peer.disconnect("failed to send our chain tip info");
return;
}
}
}
if (message.hasId()) {
/*
* Experimental proof-of-concept: discard messages with ID
* These are 'late' reply messages received after timeout has expired,
* having been passed upwards from Peer to Network to Controller.
* Hence, these are NOT simple "here's my chain tip" broadcasts from other peers.
*/
LOGGER.debug("Discarding late {} message with ID {} from {}", message.getType().name(), message.getId(), peer);
return;
}
// Update peer chain tip data
peer.setChainTipSummaries(blockSummariesV2Message.getBlockSummaries());
// Potentially synchronize
Synchronizer.getInstance().requestSync();
}
private void onNetworkGetAccountMessage(Peer peer, Message message) {
GetAccountMessage getAccountMessage = (GetAccountMessage) message;
String address = getAccountMessage.getAddress();
this.stats.getAccountMessageStats.requests.incrementAndGet();
try (final Repository repository = RepositoryManager.getRepository()) {
AccountData accountData = repository.getAccountRepository().getAccount(address);
if (accountData == null) {
// We don't have this account
this.stats.getAccountMessageStats.unknownAccounts.getAndIncrement();
// Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT request for unknown account %s", peer, address));
// Send generic 'unknown' message as it's very short
Message accountUnknownMessage = new GenericUnknownMessage();
accountUnknownMessage.setId(message.getId());
if (!peer.sendMessage(accountUnknownMessage))
peer.disconnect("failed to send account-unknown response");
return;
}
AccountMessage accountMessage = new AccountMessage(accountData);
accountMessage.setId(message.getId());
if (!peer.sendMessage(accountMessage)) {
peer.disconnect("failed to send account");
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while send account %s to peer %s", address, peer), e);
}
}
private void onNetworkGetAccountBalanceMessage(Peer peer, Message message) {
GetAccountBalanceMessage getAccountBalanceMessage = (GetAccountBalanceMessage) message;
String address = getAccountBalanceMessage.getAddress();
long assetId = getAccountBalanceMessage.getAssetId();
this.stats.getAccountBalanceMessageStats.requests.incrementAndGet();
try (final Repository repository = RepositoryManager.getRepository()) {
AccountBalanceData accountBalanceData = repository.getAccountRepository().getBalance(address, assetId);
if (accountBalanceData == null) {
// We don't have this account
this.stats.getAccountBalanceMessageStats.unknownAccounts.getAndIncrement();
// Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_BALANCE request for unknown account %s and asset ID %d", peer, address, assetId));
// Send generic 'unknown' message as it's very short
Message accountUnknownMessage = new GenericUnknownMessage();
accountUnknownMessage.setId(message.getId());
if (!peer.sendMessage(accountUnknownMessage))
peer.disconnect("failed to send account-unknown response");
return;
}
AccountBalanceMessage accountMessage = new AccountBalanceMessage(accountBalanceData);
accountMessage.setId(message.getId());
if (!peer.sendMessage(accountMessage)) {
peer.disconnect("failed to send account balance");
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while send balance for account %s and asset ID %d to peer %s", address, assetId, peer), e);
}
}
private void onNetworkGetAccountTransactionsMessage(Peer peer, Message message) {
GetAccountTransactionsMessage getAccountTransactionsMessage = (GetAccountTransactionsMessage) message;
String address = getAccountTransactionsMessage.getAddress();
int limit = Math.min(getAccountTransactionsMessage.getLimit(), 100);
int offset = getAccountTransactionsMessage.getOffset();
this.stats.getAccountTransactionsMessageStats.requests.incrementAndGet();
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null,
null, null, null, address, TransactionsResource.ConfirmationStatus.CONFIRMED, limit, offset, false);
// Expand signatures to transactions
List<TransactionData> transactions = new ArrayList<>(signatures.size());
for (byte[] signature : signatures) {
transactions.add(repository.getTransactionRepository().fromSignature(signature));
}
if (transactions == null) {
// We don't have this account
this.stats.getAccountTransactionsMessageStats.unknownAccounts.getAndIncrement();
// Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_TRANSACTIONS request for unknown account %s", peer, address));
// Send generic 'unknown' message as it's very short
Message accountUnknownMessage = new GenericUnknownMessage();
accountUnknownMessage.setId(message.getId());
if (!peer.sendMessage(accountUnknownMessage))
peer.disconnect("failed to send account-unknown response");
return;
}
TransactionsMessage transactionsMessage = new TransactionsMessage(transactions);
transactionsMessage.setId(message.getId());
if (!peer.sendMessage(transactionsMessage)) {
peer.disconnect("failed to send account transactions");
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while sending transactions for account %s %d to peer %s", address, peer), e);
} catch (MessageException e) {
LOGGER.error(String.format("Message serialization issue while sending transactions for account %s %d to peer %s", address, peer), e);
}
}
private void onNetworkGetAccountNamesMessage(Peer peer, Message message) {
GetAccountNamesMessage getAccountNamesMessage = (GetAccountNamesMessage) message;
String address = getAccountNamesMessage.getAddress();
this.stats.getAccountNamesMessageStats.requests.incrementAndGet();
try (final Repository repository = RepositoryManager.getRepository()) {
List<NameData> namesDataList = repository.getNameRepository().getNamesByOwner(address);
if (namesDataList == null) {
// We don't have this account
this.stats.getAccountNamesMessageStats.unknownAccounts.getAndIncrement();
// Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_NAMES request for unknown account %s", peer, address));
// Send generic 'unknown' message as it's very short
Message accountUnknownMessage = new GenericUnknownMessage();
accountUnknownMessage.setId(message.getId());
if (!peer.sendMessage(accountUnknownMessage))
peer.disconnect("failed to send account-unknown response");
return;
}
NamesMessage namesMessage = new NamesMessage(namesDataList);
namesMessage.setId(message.getId());
if (!peer.sendMessage(namesMessage)) {
peer.disconnect("failed to send account names");
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while send names for account %s to peer %s", address, peer), e);
}
}
private void onNetworkGetNameMessage(Peer peer, Message message) {
GetNameMessage getNameMessage = (GetNameMessage) message;
String name = getNameMessage.getName();
this.stats.getNameMessageStats.requests.incrementAndGet();
try (final Repository repository = RepositoryManager.getRepository()) {
NameData nameData = repository.getNameRepository().fromName(name);
if (nameData == null) {
// We don't have this name
this.stats.getNameMessageStats.unknownAccounts.getAndIncrement();
// Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
LOGGER.debug(() -> String.format("Sending 'name unknown' response to peer %s for GET_NAME request for unknown name %s", peer, name));
// Send generic 'unknown' message as it's very short
Message nameUnknownMessage = new GenericUnknownMessage();
nameUnknownMessage.setId(message.getId());
if (!peer.sendMessage(nameUnknownMessage))
peer.disconnect("failed to send name-unknown response");
return;
}
NamesMessage namesMessage = new NamesMessage(Arrays.asList(nameData));
namesMessage.setId(message.getId());
if (!peer.sendMessage(namesMessage)) {
peer.disconnect("failed to send name data");
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while send name %s to peer %s", name, peer), e);
}
}
// Utilities
@@ -1470,14 +1886,14 @@ public class Controller extends Thread {
continue;
}
final PeerChainTipData peerChainTipData = peer.getChainTipData();
BlockSummaryData peerChainTipData = peer.getChainTipData();
if (peerChainTipData == null) {
iterator.remove();
continue;
}
// Disregard peers that don't have a recent block
if (peerChainTipData.getLastBlockTimestamp() == null || peerChainTipData.getLastBlockTimestamp() < minLatestBlockTimestamp) {
if (peerChainTipData.getTimestamp() == null || peerChainTipData.getTimestamp() < minLatestBlockTimestamp) {
iterator.remove();
continue;
}
@@ -1492,6 +1908,11 @@ public class Controller extends Thread {
* @return boolean - whether our node's blockchain is up to date or not
*/
public boolean isUpToDate(Long minLatestBlockTimestamp) {
if (Settings.getInstance().isLite()) {
// Lite nodes are always "up to date"
return true;
}
// Do we even have a vaguely recent block?
if (minLatestBlockTimestamp == null)
return false;
@@ -1500,6 +1921,10 @@ public class Controller extends Thread {
if (latestBlockData == null || latestBlockData.getTimestamp() < minLatestBlockTimestamp)
return false;
if (Settings.getInstance().isSingleNodeTestnet())
// Single node testnets won't have peers, so we can assume up to date from this point
return true;
// Needs a mutable copy of the unmodifiableList
List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
if (peers == null)


@@ -0,0 +1,74 @@
package org.qortal.controller;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.DevProxyService;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
public class DevProxyManager {
protected static final Logger LOGGER = LogManager.getLogger(DevProxyManager.class);
private static DevProxyManager instance;
private boolean running = false;
private String sourceHostAndPort = "127.0.0.1:5173"; // Default for React/Vite
private DevProxyManager() {
}
public static DevProxyManager getInstance() {
if (instance == null)
instance = new DevProxyManager();
return instance;
}
public void start() throws DataException {
synchronized(this) {
if (this.running) {
// Already running
return;
}
LOGGER.info(String.format("Starting developer proxy service on port %d", Settings.getInstance().getDevProxyPort()));
DevProxyService devProxyService = DevProxyService.getInstance();
devProxyService.start();
this.running = true;
}
}
public void stop() {
synchronized(this) {
if (!this.running) {
// Not running
return;
}
LOGGER.info(String.format("Shutting down developer proxy service"));
DevProxyService devProxyService = DevProxyService.getInstance();
devProxyService.stop();
this.running = false;
}
}
public void setSourceHostAndPort(String sourceHostAndPort) {
this.sourceHostAndPort = sourceHostAndPort;
}
public String getSourceHostAndPort() {
return this.sourceHostAndPort;
}
public Integer getPort() {
return Settings.getInstance().getDevProxyPort();
}
public boolean isRunning() {
return this.running;
}
}
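A minimal usage sketch of the lifecycle wrapper above, assuming only the DevProxyManager methods shown here; the wrapping example class is hypothetical and not part of the diff.

// Hypothetical caller; only DevProxyManager's own API (shown above) is assumed.
import org.qortal.controller.DevProxyManager;
import org.qortal.repository.DataException;

public class DevProxyExample {
    public static void main(String[] args) {
        DevProxyManager devProxyManager = DevProxyManager.getInstance();
        // Point the proxy at a local dev webserver (e.g. React/Vite) before starting it
        devProxyManager.setSourceHostAndPort("127.0.0.1:5173");
        try {
            devProxyManager.start();
            System.out.println("Dev proxy listening on port " + devProxyManager.getPort()
                    + ", proxying " + devProxyManager.getSourceHostAndPort());
        } catch (DataException e) {
            System.err.println("Unable to start dev proxy: " + e.getMessage());
        } finally {
            if (devProxyManager.isRunning())
                devProxyManager.stop();
        }
    }
}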


@@ -0,0 +1,189 @@
package org.qortal.controller;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.*;
import java.security.SecureRandom;
import java.util.*;
import static org.qortal.network.message.MessageType.*;
public class LiteNode {
private static final Logger LOGGER = LogManager.getLogger(LiteNode.class);
private static LiteNode instance;
public Map<Integer, Long> pendingRequests = Collections.synchronizedMap(new HashMap<>());
public int MAX_TRANSACTIONS_PER_MESSAGE = 100;
public LiteNode() {
}
public static synchronized LiteNode getInstance() {
if (instance == null) {
instance = new LiteNode();
}
return instance;
}
/**
* Fetch account data from peers for given QORT address
* @param address - the QORT address to query
* @return accountData - the account data for this address, or null if not retrieved
*/
public AccountData fetchAccountData(String address) {
GetAccountMessage getAccountMessage = new GetAccountMessage(address);
AccountMessage accountMessage = (AccountMessage) this.sendMessage(getAccountMessage, ACCOUNT);
if (accountMessage == null) {
return null;
}
return accountMessage.getAccountData();
}
/**
* Fetch account balance data from peers for given QORT address and asset ID
* @param address - the QORT address to query
* @return balance - the balance for this address and assetId, or null if not retrieved
*/
public AccountBalanceData fetchAccountBalance(String address, long assetId) {
GetAccountBalanceMessage getAccountMessage = new GetAccountBalanceMessage(address, assetId);
AccountBalanceMessage accountMessage = (AccountBalanceMessage) this.sendMessage(getAccountMessage, ACCOUNT_BALANCE);
if (accountMessage == null) {
return null;
}
return accountMessage.getAccountBalanceData();
}
/**
* Fetch list of transactions for given QORT address
* @param address - the QORT address to query
* @param limit - the maximum number of results to return
* @param offset - the starting index
* @return a list of TransactionData objects, or null if not retrieved
*/
public List<TransactionData> fetchAccountTransactions(String address, int limit, int offset) {
List<TransactionData> allTransactions = new ArrayList<>();
if (limit == 0) {
limit = Integer.MAX_VALUE;
}
int batchSize = Math.min(limit, MAX_TRANSACTIONS_PER_MESSAGE);
while (allTransactions.size() < limit) {
GetAccountTransactionsMessage getAccountTransactionsMessage = new GetAccountTransactionsMessage(address, batchSize, offset);
TransactionsMessage transactionsMessage = (TransactionsMessage) this.sendMessage(getAccountTransactionsMessage, TRANSACTIONS);
if (transactionsMessage == null) {
// An error occurred, so give up instead of returning partial results
return null;
}
allTransactions.addAll(transactionsMessage.getTransactions());
if (transactionsMessage.getTransactions().size() < batchSize) {
// No more transactions to fetch
break;
}
offset += batchSize;
}
return allTransactions;
}
/**
* Fetch list of names for given QORT address
* @param address - the QORT address to query
* @return a list of NameData objects, or null if not retrieved
*/
public List<NameData> fetchAccountNames(String address) {
GetAccountNamesMessage getAccountNamesMessage = new GetAccountNamesMessage(address);
NamesMessage namesMessage = (NamesMessage) this.sendMessage(getAccountNamesMessage, NAMES);
if (namesMessage == null) {
return null;
}
return namesMessage.getNameDataList();
}
/**
* Fetch info about a registered name
* @param name - the name to query
* @return a NameData object, or null if not retrieved
*/
public NameData fetchNameData(String name) {
GetNameMessage getNameMessage = new GetNameMessage(name);
NamesMessage namesMessage = (NamesMessage) this.sendMessage(getNameMessage, NAMES);
if (namesMessage == null) {
return null;
}
List<NameData> nameDataList = namesMessage.getNameDataList();
if (nameDataList == null || nameDataList.size() != 1) {
return null;
}
// We are only expecting a single item in the list
return nameDataList.get(0);
}
private Message sendMessage(Message message, MessageType expectedResponseMessageType) {
// This asks a random peer for the data
// TODO: ask multiple peers, and disregard everything if there are any significant differences in the responses
// Needs a mutable copy of the unmodifiableList
List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
// Disregard peers that have "misbehaved" recently
peers.removeIf(Controller.hasMisbehaved);
// Disregard peers that only have genesis block
// TODO: peers.removeIf(Controller.hasOnlyGenesisBlock);
// Disregard peers that are on an old version
peers.removeIf(Controller.hasOldVersion);
// Disregard peers that are on a known inferior chain tip
// TODO: peers.removeIf(Controller.hasInferiorChainTip);
if (peers.isEmpty()) {
LOGGER.info("No peers available to send {} message to", message.getType());
return null;
}
// Pick random peer
int index = new SecureRandom().nextInt(peers.size());
Peer peer = peers.get(index);
LOGGER.info("Sending {} message to peer {}...", message.getType(), peer);
Message responseMessage;
try {
responseMessage = peer.getResponse(message);
} catch (InterruptedException e) {
return null;
}
if (responseMessage == null) {
LOGGER.info("Peer {} didn't respond to {} message", peer, message.getType());
return null;
}
else if (responseMessage.getType() != expectedResponseMessageType) {
LOGGER.info("Peer responded with unexpected message type {} (should be {})", peer, responseMessage.getType(), expectedResponseMessageType);
return null;
}
LOGGER.info("Peer {} responded with {} message", peer, responseMessage.getType());
return responseMessage;
}
}
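The fetch methods above all follow the same pattern: build a GET_* message, hand it to sendMessage() which picks one random handshaked peer, and return null on any failure. A short caller sketch, assuming only the LiteNode methods shown here; the address value and wrapping class are placeholders, and asset ID 0 is used for the native QORT asset.

// Illustrative caller for the LiteNode fetch methods above.
import org.qortal.controller.LiteNode;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.TransactionData;

import java.util.List;

public class LiteNodeExample {
    public static void main(String[] args) {
        String address = "Q..."; // placeholder QORT address
        LiteNode liteNode = LiteNode.getInstance();

        // Each call returns null if no peer supplied a usable response
        AccountBalanceData balance = liteNode.fetchAccountBalance(address, 0L); // asset 0 assumed to be native QORT
        List<TransactionData> transactions = liteNode.fetchAccountTransactions(address, 20, 0);
        List<NameData> names = liteNode.fetchAccountNames(address);

        System.out.println("Balance retrieved: " + (balance != null));
        System.out.println("Transactions: " + (transactions == null ? "unavailable" : transactions.size()));
        System.out.println("Names: " + (names == null ? "unavailable" : names.size()));
    }
}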

File diff suppressed because it is too large


@@ -0,0 +1,404 @@
package org.qortal.controller;
import com.rust.litewalletjni.LiteWalletJni;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.json.JSONException;
import org.json.JSONObject;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataReader;
import org.qortal.arbitrary.ArbitraryDataResource;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.crosschain.ForeignBlockchainException;
import org.qortal.crosschain.PirateWallet;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Objects;
public class PirateChainWalletController extends Thread {
protected static final Logger LOGGER = LogManager.getLogger(PirateChainWalletController.class);
private static PirateChainWalletController instance;
final private static long SAVE_INTERVAL = 60 * 60 * 1000L; // 1 hour
private long lastSaveTime = 0L;
private boolean running;
private PirateWallet currentWallet = null;
private boolean shouldLoadWallet = false;
private String loadStatus = null;
private static String qdnWalletSignature = "EsfUw54perxkEtfoUoL7Z97XPrNsZRZXePVZPz3cwRm9qyEPSofD5KmgVpDqVitQp7LhnZRmL6z2V9hEe1YS45T";
private PirateChainWalletController() {
this.running = true;
}
public static PirateChainWalletController getInstance() {
if (instance == null)
instance = new PirateChainWalletController();
return instance;
}
@Override
public void run() {
Thread.currentThread().setName("Pirate Chain Wallet Controller");
try {
while (running && !Controller.isStopping()) {
Thread.sleep(1000);
// Wait until we have a request to load the wallet
if (!shouldLoadWallet) {
continue;
}
if (!LiteWalletJni.isLoaded()) {
this.loadLibrary();
// If still not loaded, sleep to prevent too many requests
if (!LiteWalletJni.isLoaded()) {
Thread.sleep(5 * 1000);
continue;
}
}
// Wallet is downloaded, so clear the status
this.loadStatus = null;
if (this.currentWallet == null) {
// Nothing to do yet
continue;
}
if (this.currentWallet.isNullSeedWallet()) {
// Don't sync the null seed wallet
continue;
}
LOGGER.debug("Syncing Pirate Chain wallet...");
String response = LiteWalletJni.execute("sync", "");
LOGGER.debug("sync response: {}", response);
try {
JSONObject json = new JSONObject(response);
if (json.has("result")) {
String result = json.getString("result");
// We may have to set wallet to ready if this is the first ever successful sync
if (Objects.equals(result, "success")) {
this.currentWallet.setReady(true);
}
}
} catch (JSONException e) {
LOGGER.info("Unable to interpret JSON", e);
}
// Rate limit sync attempts
Thread.sleep(30000);
// Save wallet if needed
Long now = NTP.getTime();
if (now != null && now-SAVE_INTERVAL >= this.lastSaveTime) {
this.saveCurrentWallet();
}
}
} catch (InterruptedException e) {
// Fall-through to exit
}
}
public void shutdown() {
// Save the wallet
this.saveCurrentWallet();
this.running = false;
this.interrupt();
}
// QDN & wallet libraries
private void loadLibrary() throws InterruptedException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Check if architecture is supported
String libFileName = PirateChainWalletController.getRustLibFilename();
if (libFileName == null) {
String osName = System.getProperty("os.name");
String osArchitecture = System.getProperty("os.arch");
this.loadStatus = String.format("Unsupported architecture (%s %s)", osName, osArchitecture);
return;
}
// Check if the library exists in the wallets folder
Path libDirectory = PirateChainWalletController.getRustLibOuterDirectory();
Path libPath = Paths.get(libDirectory.toString(), libFileName);
if (Files.exists(libPath)) {
// Already downloaded; we can load the library right away
LiteWalletJni.loadLibrary();
return;
}
// Library not found, so check if we've fetched the resource from QDN
ArbitraryTransactionData t = this.getTransactionData(repository);
if (t == null || t.getService() == null) {
// Can't find the transaction - maybe on a different chain?
return;
}
// Wait until we have a sufficient number of peers to attempt QDN downloads
List<Peer> handshakedPeers = Network.getInstance().getImmutableHandshakedPeers();
if (handshakedPeers.size() < Settings.getInstance().getMinBlockchainPeers()) {
// Wait for more peers
this.loadStatus = String.format("Searching for peers...");
return;
}
// Build resource
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(t.getName(),
ArbitraryDataFile.ResourceIdType.NAME, t.getService(), t.getIdentifier());
try {
arbitraryDataReader.loadSynchronously(false);
} catch (MissingDataException e) {
LOGGER.info("Missing data when loading Pirate Chain library");
}
// Check its status
ArbitraryResourceStatus status = ArbitraryTransactionUtils.getStatus(
t.getService(), t.getName(), t.getIdentifier(), false);
if (status.getStatus() != ArbitraryResourceStatus.Status.READY) {
LOGGER.info("Not ready yet: {}", status.getTitle());
this.loadStatus = String.format("Downloading files from QDN... (%d / %d)", status.getLocalChunkCount(), status.getTotalChunkCount());
return;
}
// Files are downloaded, so copy the necessary files to the wallets folder
// Delete the wallets/*/lib directory first, in case earlier versions of the wallet are present
Path walletsLibDirectory = PirateChainWalletController.getWalletsLibDirectory();
if (Files.exists(walletsLibDirectory)) {
FilesystemUtils.safeDeleteDirectory(walletsLibDirectory, false);
}
Files.createDirectories(libDirectory);
FileUtils.copyDirectory(arbitraryDataReader.getFilePath().toFile(), libDirectory.toFile());
// Clear reader cache so only one copy exists
ArbitraryDataResource resource = new ArbitraryDataResource(t.getName(),
ArbitraryDataFile.ResourceIdType.NAME, t.getService(), t.getIdentifier());
resource.deleteCache();
// Finally, load the library
LiteWalletJni.loadLibrary();
} catch (DataException e) {
LOGGER.error("Repository issue when loading Pirate Chain library", e);
} catch (IOException e) {
LOGGER.error("Error when loading Pirate Chain library", e);
}
}
private ArbitraryTransactionData getTransactionData(Repository repository) {
try {
byte[] signature = Base58.decode(qdnWalletSignature);
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
return null;
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
if (arbitraryTransaction != null) {
return (ArbitraryTransactionData) arbitraryTransaction.getTransactionData();
}
return null;
} catch (DataException e) {
return null;
}
}
public static String getRustLibFilename() {
String osName = System.getProperty("os.name");
String osArchitecture = System.getProperty("os.arch");
if (osName.equals("Mac OS X") && osArchitecture.equals("x86_64")) {
return "librust-macos-x86_64.dylib";
}
else if ((osName.equals("Linux") || osName.equals("FreeBSD")) && osArchitecture.equals("aarch64")) {
return "librust-linux-aarch64.so";
}
else if ((osName.equals("Linux") || osName.equals("FreeBSD")) && osArchitecture.equals("amd64")) {
return "librust-linux-x86_64.so";
}
else if (osName.contains("Windows") && osArchitecture.equals("amd64")) {
return "librust-windows-x86_64.dll";
}
return null;
}
public static Path getWalletsLibDirectory() {
return Paths.get(Settings.getInstance().getWalletsPath(), "PirateChain", "lib");
}
public static Path getRustLibOuterDirectory() {
String sigPrefix = qdnWalletSignature.substring(0, 8);
return Paths.get(Settings.getInstance().getWalletsPath(), "PirateChain", "lib", sigPrefix);
}
// Wallet functions
public boolean initWithEntropy58(String entropy58) {
return this.initWithEntropy58(entropy58, false);
}
public boolean initNullSeedWallet() {
return this.initWithEntropy58(Base58.encode(new byte[32]), true);
}
private boolean initWithEntropy58(String entropy58, boolean isNullSeedWallet) {
// If the JNI library isn't loaded yet then we can't proceed
if (!LiteWalletJni.isLoaded()) {
shouldLoadWallet = true;
return false;
}
byte[] entropyBytes = Base58.decode(entropy58);
if (entropyBytes == null || entropyBytes.length != 32) {
LOGGER.info("Invalid entropy bytes");
return false;
}
if (this.currentWallet != null) {
if (this.currentWallet.entropyBytesEqual(entropyBytes)) {
// Wallet already active - nothing to do
return true;
}
else {
// Different wallet requested - close the existing one and switch over
this.closeCurrentWallet();
}
}
try {
this.currentWallet = new PirateWallet(entropyBytes, isNullSeedWallet);
if (!this.currentWallet.isReady()) {
// Don't persist wallets that aren't ready
this.currentWallet = null;
}
return true;
} catch (IOException e) {
LOGGER.info("Unable to initialize wallet: {}", e.getMessage());
}
return false;
}
private void saveCurrentWallet() {
if (this.currentWallet == null) {
// Nothing to do
return;
}
try {
if (this.currentWallet.save()) {
Long now = NTP.getTime();
if (now != null) {
this.lastSaveTime = now;
}
}
} catch (IOException e) {
LOGGER.info("Unable to save wallet");
}
}
public PirateWallet getCurrentWallet() {
return this.currentWallet;
}
private void closeCurrentWallet() {
this.saveCurrentWallet();
this.currentWallet = null;
}
public void ensureInitialized() throws ForeignBlockchainException {
if (!LiteWalletJni.isLoaded() || this.currentWallet == null || !this.currentWallet.isInitialized()) {
throw new ForeignBlockchainException("Pirate wallet isn't initialized yet");
}
}
public void ensureNotNullSeed() throws ForeignBlockchainException {
// Safety check to make sure funds aren't sent to a null seed wallet
if (this.currentWallet == null || this.currentWallet.isNullSeedWallet()) {
throw new ForeignBlockchainException("Invalid wallet");
}
}
public void ensureSynchronized() throws ForeignBlockchainException {
if (this.currentWallet == null || !this.currentWallet.isSynchronized()) {
throw new ForeignBlockchainException("Wallet isn't synchronized yet");
}
String response = LiteWalletJni.execute("syncStatus", "");
JSONObject json = new JSONObject(response);
if (json.has("syncing")) {
boolean isSyncing = Boolean.valueOf(json.getString("syncing"));
if (isSyncing) {
long syncedBlocks = json.getLong("synced_blocks");
long totalBlocks = json.getLong("total_blocks");
throw new ForeignBlockchainException(String.format("Sync in progress (%d / %d). Please try again later.", syncedBlocks, totalBlocks));
}
}
}
public String getSyncStatus() {
if (this.currentWallet == null || !this.currentWallet.isInitialized()) {
if (this.loadStatus != null) {
return this.loadStatus;
}
return "Not initialized yet";
}
String syncStatusResponse = LiteWalletJni.execute("syncStatus", "");
org.json.JSONObject json = new JSONObject(syncStatusResponse);
if (json.has("syncing")) {
boolean isSyncing = Boolean.valueOf(json.getString("syncing"));
if (isSyncing) {
long syncedBlocks = json.getLong("synced_blocks");
long totalBlocks = json.getLong("total_blocks");
return String.format("Sync in progress (%d / %d)", syncedBlocks, totalBlocks);
}
}
boolean isSynchronized = this.currentWallet.isSynchronized();
if (isSynchronized) {
return "Synchronized";
}
return "Initializing wallet...";
}
}
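The controller above is a singleton background thread: run() waits for a wallet-load request, fetches and loads the Rust JNI library from QDN if needed, then periodically syncs and saves the active wallet. A hedged usage sketch, assuming only the public methods shown above; the null-seed wallet is used purely as a stand-in so no real entropy needs to be supplied, and the wrapping class is hypothetical.

// Illustrative caller; only PirateChainWalletController's own API (shown above) is assumed.
import org.qortal.controller.PirateChainWalletController;

public class PirateWalletExample {
    public static void main(String[] args) throws InterruptedException {
        PirateChainWalletController controller = PirateChainWalletController.getInstance();
        controller.start(); // starts the run() loop above (the controller extends Thread)

        // A real caller would pass Base58-encoded 32-byte entropy to initWithEntropy58();
        // the all-zero "null seed" wallet is used here as a placeholder.
        // Returns false until the JNI library has been fetched from QDN and loaded.
        boolean initialized = controller.initNullSeedWallet();

        // Poll the human-readable status string that the API/UI would display
        Thread.sleep(2000);
        System.out.println("Initialized: " + initialized + ", status: " + controller.getSyncStatus());

        controller.shutdown();
    }
}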


@@ -19,21 +19,13 @@ import org.qortal.block.BlockChain;
import org.qortal.data.block.BlockData;
import org.qortal.data.block.BlockSummaryData;
import org.qortal.data.block.CommonBlockData;
import org.qortal.data.network.PeerChainTipData;
import org.qortal.data.transaction.RewardShareTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.event.Event;
import org.qortal.event.EventBus;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.BlockMessage;
import org.qortal.network.message.BlockSummariesMessage;
import org.qortal.network.message.GetBlockMessage;
import org.qortal.network.message.GetBlockSummariesMessage;
import org.qortal.network.message.GetSignaturesV2Message;
import org.qortal.network.message.Message;
import org.qortal.network.message.SignaturesMessage;
import org.qortal.network.message.Message.MessageType;
import org.qortal.network.message.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
@@ -61,7 +53,8 @@ public class Synchronizer extends Thread {
/** Maximum number of block signatures we ask from peer in one go */
private static final int MAXIMUM_REQUEST_SIZE = 200; // XXX move to Settings?
private static final long RECOVERY_MODE_TIMEOUT = 10 * 60 * 1000L; // ms
/** Maximum number of consecutive failed sync attempts before marking peer as misbehaved */
private static final int MAX_CONSECUTIVE_FAILED_SYNC_ATTEMPTS = 3;
private boolean running;
@@ -83,12 +76,14 @@ public class Synchronizer extends Thread {
private volatile boolean isSynchronizing = false;
/** Temporary estimate of synchronization progress for SysTray use. */
private volatile int syncPercent = 0;
/** Temporary estimate of blocks remaining for SysTray use. */
private volatile int blocksRemaining = 0;
private static volatile boolean requestSync = false;
private boolean syncRequestPending = false;
// Keep track of invalid blocks so that we don't keep trying to sync them
private Map<String, Long> invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
private Map<ByteArray, Long> invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
public Long timeValidBlockLastReceived = null;
public Long timeInvalidBlockLastReceived = null;
@@ -134,6 +129,11 @@ public class Synchronizer extends Thread {
public void run() {
Thread.currentThread().setName("Synchronizer");
if (Settings.getInstance().isLite()) {
// Lite nodes don't need to sync
return;
}
try {
while (running && !Controller.isStopping()) {
Thread.sleep(1000);
@@ -173,8 +173,8 @@ public class Synchronizer extends Thread {
public Integer getSyncPercent() {
synchronized (this.syncLock) {
// Report as 100% synced if the latest block is within the last 30 mins
final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L);
// Report as 100% synced if the latest block is within the last 60 mins
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
return 100;
}
@@ -183,6 +183,18 @@ public class Synchronizer extends Thread {
}
}
public Integer getBlocksRemaining() {
synchronized (this.syncLock) {
// Report as 0 blocks remaining if the latest block is within the last 60 mins
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
return 0;
}
return this.isSynchronizing ? this.blocksRemaining : null;
}
}
public void requestSync() {
requestSync = true;
}
@@ -235,6 +247,9 @@ public class Synchronizer extends Thread {
// Disregard peers that are on the same block as last sync attempt and we didn't like their chain
peers.removeIf(Controller.hasInferiorChainTip);
// Disregard peers that have a block with an invalid signer
peers.removeIf(Controller.hasInvalidSigner);
final int peersBeforeComparison = peers.size();
// Request recent block summaries from the remaining peers, and locate our common block with each
@@ -284,7 +299,7 @@ public class Synchronizer extends Thread {
BlockData priorChainTip = Controller.getInstance().getChainTip();
synchronized (this.syncLock) {
this.syncPercent = (priorChainTip.getHeight() * 100) / peer.getChainTipData().getLastHeight();
this.syncPercent = (priorChainTip.getHeight() * 100) / peer.getChainTipData().getHeight();
// Only update SysTray if we're potentially changing height
if (this.syncPercent < 100) {
@@ -314,7 +329,7 @@ public class Synchronizer extends Thread {
case INFERIOR_CHAIN: {
// Update our list of inferior chain tips
ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
ByteArray inferiorChainSignature = ByteArray.wrap(peer.getChainTipData().getSignature());
if (!inferiorChainSignatures.contains(inferiorChainSignature))
inferiorChainSignatures.add(inferiorChainSignature);
@@ -322,7 +337,8 @@ public class Synchronizer extends Thread {
LOGGER.debug(() -> String.format("Refused to synchronize with peer %s (%s)", peer, syncResult.name()));
// Notify peer of our superior chain
if (!peer.sendMessage(Network.getInstance().buildHeightMessage(peer, priorChainTip)))
Message message = Network.getInstance().buildHeightOrChainTipInfo(peer);
if (message == null || !peer.sendMessage(message))
peer.disconnect("failed to notify peer of our superior chain");
break;
}
@@ -343,7 +359,7 @@ public class Synchronizer extends Thread {
// fall-through...
case NOTHING_TO_DO: {
// Update our list of inferior chain tips
ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
ByteArray inferiorChainSignature = ByteArray.wrap(peer.getChainTipData().getSignature());
if (!inferiorChainSignatures.contains(inferiorChainSignature))
inferiorChainSignatures.add(inferiorChainSignature);
@@ -371,8 +387,7 @@ public class Synchronizer extends Thread {
// Reset our cache of inferior chains
inferiorChainSignatures.clear();
Network network = Network.getInstance();
network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newChainTip));
Network.getInstance().broadcastOurChain();
EventBus.INSTANCE.notify(new NewChainTipEvent(priorChainTip, newChainTip));
}
@@ -399,9 +414,10 @@ public class Synchronizer extends Thread {
timePeersLastAvailable = NTP.getTime();
// If enough time has passed, enter recovery mode, which lifts some restrictions on who we can sync with and when we can mint
if (NTP.getTime() - timePeersLastAvailable > RECOVERY_MODE_TIMEOUT) {
long recoveryModeTimeout = Settings.getInstance().getRecoveryModeTimeout();
if (NTP.getTime() - timePeersLastAvailable > recoveryModeTimeout) {
if (recoveryMode == false) {
LOGGER.info(String.format("Peers have been unavailable for %d minutes. Entering recovery mode...", RECOVERY_MODE_TIMEOUT/60/1000));
LOGGER.info(String.format("Peers have been unavailable for %d minutes. Entering recovery mode...", recoveryModeTimeout/60/1000));
recoveryMode = true;
}
}
@@ -419,7 +435,7 @@ public class Synchronizer extends Thread {
public void addInferiorChainSignature(byte[] inferiorSignature) {
// Update our list of inferior chain tips
ByteArray inferiorChainSignature = new ByteArray(inferiorSignature);
ByteArray inferiorChainSignature = ByteArray.wrap(inferiorSignature);
if (!inferiorChainSignatures.contains(inferiorChainSignature))
inferiorChainSignatures.add(inferiorChainSignature);
}
@@ -515,13 +531,13 @@ public class Synchronizer extends Thread {
final BlockData ourLatestBlockData = repository.getBlockRepository().getLastBlock();
final int ourInitialHeight = ourLatestBlockData.getHeight();
PeerChainTipData peerChainTipData = peer.getChainTipData();
int peerHeight = peerChainTipData.getLastHeight();
byte[] peersLastBlockSignature = peerChainTipData.getLastBlockSignature();
BlockSummaryData peerChainTipData = peer.getChainTipData();
int peerHeight = peerChainTipData.getHeight();
byte[] peersLastBlockSignature = peerChainTipData.getSignature();
byte[] ourLastBlockSignature = ourLatestBlockData.getSignature();
LOGGER.debug(String.format("Fetching summaries from peer %s at height %d, sig %.8s, ts %d; our height %d, sig %.8s, ts %d", peer,
peerHeight, Base58.encode(peersLastBlockSignature), peer.getChainTipData().getLastBlockTimestamp(),
peerHeight, Base58.encode(peersLastBlockSignature), peerChainTipData.getTimestamp(),
ourInitialHeight, Base58.encode(ourLastBlockSignature), ourLatestBlockData.getTimestamp()));
List<BlockSummaryData> peerBlockSummaries = new ArrayList<>();
@@ -619,7 +635,7 @@ public class Synchronizer extends Thread {
// We have already determined that the correct chain diverged from a lower height. We are safe to skip these peers.
for (Peer peer : peersSharingCommonBlock) {
LOGGER.debug(String.format("Peer %s has common block at height %d but the superior chain is at height %d. Removing it from this round.", peer, commonBlockSummary.getHeight(), dropPeersAfterCommonBlockHeight));
this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
//this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
}
continue;
}
@@ -630,16 +646,18 @@ public class Synchronizer extends Thread {
int minChainLength = this.calculateMinChainLengthOfPeers(peersSharingCommonBlock, commonBlockSummary);
// Fetch block summaries from each peer
for (Peer peer : peersSharingCommonBlock) {
Iterator peersSharingCommonBlockIterator = peersSharingCommonBlock.iterator();
while (peersSharingCommonBlockIterator.hasNext()) {
Peer peer = (Peer) peersSharingCommonBlockIterator.next();
// If we're shutting down, just return the latest peer list
if (Controller.isStopping())
return peers;
// Count the number of blocks this peer has beyond our common block
final PeerChainTipData peerChainTipData = peer.getChainTipData();
final int peerHeight = peerChainTipData.getLastHeight();
final byte[] peerLastBlockSignature = peerChainTipData.getLastBlockSignature();
final BlockSummaryData peerChainTipData = peer.getChainTipData();
final int peerHeight = peerChainTipData.getHeight();
final byte[] peerLastBlockSignature = peerChainTipData.getSignature();
final int peerAdditionalBlocksAfterCommonBlock = peerHeight - commonBlockSummary.getHeight();
// Limit the number of blocks we are comparing. FUTURE: we could request more in batches, but there may not be a case when this is needed
int summariesRequired = Math.min(peerAdditionalBlocksAfterCommonBlock, MAXIMUM_REQUEST_SIZE);
@@ -687,6 +705,8 @@ public class Synchronizer extends Thread {
if (this.containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) {
LOGGER.debug("Ignoring peer %s because it holds an invalid block", peer);
peers.remove(peer);
peersSharingCommonBlockIterator.remove();
continue;
}
// Reduce minChainLength if needed. If we don't have any blocks, this peer will be excluded from chain weight comparisons later in the process, so we shouldn't update minChainLength
@@ -725,8 +745,9 @@ public class Synchronizer extends Thread {
LOGGER.debug(String.format("Listing peers with common block %.8s...", Base58.encode(commonBlockSummary.getSignature())));
for (Peer peer : peersSharingCommonBlock) {
final int peerHeight = peer.getChainTipData().getLastHeight();
final Long peerLastBlockTimestamp = peer.getChainTipData().getLastBlockTimestamp();
BlockSummaryData peerChainTipData = peer.getChainTipData();
final int peerHeight = peerChainTipData.getHeight();
final Long peerLastBlockTimestamp = peerChainTipData.getTimestamp();
final int peerAdditionalBlocksAfterCommonBlock = peerHeight - commonBlockSummary.getHeight();
final CommonBlockData peerCommonBlockData = peer.getCommonBlockData();
@@ -823,7 +844,7 @@ public class Synchronizer extends Thread {
// Calculate the length of the shortest peer chain sharing this common block
int minChainLength = 0;
for (Peer peer : peersSharingCommonBlock) {
final int peerHeight = peer.getChainTipData().getLastHeight();
final int peerHeight = peer.getChainTipData().getHeight();
final int peerAdditionalBlocksAfterCommonBlock = peerHeight - commonBlockSummary.getHeight();
if (peerAdditionalBlocksAfterCommonBlock < minChainLength || minChainLength == 0)
@@ -842,6 +863,10 @@ public class Synchronizer extends Thread {
/* Invalid block signature tracking */
public Map<ByteArray, Long> getInvalidBlockSignatures() {
return this.invalidBlockSignatures;
}
private void addInvalidBlockSignature(byte[] signature) {
Long now = NTP.getTime();
if (now == null) {
@@ -849,8 +874,7 @@ public class Synchronizer extends Thread {
}
// Add or update existing entry
String sig58 = Base58.encode(signature);
invalidBlockSignatures.put(sig58, now);
invalidBlockSignatures.put(ByteArray.wrap(signature), now);
}
private void deleteOlderInvalidSignatures(Long now) {
if (now == null) {
@@ -869,17 +893,16 @@ public class Synchronizer extends Thread {
}
}
}
private boolean containsInvalidBlockSummary(List<BlockSummaryData> blockSummaries) {
public boolean containsInvalidBlockSummary(List<BlockSummaryData> blockSummaries) {
if (blockSummaries == null || invalidBlockSignatures == null) {
return false;
}
// Loop through our known invalid blocks and check each one against supplied block summaries
for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
byte[] invalidSignature = Base58.decode(invalidSignature58);
for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) {
for (BlockSummaryData blockSummary : blockSummaries) {
byte[] signature = blockSummary.getSignature();
if (Arrays.equals(signature, invalidSignature)) {
if (Arrays.equals(signature, invalidSignature.value)) {
return true;
}
}
@@ -892,10 +915,9 @@ public class Synchronizer extends Thread {
}
// Loop through our known invalid blocks and check each one against supplied block signatures
for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
byte[] invalidSignature = Base58.decode(invalidSignature58);
for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) {
for (byte[] signature : blockSignatures) {
if (Arrays.equals(signature, invalidSignature)) {
if (Arrays.equals(signature, invalidSignature.value)) {
return true;
}
}
@@ -930,13 +952,13 @@ public class Synchronizer extends Thread {
final BlockData ourLatestBlockData = repository.getBlockRepository().getLastBlock();
final int ourInitialHeight = ourLatestBlockData.getHeight();
PeerChainTipData peerChainTipData = peer.getChainTipData();
int peerHeight = peerChainTipData.getLastHeight();
byte[] peersLastBlockSignature = peerChainTipData.getLastBlockSignature();
BlockSummaryData peerChainTipData = peer.getChainTipData();
int peerHeight = peerChainTipData.getHeight();
byte[] peersLastBlockSignature = peerChainTipData.getSignature();
byte[] ourLastBlockSignature = ourLatestBlockData.getSignature();
String syncString = String.format("Synchronizing with peer %s at height %d, sig %.8s, ts %d; our height %d, sig %.8s, ts %d", peer,
peerHeight, Base58.encode(peersLastBlockSignature), peer.getChainTipData().getLastBlockTimestamp(),
peerHeight, Base58.encode(peersLastBlockSignature), peerChainTipData.getTimestamp(),
ourInitialHeight, Base58.encode(ourLastBlockSignature), ourLatestBlockData.getTimestamp());
LOGGER.info(syncString);
@@ -1099,6 +1121,7 @@ public class Synchronizer extends Thread {
// If common block is too far behind us then we're on massively different forks so give up.
if (!force && testHeight < ourHeight - MAXIMUM_COMMON_DELTA) {
LOGGER.info(String.format("Blockchain too divergent with peer %s", peer));
peer.setLastTooDivergentTime(NTP.getTime());
return SynchronizationResult.TOO_DIVERGENT;
}
@@ -1108,6 +1131,9 @@ public class Synchronizer extends Thread {
testHeight = Math.max(testHeight - step, 1);
}
// Peer not considered too divergent
peer.setLastTooDivergentTime(0L);
// Prepend test block's summary as first block summary, as summaries returned are *after* test block
BlockSummaryData testBlockSummary = new BlockSummaryData(testBlockData);
blockSummariesFromCommon.add(0, testBlockSummary);
@@ -1243,7 +1269,14 @@ public class Synchronizer extends Thread {
int numberSignaturesRequired = additionalPeerBlocksAfterCommonBlock - peerBlockSignatures.size();
int retryCount = 0;
while (height < peerHeight) {
// Keep fetching blocks from peer until we reach their tip, or reach a count of MAXIMUM_COMMON_DELTA blocks.
// We need to limit the total number, otherwise too much can be loaded into memory, causing an
OutOfMemoryError. This is common when syncing from 1000+ blocks behind the chain tip, after starting
// from a small fork that didn't become part of the main chain. This causes the entire sync process to
// use syncToPeerChain(), resulting in potentially thousands of blocks being held in memory if the limit
// below isn't applied.
while (height < peerHeight && peerBlocks.size() <= MAXIMUM_COMMON_DELTA) {
if (Controller.isStopping())
return SynchronizationResult.SHUTTING_DOWN;
@@ -1310,7 +1343,7 @@ public class Synchronizer extends Thread {
// Final check to make sure the peer isn't out of date (except for when we're in recovery mode)
if (!recoveryMode && peer.getChainTipData() != null) {
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
final Long peerLastBlockTimestamp = peer.getChainTipData().getLastBlockTimestamp();
final Long peerLastBlockTimestamp = peer.getChainTipData().getTimestamp();
if (peerLastBlockTimestamp == null || peerLastBlockTimestamp < minLatestBlockTimestamp) {
LOGGER.info(String.format("Peer %s is out of date, so abandoning sync attempt", peer));
return SynchronizationResult.CHAIN_TIP_TOO_OLD;
@@ -1445,6 +1478,12 @@ public class Synchronizer extends Thread {
repository.saveChanges();
synchronized (this.syncLock) {
if (peer.getChainTipData() != null) {
this.blocksRemaining = peer.getChainTipData().getHeight() - newBlock.getBlockData().getHeight();
}
}
Controller.getInstance().onNewBlock(newBlock.getBlockData());
}
@@ -1540,6 +1579,12 @@ public class Synchronizer extends Thread {
repository.saveChanges();
synchronized (this.syncLock) {
if (peer.getChainTipData() != null) {
this.blocksRemaining = peer.getChainTipData().getHeight() - newBlock.getBlockData().getHeight();
}
}
Controller.getInstance().onNewBlock(newBlock.getBlockData());
}
@@ -1550,12 +1595,19 @@ public class Synchronizer extends Thread {
Message getBlockSummariesMessage = new GetBlockSummariesMessage(parentSignature, numberRequested);
Message message = peer.getResponse(getBlockSummariesMessage);
if (message == null || message.getType() != MessageType.BLOCK_SUMMARIES)
if (message == null)
return null;
BlockSummariesMessage blockSummariesMessage = (BlockSummariesMessage) message;
if (message.getType() == MessageType.BLOCK_SUMMARIES) {
BlockSummariesMessage blockSummariesMessage = (BlockSummariesMessage) message;
return blockSummariesMessage.getBlockSummaries();
}
else if (message.getType() == MessageType.BLOCK_SUMMARIES_V2) {
BlockSummariesV2Message blockSummariesMessage = (BlockSummariesV2Message) message;
return blockSummariesMessage.getBlockSummaries();
}
return blockSummariesMessage.getBlockSummaries();
return null;
}
private List<byte[]> getBlockSignatures(Peer peer, byte[] parentSignature, int numberRequested) throws InterruptedException {
@@ -1574,12 +1626,35 @@ public class Synchronizer extends Thread {
Message getBlockMessage = new GetBlockMessage(signature);
Message message = peer.getResponse(getBlockMessage);
if (message == null || message.getType() != MessageType.BLOCK)
if (message == null) {
peer.getPeerData().incrementFailedSyncCount();
if (peer.getPeerData().getFailedSyncCount() >= MAX_CONSECUTIVE_FAILED_SYNC_ATTEMPTS) {
// Several failed attempts, so mark peer as misbehaved
LOGGER.info("Marking peer {} as misbehaved due to {} failed sync attempts", peer, peer.getPeerData().getFailedSyncCount());
Network.getInstance().peerMisbehaved(peer);
}
return null;
}
BlockMessage blockMessage = (BlockMessage) message;
// Reset failed sync count now that we have a block response
// FUTURE: we could move this to the end of the sync process, but to reduce risk this can be done
// at a later stage. For now we are only defending against serialization errors or no responses.
peer.getPeerData().setFailedSyncCount(0);
return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates());
switch (message.getType()) {
case BLOCK: {
BlockMessage blockMessage = (BlockMessage) message;
return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates());
}
case BLOCK_V2: {
BlockV2Message blockMessage = (BlockV2Message) message;
return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStatesHash());
}
default:
return null;
}
}
public void populateBlockSummariesMinterLevels(Repository repository, List<BlockSummaryData> blockSummaries) throws DataException {


@@ -2,7 +2,9 @@ package org.qortal.controller;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.GetTransactionMessage;
import org.qortal.network.message.Message;
@@ -11,13 +13,15 @@ import org.qortal.network.message.TransactionSignaturesMessage;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.transform.TransformationException;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
public class TransactionImporter extends Thread {
@@ -54,12 +58,16 @@ public class TransactionImporter extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Transaction Importer");
try {
while (!Controller.isStopping()) {
Thread.sleep(1000L);
Thread.sleep(500L);
// Process incoming transactions queue
processIncomingTransactionsQueue();
validateTransactionsInQueue();
importTransactionsInQueue();
// Clean up invalid incoming transactions list
cleanupInvalidTransactionsList(NTP.getTime());
}
@@ -86,7 +94,26 @@ public class TransactionImporter extends Thread {
incomingTransactions.keySet().removeIf(t -> Arrays.equals(t.getSignature(), signature));
}
private void processIncomingTransactionsQueue() {
/**
* Retrieve all pending unconfirmed transactions that have had their signatures validated.
* @return a list of TransactionData objects, with valid signatures.
*/
private List<TransactionData> getCachedSigValidTransactions() {
synchronized (this.incomingTransactions) {
return this.incomingTransactions.entrySet().stream()
.filter(t -> Boolean.TRUE.equals(t.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
}
/**
* Validate the signatures of any transactions pending import, then update their
* entries in the queue to mark them as valid/invalid.
*
* No database lock is required.
*/
private void validateTransactionsInQueue() {
if (this.incomingTransactions.isEmpty()) {
// Nothing to do?
return;
@@ -103,8 +130,17 @@ public class TransactionImporter extends Thread {
LOGGER.debug("Validating signatures in incoming transactions queue (size {})...", unvalidatedCount);
}
// A list of all currently pending transactions that have valid signatures
List<Transaction> sigValidTransactions = new ArrayList<>();
// A list of signatures that became valid in this round
List<byte[]> newlyValidSignatures = new ArrayList<>();
boolean isLiteNode = Settings.getInstance().isLite();
// We need the latest block in order to check for expired transactions
BlockData latestBlock = Controller.getInstance().getChainTip();
// Signature validation round - does not require blockchain lock
for (Map.Entry<TransactionData, Boolean> transactionEntry : incomingTransactionsCopy.entrySet()) {
// Quick exit?
@@ -114,34 +150,59 @@ public class TransactionImporter extends Thread {
TransactionData transactionData = transactionEntry.getKey();
Transaction transaction = Transaction.fromData(repository, transactionData);
String signature58 = Base58.encode(transactionData.getSignature());
Long now = NTP.getTime();
if (now == null) {
return;
}
// Drop expired transactions before they are considered "sig valid"
if (latestBlock != null && transaction.getDeadline() <= latestBlock.getTimestamp()) {
LOGGER.debug("Removing expired {} transaction {} from import queue", transactionData.getType().name(), signature58);
removeIncomingTransaction(transactionData.getSignature());
invalidUnconfirmedTransactions.put(signature58, (now + EXPIRED_TRANSACTION_RECHECK_INTERVAL));
continue;
}
// Only validate signature if we haven't already done so
Boolean isSigValid = transactionEntry.getValue();
if (!Boolean.TRUE.equals(isSigValid)) {
if (!transaction.isSignatureValid()) {
String signature58 = Base58.encode(transactionData.getSignature());
if (isLiteNode) {
// Lite nodes can't easily validate transactions, so for now we will have to assume that everything is valid
sigValidTransactions.add(transaction);
newlyValidSignatures.add(transactionData.getSignature());
// Mark the signature as valid if the transaction still exists in the import queue
incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE);
continue;
}
LOGGER.trace("Ignoring {} transaction {} with invalid signature", transactionData.getType().name(), signature58);
if (!transaction.isSignatureValid()) {
LOGGER.debug("Ignoring {} transaction {} with invalid signature", transactionData.getType().name(), signature58);
removeIncomingTransaction(transactionData.getSignature());
// Also add to the invalidUnconfirmedTransactions map
Long now = NTP.getTime();
now = NTP.getTime();
if (now != null) {
Long expiry = now + INVALID_TRANSACTION_RECHECK_INTERVAL;
LOGGER.trace("Adding stale invalid transaction {} to invalidUnconfirmedTransactions...", signature58);
LOGGER.trace("Adding invalid transaction {} to invalidUnconfirmedTransactions...", signature58);
// Add to invalidUnconfirmedTransactions so that we don't keep requesting it
invalidUnconfirmedTransactions.put(signature58, expiry);
}
// We're done with this transaction
continue;
}
else {
// Count the number that were validated in this round, for logging purposes
validatedCount++;
}
// Count the number that were validated in this round, for logging purposes
validatedCount++;
// Mark the signature as valid if the transaction still exists in the import queue
incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE);
// Signature validated in this round
newlyValidSignatures.add(transactionData.getSignature());
} else {
LOGGER.trace(() -> String.format("Transaction %s known to have valid signature", Base58.encode(transactionData.getSignature())));
}
@@ -154,30 +215,44 @@ public class TransactionImporter extends Thread {
LOGGER.debug("Finished validating signatures in incoming transactions queue (valid this round: {}, total pending import: {})...", validatedCount, sigValidTransactions.size());
}
if (sigValidTransactions.isEmpty()) {
// Don't bother locking if there are no new transactions to process
return;
if (!newlyValidSignatures.isEmpty()) {
LOGGER.debug("Broadcasting {} newly valid signatures ahead of import", newlyValidSignatures.size());
Message newTransactionSignatureMessage = new TransactionSignaturesMessage(newlyValidSignatures);
Network.getInstance().broadcast(broadcastPeer -> newTransactionSignatureMessage);
}
if (Synchronizer.getInstance().isSyncRequested() || Synchronizer.getInstance().isSynchronizing()) {
// Prioritize syncing, and don't attempt to lock
// Signature validity is retained in the incomingTransactions map, to avoid the above work being wasted
return;
}
} catch (DataException e) {
LOGGER.error("Repository issue while processing incoming transactions", e);
}
}
try {
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
if (!blockchainLock.tryLock(2, TimeUnit.SECONDS)) {
// Signature validity is retained in the incomingTransactions map, to avoid the above work being wasted
LOGGER.debug("Too busy to process incoming transactions queue");
return;
}
} catch (InterruptedException e) {
LOGGER.debug("Interrupted when trying to acquire blockchain lock");
return;
}
/**
* Import any transactions in the queue that have valid signatures.
*
* A database lock is required.
*/
private void importTransactionsInQueue() {
List<TransactionData> sigValidTransactions = this.getCachedSigValidTransactions();
if (sigValidTransactions.isEmpty()) {
// Don't bother locking if there are no new transactions to process
return;
}
LOGGER.debug("Processing incoming transactions queue (size {})...", sigValidTransactions.size());
if (Synchronizer.getInstance().isSyncRequested() || Synchronizer.getInstance().isSynchronizing()) {
// Prioritize syncing, and don't attempt to lock
return;
}
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
if (!blockchainLock.tryLock()) {
LOGGER.debug("Too busy to import incoming transactions queue");
return;
}
LOGGER.debug("Importing incoming transactions queue (size {})...", sigValidTransactions.size());
int processedCount = 0;
try (final Repository repository = RepositoryManager.getRepository()) {
// Import transactions with valid signatures
try {
@@ -187,14 +262,15 @@ public class TransactionImporter extends Thread {
}
if (Synchronizer.getInstance().isSyncRequestPending()) {
LOGGER.debug("Breaking out of transaction processing with {} remaining, because a sync request is pending", sigValidTransactions.size() - i);
LOGGER.debug("Breaking out of transaction importing with {} remaining, because a sync request is pending", sigValidTransactions.size() - i);
return;
}
Transaction transaction = sigValidTransactions.get(i);
TransactionData transactionData = transaction.getTransactionData();
TransactionData transactionData = sigValidTransactions.get(i);
Transaction transaction = Transaction.fromData(repository, transactionData);
Transaction.ValidationResult validationResult = transaction.importAsUnconfirmed();
processedCount++;
switch (validationResult) {
case TRANSACTION_ALREADY_EXISTS: {
@@ -216,7 +292,7 @@ public class TransactionImporter extends Thread {
// All other invalid cases:
default: {
final String signature58 = Base58.encode(transactionData.getSignature());
LOGGER.trace(() -> String.format("Ignoring invalid (%s) %s transaction %s", validationResult.name(), transactionData.getType().name(), signature58));
LOGGER.debug(() -> String.format("Ignoring invalid (%s) %s transaction %s", validationResult.name(), transactionData.getType().name(), signature58));
Long now = NTP.getTime();
if (now != null && now - transactionData.getTimestamp() > INVALID_TRANSACTION_STALE_TIMEOUT) {
@@ -239,12 +315,11 @@ public class TransactionImporter extends Thread {
removeIncomingTransaction(transactionData.getSignature());
}
} finally {
LOGGER.debug("Finished processing incoming transactions queue");
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
LOGGER.debug("Finished importing {} incoming transaction{}", processedCount, (processedCount == 1 ? "" : "s"));
blockchainLock.unlock();
}
} catch (DataException e) {
LOGGER.error("Repository issue while processing incoming transactions", e);
LOGGER.error("Repository issue while importing incoming transactions", e);
}
}
@@ -277,8 +352,18 @@ public class TransactionImporter extends Thread {
byte[] signature = getTransactionMessage.getSignature();
try (final Repository repository = RepositoryManager.getRepository()) {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
// Firstly check the sig-valid transactions that are currently queued for import
TransactionData transactionData = this.getCachedSigValidTransactions().stream()
.filter(t -> Arrays.equals(signature, t.getSignature()))
.findFirst().orElse(null);
if (transactionData == null) {
// Not found in import queue, so try the database
transactionData = repository.getTransactionRepository().fromSignature(signature);
}
if (transactionData == null) {
// Still not found - so we don't have this transaction
LOGGER.debug(() -> String.format("Ignoring GET_TRANSACTION request from peer %s for unknown transaction %s", peer, Base58.encode(signature)));
// Send no response at all???
return;
@@ -289,7 +374,9 @@ public class TransactionImporter extends Thread {
if (!peer.sendMessage(transactionMessage))
peer.disconnect("failed to send transaction");
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while send transaction %s to peer %s", Base58.encode(signature), peer), e);
LOGGER.error(String.format("Repository issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
} catch (TransformationException e) {
LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
}
}


@@ -11,10 +11,7 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import org.qortal.utils.*;
import java.io.File;
import java.io.IOException;
@@ -137,7 +134,7 @@ public class ArbitraryDataCleanupManager extends Thread {
// Fetch the transaction data
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
if (arbitraryTransactionData == null) {
if (arbitraryTransactionData == null || arbitraryTransactionData.getService() == null) {
continue;
}
@@ -204,7 +201,7 @@ public class ArbitraryDataCleanupManager extends Thread {
if (completeFileExists && !allChunksExist) {
// We have the complete file but not the chunks, so let's convert it
LOGGER.info(String.format("Transaction %s has complete file but no chunks",
LOGGER.debug(String.format("Transaction %s has complete file but no chunks",
Base58.encode(arbitraryTransactionData.getSignature())));
ArbitraryTransactionUtils.convertFileToChunks(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
@@ -239,7 +236,7 @@ public class ArbitraryDataCleanupManager extends Thread {
// Delete random data associated with name if we're over our storage limit for this name
// Use the DELETION_THRESHOLD, for the same reasons as above
for (String followedName : storageManager.followedNames()) {
for (String followedName : ListUtils.followedNames()) {
if (isStopping) {
return;
}
@@ -349,6 +346,10 @@ public class ArbitraryDataCleanupManager extends Thread {
/**
* Iteratively walk through given directory and delete a single random file
*
* TODO: public data should be prioritized over private data
* (unless this node is part of a data market contract for that data).
* See: Service.privateServices() for a list of services containing private data.
*
* @param directory - the base directory
* @return boolean - whether a file was deleted
*/
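The method this javadoc documents is not shown in this hunk; the idea it describes is to walk the directory tree, pick one regular file at random and delete it. A minimal self-contained sketch of that idea only, not the project's actual implementation (which, per the TODO above, should eventually prefer public over private data):

    // Sketch: walk the directory, collect regular files, delete one at random.
    // Requires java.nio.file.*, java.util.* and java.util.stream.* imports.
    static boolean deleteOneRandomFile(Path directory) throws IOException {
        List<Path> files;
        try (Stream<Path> walk = Files.walk(directory)) {
            files = walk.filter(Files::isRegularFile).collect(Collectors.toList());
        }
        if (files.isEmpty())
            return false; // nothing left to delete

        Path victim = files.get(new Random().nextInt(files.size()));
        Files.delete(victim);
        return true;
    }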
@@ -487,7 +488,7 @@ public class ArbitraryDataCleanupManager extends Thread {
// Delete data relating to blocked names
String name = directory.getName();
if (name != null && storageManager.isNameBlocked(name)) {
if (name != null && ListUtils.isNameBlocked(name)) {
this.safeDeleteDirectory(directory, "blocked name");
}


@@ -20,6 +20,7 @@ import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.ListUtils;
import org.qortal.utils.NTP;
import org.qortal.utils.Triple;
@@ -67,6 +68,9 @@ public class ArbitraryDataFileListManager {
/** Maximum number of hops that a file list relay request is allowed to make */
public static int RELAY_REQUEST_MAX_HOPS = 4;
/** Minimum peer version to use relay */
public static String RELAY_MIN_PEER_VERSION = "3.4.0";
private ArbitraryDataFileListManager() {
}
@@ -120,19 +124,29 @@ public class ArbitraryDataFileListManager {
}
}
// Then allow another 5 attempts, each 5 minutes apart
if (timeSinceLastAttempt > 5 * 60 * 1000L) {
// We haven't tried for at least 5 minutes
// Then allow another 5 attempts, each 1 minute apart
if (timeSinceLastAttempt > 60 * 1000L) {
// We haven't tried for at least 1 minute
if (networkBroadcastCount < 5) {
// We've made less than 5 total attempts
if (networkBroadcastCount < 8) {
// We've made less than 8 total attempts
return true;
}
}
// From then on, only try once every 24 hours, to reduce network spam
if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) {
// We haven't tried for at least 24 hours
// Then allow another 8 attempts, each 15 minutes apart
if (timeSinceLastAttempt > 15 * 60 * 1000L) {
// We haven't tried for at least 15 minutes
if (networkBroadcastCount < 16) {
// We've made less than 16 total attempts
return true;
}
}
// From then on, only try once every 6 hours, to reduce network spam
if (timeSinceLastAttempt > 6 * 60 * 60 * 1000L) {
// We haven't tried for at least 6 hours
return true;
}
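Taken together, the new schedule replaces the old two tiers (five attempts five minutes apart, then once every 24 hours) with three: up to eight attempts a minute apart, up to sixteen total attempts fifteen minutes apart, then at most one attempt every six hours. Restated as a hypothetical helper taking the elapsed time and attempt count:

    // Sketch of the retry schedule above; the helper name and shape are illustrative.
    static boolean shouldRebroadcast(long timeSinceLastAttempt, int networkBroadcastCount) {
        // First 8 attempts: retry once at least 1 minute has passed
        if (timeSinceLastAttempt > 60 * 1000L && networkBroadcastCount < 8)
            return true;

        // Attempts 8 to 15: retry once at least 15 minutes have passed
        if (timeSinceLastAttempt > 15 * 60 * 1000L && networkBroadcastCount < 16)
            return true;

        // Thereafter: retry at most once every 6 hours, to reduce network spam
        return timeSinceLastAttempt > 6 * 60 * 60 * 1000L;
    }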
@@ -184,8 +198,8 @@ public class ArbitraryDataFileListManager {
}
}
if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) {
// We haven't tried for at least 24 hours
if (timeSinceLastAttempt > 60 * 60 * 1000L) {
// We haven't tried for at least 1 hour
return true;
}
@@ -245,8 +259,6 @@ public class ArbitraryDataFileListManager {
// Lookup file lists by signature (and optionally hashes)
public boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) {
byte[] digest = arbitraryTransactionData.getData();
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
byte[] signature = arbitraryTransactionData.getSignature();
String signature58 = Base58.encode(signature);
@@ -273,8 +285,7 @@ public class ArbitraryDataFileListManager {
// Find hashes that we are missing
try {
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
missingHashes = arbitraryDataFile.missingHashes();
} catch (DataException e) {
// Leave missingHashes as null, so that all hashes are requested
@@ -283,8 +294,8 @@ public class ArbitraryDataFileListManager {
LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to %d peers...", signature58, hashCount, handshakedPeers.size()));
// FUTURE: send our address as requestingPeer once enough peers have switched to the new protocol
String requestingPeer = null; // Network.getInstance().getOurExternalIpAddressAndPort();
// Send our address as requestingPeer, to allow for potential direct connections with seeds/peers
String requestingPeer = Network.getInstance().getOurExternalIpAddressAndPort();
// Build request
Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, missingHashes, now, 0, requestingPeer);
@@ -447,10 +458,9 @@ public class ArbitraryDataFileListManager {
arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
arbitraryDataFile.setMetadataHash(arbitraryTransactionData.getMetadataHash());
// // Load data file(s)
// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
//
// // Check all hashes exist
// for (byte[] hash : hashes) {
// //LOGGER.debug("Received hash {}", Base58.encode(hash));
@@ -494,7 +504,7 @@ public class ArbitraryDataFileListManager {
// Forwarding
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
boolean isBlocked = (arbitraryTransactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(arbitraryTransactionData.getName()));
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
if (!isBlocked) {
Peer requestingPeer = request.getB();
if (requestingPeer != null) {
@@ -511,18 +521,24 @@ public class ArbitraryDataFileListManager {
// Bump requestHops if it exists
if (requestHops != null) {
arbitraryDataFileListMessage.setRequestHops(++requestHops);
requestHops++;
}
ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
arbitraryDataFileListMessage.removeOptionalStats();
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}
forwardArbitraryDataFileListMessage.setId(message.getId());
// Forward to requesting peer
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
if (!requestingPeer.sendMessage(arbitraryDataFileListMessage)) {
if (!requestingPeer.sendMessage(forwardArbitraryDataFileListMessage)) {
requestingPeer.disconnect("failed to forward arbitrary data file list");
}
}
@@ -575,12 +591,8 @@ public class ArbitraryDataFileListManager {
// Check if we're even allowed to serve data for this transaction
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
// If the peer didn't supply a hash list, we need to return all hashes for this transaction
if (requestedHashes == null || requestedHashes.isEmpty()) {
@@ -631,6 +643,9 @@ public class ArbitraryDataFileListManager {
// We should only respond if we have at least one hash
if (hashes.size() > 0) {
// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);
// We have all the chunks, so update requests map to reflect that we've sent it
// There is no need to keep track of the request, as we can serve all the chunks
if (allChunksExist) {
@@ -639,16 +654,19 @@ public class ArbitraryDataFileListManager {
}
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
ArbitraryDataFileListMessage arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
hashes, NTP.getTime(), 0, ourAddress, true);
arbitraryDataFileListMessage.setId(message.getId());
ArbitraryDataFileListMessage arbitraryDataFileListMessage;
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
arbitraryDataFileListMessage.removeOptionalStats();
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
hashes, NTP.getTime(), 0, ourAddress, true);
}
arbitraryDataFileListMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileListMessage)) {
LOGGER.debug("Couldn't send list of hashes");
peer.disconnect("failed to send list of hashes");
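This version check appears twice in the handler (once when forwarding a relayed list, once when answering directly), and both sites follow the same pattern: peers that understand the extended format receive the request time, hop count, sender address and relay flag, while older peers get the original short form, since a reduced reply is better than none. A hedged sketch of that choice, using the two constructors shown above:

    // Choose the message shape based on the peer's advertised version (helper is illustrative).
    ArbitraryDataFileListMessage buildFileListMessage(Peer peer, byte[] signature, List<byte[]> hashes) {
        if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
            // Older peers only understand the short form without request stats
            return new ArbitraryDataFileListMessage(signature, hashes);
        }
        // Newer peers also receive request time, hop count, our address and the relay flag
        return new ArbitraryDataFileListMessage(signature, hashes, NTP.getTime(), 0,
                Network.getInstance().getOurExternalIpAddressAndPort(), true);
    }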
@@ -665,13 +683,12 @@ public class ArbitraryDataFileListManager {
}
// We may need to forward this request on
boolean isBlocked = (transactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName()));
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it
long requestTime = getArbitraryDataFileListMessage.getRequestTime();
int requestHops = getArbitraryDataFileListMessage.getRequestHops();
getArbitraryDataFileListMessage.setRequestHops(++requestHops);
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
long totalRequestTime = now - requestTime;
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
@@ -679,11 +696,15 @@ public class ArbitraryDataFileListManager {
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
relayGetArbitraryDataFileListMessage.setId(message.getId());
LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer -> broadcastPeer == peer ||
Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost())
? null : getArbitraryDataFileListMessage);
broadcastPeer ->
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
);
}
else {


@@ -1,5 +1,6 @@
package org.qortal.controller.arbitrary;
import com.google.common.net.InetAddresses;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
@@ -7,7 +8,6 @@ import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.ArbitraryDirectConnectionInfo;
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
import org.qortal.data.arbitrary.ArbitraryRelayInfo;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.data.network.PeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.network.Network;
@@ -55,6 +55,13 @@ public class ArbitraryDataFileManager extends Thread {
*/
private List<ArbitraryDirectConnectionInfo> directConnectionInfo = Collections.synchronizedList(new ArrayList<>());
/**
* Map to keep track of peers requesting QDN data that we hold.
* Key = peer address string, value = time of last request.
* This allows for additional "burst" connections beyond existing limits.
*/
private Map<String, Long> recentDataRequests = Collections.synchronizedMap(new HashMap<>());
public static int MAX_FILE_HASH_RESPONSES = 1000;
@@ -75,7 +82,7 @@ public class ArbitraryDataFileManager extends Thread {
try {
// Use a fixed thread pool to execute the arbitrary data file requests
int threadCount = 10;
int threadCount = 5;
ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount);
for (int i = 0; i < threadCount; i++) {
arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread());
@@ -109,6 +116,9 @@ public class ArbitraryDataFileManager extends Thread {
final long directConnectionInfoMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT;
directConnectionInfo.removeIf(entry -> entry.getTimestamp() < directConnectionInfoMinimumTimestamp);
final long recentDataRequestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RECENT_DATA_REQUESTS_TIMEOUT;
recentDataRequests.entrySet().removeIf(entry -> entry.getValue() < recentDataRequestMinimumTimestamp);
}
@@ -122,9 +132,7 @@ public class ArbitraryDataFileManager extends Thread {
List<byte[]> hashes) throws DataException {
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
boolean receivedAtLeastOneFile = false;
// Now fetch actual data from this peer
@@ -138,10 +146,10 @@ public class ArbitraryDataFileManager extends Thread {
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
LOGGER.debug("Requesting data file {} from peer {}", hash58, peer);
Long startTime = NTP.getTime();
ArbitraryDataFileMessage receivedArbitraryDataFileMessage = fetchArbitraryDataFile(peer, null, signature, hash, null);
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, null, signature, hash, null);
Long endTime = NTP.getTime();
if (receivedArbitraryDataFileMessage != null) {
LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer, (endTime-startTime));
if (receivedArbitraryDataFile != null) {
LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFile.getHash58(), peer, (endTime-startTime));
receivedAtLeastOneFile = true;
// Remove this hash from arbitraryDataFileHashResponses now that we have received it
@@ -183,11 +191,11 @@ public class ArbitraryDataFileManager extends Thread {
return receivedAtLeastOneFile;
}
private ArbitraryDataFileMessage fetchArbitraryDataFile(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
boolean fileAlreadyExists = existingFile.exists();
String hash58 = Base58.encode(hash);
Message message = null;
ArbitraryDataFile arbitraryDataFile;
// Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) {
@@ -195,10 +203,11 @@ public class ArbitraryDataFileManager extends Thread {
arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
Message response = null;
try {
message = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
response = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
} catch (InterruptedException e) {
// Will return below due to null message
// Will return below due to null response
}
arbitraryDataFileRequests.remove(hash58);
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
@@ -206,36 +215,42 @@ public class ArbitraryDataFileManager extends Thread {
// We may need to remove the file list request, if we have all the files for this transaction
this.handleFileListRequests(signature);
if (message == null) {
LOGGER.debug("Received null message from peer {}", peer);
if (response == null) {
LOGGER.debug("Received null response from peer {}", peer);
return null;
}
if (message.getType() != Message.MessageType.ARBITRARY_DATA_FILE) {
LOGGER.debug("Received message with invalid type: {} from peer {}", message.getType(), peer);
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
return null;
}
}
else {
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
} else {
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
arbitraryDataFile = existingFile;
}
if (arbitraryDataFile == null) {
// We don't have a file, so give up here
return null;
}
ArbitraryDataFileMessage arbitraryDataFileMessage = (ArbitraryDataFileMessage) message;
// We might want to forward the request to the peer that originally requested it
this.handleArbitraryDataFileForwarding(requestingPeer, message, originalMessage);
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
boolean isRelayRequest = (requestingPeer != null);
if (isRelayRequest) {
if (!fileAlreadyExists) {
// File didn't exist locally before the request, and it's a forwarding request, so delete it
LOGGER.debug("Deleting file {} because it was needed for forwarding only", Base58.encode(hash));
ArbitraryDataFile dataFile = arbitraryDataFileMessage.getArbitraryDataFile();
// Keep trying to delete the data until it is deleted, or we reach 10 attempts
dataFile.delete(10);
arbitraryDataFile.delete(10);
}
}
return arbitraryDataFileMessage;
return arbitraryDataFile;
}
private void handleFileListRequests(byte[] signature) {
@@ -275,7 +290,7 @@ public class ArbitraryDataFileManager extends Thread {
// The ID needs to match that of the original request
message.setId(originalMessage.getId());
if (!requestingPeer.sendMessage(message)) {
if (!requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
requestingPeer.disconnect("failed to forward arbitrary data file");
}
@@ -488,6 +503,45 @@ public class ArbitraryDataFileManager extends Thread {
}
// Peers requesting QDN data from us
/**
* Add an address string of a peer that is trying to request data from us.
* @param peerAddress
*/
public void addRecentDataRequest(String peerAddress) {
if (peerAddress == null) {
return;
}
Long now = NTP.getTime();
if (now == null) {
return;
}
// Make sure to remove the port, since it isn't guaranteed to match next time
String[] parts = peerAddress.split(":");
if (parts.length == 0) {
return;
}
String host = parts[0];
if (!InetAddresses.isInetAddress(host)) {
// Invalid host
return;
}
this.recentDataRequests.put(host, now);
}
public boolean isPeerRequestingData(String peerAddressWithoutPort) {
return this.recentDataRequests.containsKey(peerAddressWithoutPort);
}
public boolean hasPendingDataRequest() {
return !this.recentDataRequests.isEmpty();
}
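These methods are the public surface of the recentDataRequests map declared earlier in this file: entries are keyed by host only, because the source port of the next inbound connection is not guaranteed to match, and they expire after ARBITRARY_RECENT_DATA_REQUESTS_TIMEOUT. A hedged sketch of how a connection limiter might consult it to grant a "burst" slot (the caller shown here is illustrative, not part of this changeset):

    // Illustrative caller: allow an extra connection slot for a peer we recently served QDN data to.
    boolean allowExtraConnection(Peer peer) {
        String host = peer.getPeerData().getAddress().getHost(); // host only, no port
        return ArbitraryDataFileManager.getInstance().isPeerRequestingData(host);
    }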
// Network handlers
public void onNetworkGetArbitraryDataFileMessage(Peer peer, Message message) {
@@ -512,13 +566,16 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.trace("Hash {} exists", hash58);
// We can serve the file directly as we already have it
LOGGER.debug("Sending file {}...", arbitraryDataFile);
ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
arbitraryDataFileMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileMessage)) {
LOGGER.debug("Couldn't sent file");
if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
LOGGER.debug("Couldn't send file {}", arbitraryDataFile);
peer.disconnect("failed to send file");
}
LOGGER.debug("Sent file {}", arbitraryDataFile);
else {
LOGGER.debug("Sent file {}", arbitraryDataFile);
}
}
else if (relayInfo != null) {
LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
@@ -543,9 +600,10 @@ public class ArbitraryDataFileManager extends Thread {
// Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout
LOGGER.debug(String.format("Sending 'file unknown' response to peer %s for GET_FILE request for unknown file %s", peer, arbitraryDataFile));
// We'll send empty block summaries message as it's very short
// TODO: use a different message type here
Message fileUnknownMessage = new BlockSummariesMessage(Collections.emptyList());
// Send generic 'unknown' message as it's very short
Message fileUnknownMessage = peer.getPeersVersion() >= GenericUnknownMessage.MINIMUM_PEER_VERSION
? new GenericUnknownMessage()
: new BlockSummariesMessage(Collections.emptyList());
fileUnknownMessage.setId(message.getId());
if (!peer.sendMessage(fileUnknownMessage)) {
LOGGER.debug("Couldn't sent file-unknown response");


@@ -114,7 +114,7 @@ public class ArbitraryDataFileRequestThread implements Runnable {
return;
}
LOGGER.debug("Fetching file {} from peer {} via request thread...", hash58, peer);
LOGGER.trace("Fetching file {} from peer {} via request thread...", hash58, peer);
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, Arrays.asList(hash));
} catch (DataException e) {


@@ -16,7 +16,6 @@ import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.repository.DataException;
@@ -27,6 +26,7 @@ import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.ListUtils;
import org.qortal.utils.NTP;
public class ArbitraryDataManager extends Thread {
@@ -47,6 +47,9 @@ public class ArbitraryDataManager extends Thread {
/** Maximum time to hold direct peer connection information */
public static final long ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT = 2 * 60 * 1000L; // ms
/** Maximum time to hold information about recent data requests that we can fulfil */
public static final long ARBITRARY_RECENT_DATA_REQUESTS_TIMEOUT = 2 * 60 * 1000L; // ms
/** Maximum number of hops that an arbitrary signatures request is allowed to make */
private static int ARBITRARY_SIGNATURES_REQUEST_MAX_HOPS = 3;
@@ -169,7 +172,7 @@ public class ArbitraryDataManager extends Thread {
private void processNames() throws InterruptedException {
// Fetch latest list of followed names
List<String> followedNames = ResourceListManager.getInstance().getStringsInList("followedNames");
List<String> followedNames = ListUtils.followedNames();
if (followedNames == null || followedNames.isEmpty()) {
return;
}
@@ -395,6 +398,11 @@ public class ArbitraryDataManager extends Thread {
// Entrypoint to request new metadata from peers
public ArbitraryDataTransactionMetadata fetchMetadata(ArbitraryTransactionData arbitraryTransactionData) {
if (arbitraryTransactionData.getService() == null) {
// Can't fetch metadata without a valid service
return null;
}
ArbitraryDataResource resource = new ArbitraryDataResource(
arbitraryTransactionData.getName(),
ArbitraryDataFile.ResourceIdType.NAME,
@@ -486,7 +494,7 @@ public class ArbitraryDataManager extends Thread {
public void invalidateCache(ArbitraryTransactionData arbitraryTransactionData) {
String signature58 = Base58.encode(arbitraryTransactionData.getSignature());
if (arbitraryTransactionData.getName() != null) {
if (arbitraryTransactionData.getName() != null && arbitraryTransactionData.getService() != null) {
String resourceId = arbitraryTransactionData.getName().toLowerCase();
Service service = arbitraryTransactionData.getService();
String identifier = arbitraryTransactionData.getIdentifier();


@@ -5,15 +5,11 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import org.qortal.utils.*;
import java.io.IOException;
import java.io.UncheckedIOException;
@@ -48,7 +44,6 @@ public class ArbitraryDataStorageManager extends Thread {
private List<ArbitraryTransactionData> hostedTransactions;
private String searchQuery;
private List<ArbitraryTransactionData> searchResultsTransactions;
private static final long DIRECTORY_SIZE_CHECK_INTERVAL = 10 * 60 * 1000L; // 10 minutes
@@ -62,6 +57,8 @@ public class ArbitraryDataStorageManager extends Thread {
* This must be higher than STORAGE_FULL_THRESHOLD in order to avoid a fetch/delete loop. */
public static final double DELETION_THRESHOLD = 0.98f; // 98%
private static final long PER_NAME_STORAGE_MULTIPLIER = 4L;
public ArbitraryDataStorageManager() {
}
@@ -136,11 +133,11 @@ public class ArbitraryDataStorageManager extends Thread {
case ALL:
case VIEWED:
// If the policy includes viewed data, we can host it as long as it's not blocked
return !this.isNameBlocked(name);
return !ListUtils.isNameBlocked(name);
case FOLLOWED:
// If the policy is for followed data only, we have to be following it
return this.isFollowingName(name);
return ListUtils.isFollowingName(name);
// For NONE or all else, we shouldn't host this data
case NONE:
@@ -189,14 +186,14 @@ public class ArbitraryDataStorageManager extends Thread {
}
// Never fetch data from blocked names, even if they are followed
if (this.isNameBlocked(name)) {
if (ListUtils.isNameBlocked(name)) {
return false;
}
switch (Settings.getInstance().getStoragePolicy()) {
case FOLLOWED:
case FOLLOWED_OR_VIEWED:
return this.isFollowingName(name);
return ListUtils.isFollowingName(name);
case ALL:
return true;
@@ -236,7 +233,7 @@ public class ArbitraryDataStorageManager extends Thread {
* @return boolean - whether the resource is blocked or not
*/
public boolean isBlocked(ArbitraryTransactionData arbitraryTransactionData) {
return isNameBlocked(arbitraryTransactionData.getName());
return ListUtils.isNameBlocked(arbitraryTransactionData.getName());
}
private boolean isDataTypeAllowed(ArbitraryTransactionData arbitraryTransactionData) {
@@ -254,22 +251,6 @@ public class ArbitraryDataStorageManager extends Thread {
return true;
}
public boolean isNameBlocked(String name) {
return ResourceListManager.getInstance().listContains("blockedNames", name, false);
}
private boolean isFollowingName(String name) {
return ResourceListManager.getInstance().listContains("followedNames", name, false);
}
public List<String> followedNames() {
return ResourceListManager.getInstance().getStringsInList("followedNames");
}
private int followedNamesCount() {
return ResourceListManager.getInstance().getItemCountForList("followedNames");
}
public List<ArbitraryTransactionData> loadAllHostedTransactions(Repository repository) {
@@ -344,11 +325,6 @@ public class ArbitraryDataStorageManager extends Thread {
*/
public List<ArbitraryTransactionData> searchHostedTransactions(Repository repository, String query, Integer limit, Integer offset) {
// Load from results cache if we can (results that exists for the same query), to avoid disk reads
if (this.searchResultsTransactions != null && this.searchQuery.equals(query.toLowerCase())) {
return ArbitraryTransactionUtils.limitOffsetTransactions(this.searchResultsTransactions, limit, offset);
}
// Using cache if we can, to avoid disk reads
if (this.hostedTransactions == null) {
this.hostedTransactions = this.loadAllHostedTransactions(repository);
@@ -376,10 +352,7 @@ public class ArbitraryDataStorageManager extends Thread {
// Sort by newest first
searchResultsList.sort(Comparator.comparingLong(ArbitraryTransactionData::getTimestamp).reversed());
// Update cache
this.searchResultsTransactions = searchResultsList;
return ArbitraryTransactionUtils.limitOffsetTransactions(this.searchResultsTransactions, limit, offset);
return ArbitraryTransactionUtils.limitOffsetTransactions(searchResultsList, limit, offset);
}
/**
@@ -517,12 +490,17 @@ public class ArbitraryDataStorageManager extends Thread {
return false;
}
if (Settings.getInstance().getStoragePolicy() == StoragePolicy.ALL) {
// Using storage policy ALL, so don't limit anything per name
return true;
}
if (name == null) {
// This transaction doesn't have a name, so fall back to total space limitations
return true;
}
int followedNamesCount = this.followedNamesCount();
int followedNamesCount = ListUtils.followedNamesCount();
if (followedNamesCount == 0) {
// Not following any names, so we have space
return true;
@@ -552,14 +530,16 @@ public class ArbitraryDataStorageManager extends Thread {
}
public long storageCapacityPerName(double threshold) {
int followedNamesCount = this.followedNamesCount();
int followedNamesCount = ListUtils.followedNamesCount();
if (followedNamesCount == 0) {
// Not following any names, so we have the total space available
return this.getStorageCapacityIncludingThreshold(threshold);
}
double maxStorageCapacity = (double)this.storageCapacity * threshold;
long maxStoragePerName = (long)(maxStorageCapacity / (double)followedNamesCount);
// Some names won't need/use much space, so give all names a 4x multiplier to compensate
long maxStoragePerName = (long)(maxStorageCapacity / (double)followedNamesCount) * PER_NAME_STORAGE_MULTIPLIER;
return maxStoragePerName;
}
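As a worked example of the new per-name allowance (numbers are hypothetical, not defaults): with 256 GiB of configured capacity, the 0.98 deletion threshold and 20 followed names, each name may use roughly 256 GiB x 0.98 / 20 x 4, about 50 GiB. The multiplier deliberately over-commits the disk, on the assumption that most names use far less than their share, while the overall storage thresholds still bound total usage.

    // Hypothetical figures, plugged into the formula above:
    long storageCapacity = 256L * 1024 * 1024 * 1024;              // 256 GiB configured capacity
    double maxStorageCapacity = storageCapacity * 0.98;            // DELETION_THRESHOLD applied
    long maxStoragePerName = (long)(maxStorageCapacity / 20) * 4L; // 20 followed names, 4x multiplier
    // maxStoragePerName is roughly 50 GiB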
