Mirror of https://github.com/Qortal/qortal.git (synced 2025-11-12 04:47:04 +00:00)

Compare commits: 1053 commits
`.github/workflows/pr-testing.yml` (vendored): 6 changed lines

```
@@ -8,16 +8,16 @@ jobs:
   mavenTesting:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v3
     - name: Cache local Maven repository
-      uses: actions/cache@v2
+      uses: actions/cache@v3
       with:
         path: ~/.m2/repository
         key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
         restore-keys: |
           ${{ runner.os }}-maven-
     - name: Set up the Java JDK
-      uses: actions/setup-java@v2
+      uses: actions/setup-java@v3
       with:
         java-version: '11'
         distribution: 'adopt'
```

`.gitignore` (vendored): 2 changed lines

```
@@ -14,7 +14,6 @@
/.mvn.classpath
/notes*
/settings.json
/testnet*
/settings*.json
/testchain*.json
/run-testnet*.sh
@@ -28,6 +27,7 @@
/WindowsInstaller/Install Files/qortal.jar
/*.7z
/tmp
/wallets
/data*
/src/test/resources/arbitrary/*/.qortal/cache
apikey.txt
```

`Q-Apps.md` (new file: 848 lines added)

# Qortal Project - Q-Apps Documentation

## Introduction

Q-Apps are static web apps written in javascript, HTML, CSS, and other static assets. The key difference between a Q-App and a fully static site is its ability to interact with both the logged-in user and on-chain data. This is achieved using the API described in this document.


# Section 0: Basic QDN concepts

## Introduction to QDN resources

Each published item on QDN (Qortal Data Network) is referred to as a "resource". A resource could contain anything from a few characters of text to a multi-layered directory structure containing thousands of files.

Resources are registered on-chain; however, the data payload is generally stored off-chain and verified using an on-chain SHA-256 hash.

To publish a resource, a user must first register a name, and then include that name when publishing the data. Accounts without a registered name are unable to publish to QDN from a Q-App at this time.

Owning the name grants update privileges to the data. If that name is later sold or transferred, the permission to update that resource moves to the new owner.


## Name, service & identifier

Each QDN resource has 3 important fields:
- `name` - the registered name of the account that is publishing the data (which will hold update/edit privileges going forwards).<br /><br />
- `service` - the type of content (e.g. IMAGE or JSON). Different services have different validation rules. See [list of available services](#services).<br /><br />
- `identifier` - an optional string that allows more than one resource to exist for a given name/service combination. For example, the name `QortalDemo` may wish to publish multiple images; this can be achieved by using a different identifier string for each. The identifier is only unique to the name in question, so it doesn't matter if another name is using the same service and identifier string.


## Shared identifiers

Since an identifier can be used by multiple names, Q-App developers can take advantage of this to store data in a deterministic location.

An example of this is the user's avatar. This will always be published with service `THUMBNAIL` and identifier `qortal_avatar`, along with the user's name. So, an app can display the avatar of a user just by specifying their name when requesting the data. The same applies when publishing data.
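
For instance, a minimal sketch of loading any user's avatar given only their registered name (this uses the `FETCH_QDN_RESOURCE` action documented in Section 3 and mirrors the sample app in Section 4; the element id is illustrative):
```
// Sketch: fetch the avatar published by a given name and show it in an <img> element
async function showAvatarFor(name) {
    // Avatars are always THUMBNAIL + "qortal_avatar", so only the name varies
    let avatar = await qortalRequest({
        action: "FETCH_QDN_RESOURCE",
        name: name,
        service: "THUMBNAIL",
        identifier: "qortal_avatar",
        encoding: "base64"
    });
    document.getElementById("avatar").src = "data:image/png;base64," + avatar;
}
```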


## "Default" resources

A "default" resource refers to one without an identifier. For example, when a website is published via the UI, it will use the user's name and the service `WEBSITE`. These do not have an identifier, and are therefore the "default" website for this name. When requesting or publishing data without an identifier, apps can either omit the `identifier` key entirely, or include `"identifier": "default"` to indicate that the resource(s) being queried or published do not have an identifier.
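
For example, both of the following requests (using the `FETCH_QDN_RESOURCE` action covered in Section 3) target the same default website; the name and filepath are illustrative:
```
// Omitting the identifier key entirely...
let res1 = await qortalRequest({
    action: "FETCH_QDN_RESOURCE",
    name: "QortalDemo",
    service: "WEBSITE",
    filepath: "index.html"
});

// ...is equivalent to explicitly using the "default" keyword
let res2 = await qortalRequest({
    action: "FETCH_QDN_RESOURCE",
    name: "QortalDemo",
    service: "WEBSITE",
    identifier: "default",
    filepath: "index.html"
});
```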


<a name="services"></a>
## Available service types

Here is a list of currently available services that can be used in Q-Apps:

IMAGE,
THUMBNAIL,
VIDEO,
AUDIO,
PODCAST,
VOICE,
ARBITRARY_DATA,
JSON,
DOCUMENT,
LIST,
PLAYLIST,
METADATA,
BLOG,
BLOG_POST,
BLOG_COMMENT,
GIF_REPOSITORY,
ATTACHMENT,
FILE,
FILES,
CHAIN_DATA,
STORE,
PRODUCT,
OFFER,
COUPON,
CODE,
PLUGIN,
EXTENSION,
GAME,
ITEM,
NFT,
DATABASE,
SNAPSHOT,
COMMENT,
CHAIN_COMMENT,
WEBSITE,
APP,
QCHAT_ATTACHMENT,
QCHAT_IMAGE,
QCHAT_AUDIO,
QCHAT_VOICE


## Single vs multi-file resources

Some resources, such as those published with the `IMAGE` or `JSON` service, consist of a single file or piece of data (the image or the JSON string). This is the most common type of QDN resource, especially in the context of Q-Apps. These can be published by supplying a base64-encoded string containing the data.

Other resources, such as those published with the `WEBSITE`, `APP`, or `GIF_REPOSITORY` service, can contain multiple files and directories. Publishing these kinds of files is not yet available for Q-Apps, however it is possible to retrieve multi-file resources that are already published. When retrieving this data (via FETCH_QDN_RESOURCE), a `filepath` must be included to indicate the file that you would like to retrieve. There is no need to specify a filepath for single file resources, as these will automatically return the contents of the single file.


## App-specific data

Some apps may want to make all QDN data for a particular service available. However, others may prefer to only deal with data that has been published by their app (for instance, if a specific format/schema is being used).

Identifiers can be used to allow app developers to locate data that has been published by their app. The recommended approach for this is to use the app name as a prefix on all identifiers when publishing data.

For instance, an app called `MyApp` could allow users to publish JSON data. The app could choose to prefix all identifiers with the string `myapp_`, and then use a random string for each published resource (resulting in identifiers such as `myapp_qR5ndZ8v`). Then, to locate data that has potentially been published by users of MyApp, it can later search the QDN database for items with `"service": "JSON"` and `"identifier": "myapp_"`. The SEARCH_QDN_RESOURCES action has a `prefix` option in order to match identifiers beginning with the supplied string.

Note that QDN is a permissionless system, and therefore it's not possible to verify that a resource was actually published by the app. It is recommended that apps validate the contents of the resource to ensure it is formatted correctly, instead of making assumptions.
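
A minimal sketch of the `MyApp` lookup described above, using the `SEARCH_QDN_RESOURCES` action documented in Section 3 (the prefix string and result handling are illustrative):
```
// Sketch: find JSON resources whose identifiers start with this app's prefix
let results = await qortalRequest({
    action: "SEARCH_QDN_RESOURCES",
    service: "JSON",
    identifier: "myapp_", // the app's identifier prefix
    prefix: true,         // match identifiers beginning with the supplied string
    limit: 100,
    offset: 0,
    reverse: true
});

// QDN is permissionless, so validate each resource's contents before trusting it
for (const resource of results) {
    console.log(resource.name + " published " + resource.identifier);
}
```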


## Updating a resource

To update a resource, publish again using the same `name`, `service`, and `identifier` combination; the new data overwrites the old. Note that the authenticated account must currently own the name in order to publish an update.
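
In other words, an update is simply another `PUBLISH_QDN_RESOURCE` call (see Section 3) with an unchanged name/service/identifier combination; the values below are illustrative:
```
// Sketch: republishing with the same name, service, and identifier replaces the previous version
await qortalRequest({
    action: "PUBLISH_QDN_RESOURCE",
    name: "Demo",                  // must still be owned by the authenticated account
    service: "IMAGE",
    identifier: "myapp-image1234", // same identifier as the original publish
    data64: "new_base64_encoded_data"
});
```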


## Routing

If a non-existent `filepath` is accessed, the default behaviour of QDN is to return a `404: File not found` error. This includes anything published using the `WEBSITE` service.

However, routing is handled differently for anything published using the `APP` service.

For apps, QDN automatically sends all unhandled requests to the index file (generally index.html). This allows the app to use custom routing, as it is able to listen on any path. If a file exists at a path, the file itself will be served, and so the request won't be sent to the index file.

It's recommended that all apps return a 404 page if a request cannot be routed, as sketched below.
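
A minimal client-side sketch of that fallback (plain JavaScript; the routes and markup are illustrative and not part of the Q-Apps API):
```
// Sketch: since unhandled paths are served the index file, the app decides what to render
function show(html) {
    document.body.innerHTML = html;
}

const routes = {
    "/": () => show("<h1>Home</h1>"),
    "/about": () => show("<h1>About</h1>")
};

function handleRoute() {
    const render = routes[window.location.pathname];
    if (render) {
        render();
    } else {
        show("<h1>404 - page not found</h1>"); // the app's own 404 page
    }
}

window.addEventListener("load", handleRoute);
```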


# Section 1: Simple links and image loading via HTML

## Section 1a: Linking to other QDN websites / resources

The `qortal://` protocol can be used to access QDN data from within Qortal websites and apps. The basic format is as follows:
```
<a href="qortal://{service}/{name}/{identifier}/{path}">link text</a>
```

However, the system supports omitting the `identifier` and/or `path` components to allow for simpler URL formats.

A simple link to another website can be achieved with this HTML code:
```
<a href="qortal://WEBSITE/QortalDemo">link text</a>
```

To link to a specific page of another website:
```
<a href="qortal://WEBSITE/QortalDemo/minting-leveling/index.html">link text</a>
```

To link to a standalone resource, such as an avatar:
```
<a href="qortal://THUMBNAIL/QortalDemo/qortal_avatar">avatar</a>
```

For cases where you would prefer to explicitly include an identifier (to remove ambiguity) you can use the keyword `default` to access a resource that doesn't have an identifier. For instance:
```
<a href="qortal://WEBSITE/QortalDemo/default">link to root of website</a>
<a href="qortal://WEBSITE/QortalDemo/default/minting-leveling/index.html">link to subpage of website</a>
```


## Section 1b: Linking to other QDN images

The same applies for images, such as displaying an avatar:
```
<img src="qortal://THUMBNAIL/QortalDemo/qortal_avatar" />
```

...or even an image from an entirely different website:
```
<img src="qortal://WEBSITE/AlphaX/assets/img/logo.png" />
```


# Section 2: Integrating a Javascript app

Javascript apps allow for much more complex integrations with Qortal's blockchain data.

## Section 2a: Direct API calls

The standard [Qortal Core API](http://localhost:12391/api-documentation) is available to websites and apps, and can be called directly using a standard AJAX request, such as:
```
async function getNameInfo(name) {
    const response = await fetch("/names/" + name);
    const nameData = await response.json();
    console.log("nameData: " + JSON.stringify(nameData));
}
getNameInfo("QortalDemo");
```

However, this only works for read-only data, such as looking up transactions, names, balances, etc. Also, since the address of the logged-in account can't be retrieved from the core, apps can't show personalized data with this approach.


## Section 2b: User interaction via qortalRequest()

To take things a step further, the qortalRequest() function can be used to interact with the user, in order to:

- Request address and public key of the logged in account
- Publish data to QDN
- Send chat messages
- Join groups
- Deploy ATs (smart contracts)
- Send QORT or any supported foreign coin
- Add/remove items from lists

In addition to the above, qortalRequest() also supports many read-only functions that are also available via direct core API calls. Using qortalRequest() helps with futureproofing, as the core APIs can be modified without breaking functionality of existing Q-Apps.


### Making a request

Qortal core will automatically inject the `qortalRequest()` javascript function into all websites/apps, which returns a Promise. This can be used to fetch data or publish data to the Qortal blockchain. This functionality supports async/await, as well as try/catch error handling.

```
async function myfunction() {
    try {
        let res = await qortalRequest({
            action: "GET_ACCOUNT_DATA",
            address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
        });
        console.log(JSON.stringify(res)); // Log the response to the console

    } catch(e) {
        console.log("Error: " + JSON.stringify(e));
    }
}
myfunction();
```

### Timeouts

Request timeouts are handled automatically when using qortalRequest(). The timeout value will differ based on the action being used - see `getDefaultTimeout()` in [q-apps.js](src/main/resources/q-apps/q-apps.js) for the current values.

If a request times out it will throw an error - `The request timed out` - which can be handled by the Q-App.

It is also possible to specify a custom timeout using `qortalRequestWithTimeout(request, timeout)`, however this is discouraged. It's more reliable and futureproof to let the core handle the timeout values.
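
For example, a sketch of handling a request that may have timed out (the wrapper function is illustrative):
```
async function getAccountDataSafely(address) {
    try {
        return await qortalRequest({
            action: "GET_ACCOUNT_DATA",
            address: address
        });
    } catch (e) {
        // A timed-out request is surfaced as an error, so it can be caught and handled here
        console.log("Request failed (possibly timed out): " + JSON.stringify(e));
        return null;
    }
}
```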


# Section 3: qortalRequest Documentation

## Supported actions

Here is a list of currently supported actions:
- GET_USER_ACCOUNT
- GET_ACCOUNT_DATA
- GET_ACCOUNT_NAMES
- GET_NAME_DATA
- LIST_QDN_RESOURCES
- SEARCH_QDN_RESOURCES
- GET_QDN_RESOURCE_STATUS
- GET_QDN_RESOURCE_PROPERTIES
- GET_QDN_RESOURCE_METADATA
- GET_QDN_RESOURCE_URL
- LINK_TO_QDN_RESOURCE
- FETCH_QDN_RESOURCE
- PUBLISH_QDN_RESOURCE
- PUBLISH_MULTIPLE_QDN_RESOURCES
- GET_WALLET_BALANCE
- GET_BALANCE
- SEND_COIN
- SEARCH_CHAT_MESSAGES
- SEND_CHAT_MESSAGE
- LIST_GROUPS
- JOIN_GROUP
- DEPLOY_AT
- GET_AT
- GET_AT_DATA
- LIST_ATS
- FETCH_BLOCK
- FETCH_BLOCK_RANGE
- SEARCH_TRANSACTIONS
- GET_PRICE
- GET_LIST_ITEMS
- ADD_LIST_ITEMS
- DELETE_LIST_ITEM

More functionality will be added in the future.

## Example Requests

Here are some example requests for each of the above:

### Get address of logged in account
_Will likely require user approval_
```
let account = await qortalRequest({
    action: "GET_USER_ACCOUNT"
});
let address = account.address;
```

### Get public key of logged in account
_Will likely require user approval_
```
let account = await qortalRequest({
    action: "GET_USER_ACCOUNT"
});
let publicKey = account.publicKey;
```

### Get account data
```
let res = await qortalRequest({
    action: "GET_ACCOUNT_DATA",
    address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```

### Get names owned by account
```
let res = await qortalRequest({
    action: "GET_ACCOUNT_NAMES",
    address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```

### Get name data
```
let res = await qortalRequest({
    action: "GET_NAME_DATA",
    name: "QortalDemo"
});
```


### List QDN resources
```
let res = await qortalRequest({
    action: "LIST_QDN_RESOURCES",
    service: "THUMBNAIL",
    name: "QortalDemo", // Optional (exact match)
    identifier: "qortal_avatar", // Optional (exact match)
    default: true, // Optional
    includeStatus: false, // Optional - will take time to respond, so only request if necessary
    includeMetadata: false, // Optional - will take time to respond, so only request if necessary
    followedOnly: false, // Optional - include followed names only
    excludeBlocked: false, // Optional - exclude blocked content
    limit: 100,
    offset: 0,
    reverse: true
});
```

### Search QDN resources
```
let res = await qortalRequest({
    action: "SEARCH_QDN_RESOURCES",
    service: "THUMBNAIL",
    query: "search query goes here", // Optional - searches both "identifier" and "name" fields
    identifier: "search query goes here", // Optional - searches only the "identifier" field
    name: "search query goes here", // Optional - searches only the "name" field
    prefix: false, // Optional - if true, only the beginning of fields are matched in all of the above filters
    exactMatchNames: true, // Optional - if true, partial name matches are excluded
    default: false, // Optional - if true, only resources without identifiers are returned
    includeStatus: false, // Optional - will take time to respond, so only request if necessary
    includeMetadata: false, // Optional - will take time to respond, so only request if necessary
    nameListFilter: "QApp1234Subscriptions", // Optional - will only return results if they are from a name included in supplied list
    followedOnly: false, // Optional - include followed names only
    excludeBlocked: false, // Optional - exclude blocked content
    limit: 100,
    offset: 0,
    reverse: true
});
```

### Search QDN resources (multiple names)
```
let res = await qortalRequest({
    action: "SEARCH_QDN_RESOURCES",
    service: "THUMBNAIL",
    query: "search query goes here", // Optional - searches both "identifier" and "name" fields
    identifier: "search query goes here", // Optional - searches only the "identifier" field
    names: ["QortalDemo", "crowetic", "AlphaX"], // Optional - searches only the "name" field for any of the supplied names
    prefix: false, // Optional - if true, only the beginning of fields are matched in all of the above filters
    default: false, // Optional - if true, only resources without identifiers are returned
    includeStatus: false, // Optional - will take time to respond, so only request if necessary
    includeMetadata: false, // Optional - will take time to respond, so only request if necessary
    nameListFilter: "QApp1234Subscriptions", // Optional - will only return results if they are from a name included in supplied list
    followedOnly: false, // Optional - include followed names only
    excludeBlocked: false, // Optional - exclude blocked content
    limit: 100,
    offset: 0,
    reverse: true
});
```

### Fetch QDN single file resource
```
let res = await qortalRequest({
    action: "FETCH_QDN_RESOURCE",
    name: "QortalDemo",
    service: "THUMBNAIL",
    identifier: "qortal_avatar", // Optional. If omitted, the default resource is returned, or you can alternatively use the keyword "default"
    encoding: "base64", // Optional. If omitted, data is returned in raw form
    rebuild: false
});
```

### Fetch file from multi file QDN resource
Data is returned in the base64 format
```
let res = await qortalRequest({
    action: "FETCH_QDN_RESOURCE",
    name: "QortalDemo",
    service: "WEBSITE",
    identifier: "default", // Optional. If omitted, the default resource is returned, or you can alternatively request that using the keyword "default", as shown here
    filepath: "index.html", // Required only for resources containing more than one file
    rebuild: false
});
```

### Get QDN resource status
```
let res = await qortalRequest({
    action: "GET_QDN_RESOURCE_STATUS",
    name: "QortalDemo",
    service: "THUMBNAIL",
    identifier: "qortal_avatar" // Optional
});
```

### Get QDN resource properties
```
let res = await qortalRequest({
    action: "GET_QDN_RESOURCE_PROPERTIES",
    name: "QortalDemo",
    service: "THUMBNAIL",
    identifier: "qortal_avatar" // Optional
});
// Returns: filename, size, mimeType (where available)
```

### Get QDN resource metadata
```
let res = await qortalRequest({
    action: "GET_QDN_RESOURCE_METADATA",
    name: "QortalDemo",
    service: "THUMBNAIL",
    identifier: "qortal_avatar" // Optional
});
```

### Publish a single file to QDN
_Requires user approval_.<br />
Note: this publishes a single, base64-encoded file. Multi-file resource publishing (such as a WEBSITE or GIF_REPOSITORY) is not yet supported via a Q-App. It will be added in a future update.
```
await qortalRequest({
    action: "PUBLISH_QDN_RESOURCE",
    name: "Demo", // Publisher must own the registered name - use GET_ACCOUNT_NAMES for a list
    service: "IMAGE",
    identifier: "myapp-image1234", // Optional
    data64: "base64_encoded_data",
    // filename: "image.jpg", // Optional - to help apps determine the file's type
    // title: "Title", // Optional
    // description: "Description", // Optional
    // category: "TECHNOLOGY", // Optional
    // tag1: "any", // Optional
    // tag2: "strings", // Optional
    // tag3: "can", // Optional
    // tag4: "go", // Optional
    // tag5: "here" // Optional
});
```
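
Producing the `data64` value is left to the app; a minimal sketch using plain browser JavaScript (assuming a file input element exists in the page) is:
```
// Sketch: base64-encode a user-selected file so it can be supplied as "data64"
function fileToBase64(file) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        // readAsDataURL yields "data:<mime>;base64,<data>" - strip the prefix to keep only the base64 part
        reader.onload = () => resolve(reader.result.split(",")[1]);
        reader.onerror = reject;
        reader.readAsDataURL(file);
    });
}

// Usage (assumes <input type="file" id="fileInput"> in the page):
// const file = document.getElementById("fileInput").files[0];
// const data64 = await fileToBase64(file);
```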

### Publish multiple resources at once to QDN
_Requires user approval_.<br />
Note: each resource being published consists of a single, base64-encoded file, each in its own transaction. Useful for publishing two or more related things, such as a video and a video thumbnail.
```
await qortalRequest({
    action: "PUBLISH_MULTIPLE_QDN_RESOURCES",
    resources: [
        {
            name: "Demo", // Publisher must own the registered name - use GET_ACCOUNT_NAMES for a list
            service: "IMAGE",
            identifier: "myapp-image1234", // Optional
            data64: "base64_encoded_data",
            // filename: "image.jpg", // Optional - to help apps determine the file's type
            // title: "Title", // Optional
            // description: "Description", // Optional
            // category: "TECHNOLOGY", // Optional
            // tag1: "any", // Optional
            // tag2: "strings", // Optional
            // tag3: "can", // Optional
            // tag4: "go", // Optional
            // tag5: "here" // Optional
        },
        {
            // ... more resources here if needed ...
        }
    ]
});
```

### Get wallet balance (QORT)
_Requires user approval_
```
await qortalRequest({
    action: "GET_WALLET_BALANCE",
    coin: "QORT"
});
```


### Get address or asset balance
```
let res = await qortalRequest({
    action: "GET_BALANCE",
    address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```
```
let res = await qortalRequest({
    action: "GET_BALANCE",
    assetId: 1,
    address: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2"
});
```

### Send QORT to address
_Requires user approval_
```
await qortalRequest({
    action: "SEND_COIN",
    coin: "QORT",
    destinationAddress: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2",
    amount: 1.00000000 // 1 QORT
});
```

### Send foreign coin to address
_Requires user approval_
```
await qortalRequest({
    action: "SEND_COIN",
    coin: "LTC",
    destinationAddress: "LSdTvMHRm8sScqwCi6x9wzYQae8JeZhx6y",
    amount: 1.00000000, // 1 LTC
    fee: 0.00000020 // fee per byte
});
```

### Search or list chat messages
```
let res = await qortalRequest({
    action: "SEARCH_CHAT_MESSAGES",
    before: 999999999999999,
    after: 0,
    txGroupId: 0, // Optional (must specify either txGroupId or two involving addresses)
    // involving: ["QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2", "QSefrppsDCsZebcwrqiM1gNbWq7YMDXtG2"], // Optional (must specify either txGroupId or two involving addresses)
    // reference: "reference", // Optional
    // chatReference: "chatreference", // Optional
    // hasChatReference: true, // Optional
    encoding: "BASE64", // Optional (defaults to BASE58 if omitted)
    limit: 100,
    offset: 0,
    reverse: true
});
```

### Send a group chat message
_Requires user approval_
```
await qortalRequest({
    action: "SEND_CHAT_MESSAGE",
    groupId: 0,
    message: "Test"
});
```

### Send a private chat message
_Requires user approval_
```
await qortalRequest({
    action: "SEND_CHAT_MESSAGE",
    destinationAddress: "QZLJV7wbaFyxaoZQsjm6rb9MWMiDzWsqM2",
    message: "Test"
});
```

### List groups
```
let res = await qortalRequest({
    action: "LIST_GROUPS",
    limit: 100,
    offset: 0,
    reverse: true
});
```

### Join a group
_Requires user approval_
```
await qortalRequest({
    action: "JOIN_GROUP",
    groupId: 100
});
```


### Deploy an AT
_Requires user approval_
```
let res = await qortalRequest({
    action: "DEPLOY_AT",
    creationBytes: "12345", // Must be Base58 encoded
    name: "test name",
    description: "test description",
    type: "test type",
    tags: "test tags",
    amount: 1.00000000, // 1 QORT
    assetId: 0,
    // fee: 0.002 // optional - will use default fee if excluded
});
```

### Get AT info
```
let res = await qortalRequest({
    action: "GET_AT",
    atAddress: "ASRUsCjk6fa5bujv3oWYmWaVqNtvxydpPH"
});
```

### Get AT data bytes (base58 encoded)
```
let res = await qortalRequest({
    action: "GET_AT_DATA",
    atAddress: "ASRUsCjk6fa5bujv3oWYmWaVqNtvxydpPH"
});
```

### List ATs by functionality
```
let res = await qortalRequest({
    action: "LIST_ATS",
    codeHash58: "4KdJETRAdymE7dodDmJbf5d9L1bp4g5Nxky8m47TBkvA",
    isExecutable: true,
    limit: 100,
    offset: 0,
    reverse: true
});
```

### Fetch block by signature
```
let res = await qortalRequest({
    action: "FETCH_BLOCK",
    signature: "875yGFUy1zHV2hmxNWzrhtn9S1zkeD7SQppwdXFysvTXrankCHCz4iyAUgCBM3GjvibbnyRQpriuy1cyu953U1u5uQdzuH3QjQivi9UVwz86z1Akn17MGd5Z5STjpDT7248K6vzMamuqDei57Znonr8GGgn8yyyABn35CbZUCeAuXju"
});
```

### Fetch block by height
```
let res = await qortalRequest({
    action: "FETCH_BLOCK",
    height: "1139850"
});
```

### Fetch a range of blocks
```
let res = await qortalRequest({
    action: "FETCH_BLOCK_RANGE",
    height: "1139800",
    count: 20,
    reverse: false
});
```

### Search transactions
```
let res = await qortalRequest({
    action: "SEARCH_TRANSACTIONS",
    // startBlock: 1139000,
    // blockLimit: 1000,
    txGroupId: 0,
    txType: [
        "PAYMENT",
        "REWARD_SHARE"
    ],
    confirmationStatus: "CONFIRMED",
    limit: 10,
    offset: 0,
    reverse: false
});
```

### Get an estimate of the QORT price
```
let res = await qortalRequest({
    action: "GET_PRICE",
    blockchain: "LITECOIN",
    // maxtrades: 10,
    inverse: true
});
```

### Get URL to load a QDN resource
Note: this returns a "Resource does not exist" error if a non-existent resource is requested.
```
let url = await qortalRequest({
    action: "GET_QDN_RESOURCE_URL",
    service: "THUMBNAIL",
    name: "QortalDemo",
    identifier: "qortal_avatar"
    // path: "filename.jpg" // optional - not needed if resource contains only one file
});
```

### Get URL to load a QDN website
Note: this returns a "Resource does not exist" error if a non-existent resource is requested.
```
let url = await qortalRequest({
    action: "GET_QDN_RESOURCE_URL",
    service: "WEBSITE",
    name: "QortalDemo"
});
```

### Get URL to load a specific file from a QDN website
Note: this returns a "Resource does not exist" error if a non-existent resource is requested.
```
let url = await qortalRequest({
    action: "GET_QDN_RESOURCE_URL",
    service: "WEBSITE",
    name: "AlphaX",
    path: "/assets/img/logo.png"
});
```

### Link/redirect to another QDN website
Note: an alternate method is to include `<a href="qortal://WEBSITE/QortalDemo">link text</a>` within your HTML code.
```
let res = await qortalRequest({
    action: "LINK_TO_QDN_RESOURCE",
    service: "WEBSITE",
    name: "QortalDemo"
});
```

### Link/redirect to a specific path of another QDN website
Note: an alternate method is to include `<a href="qortal://WEBSITE/QortalDemo/minting-leveling/index.html">link text</a>` within your HTML code.
```
let res = await qortalRequest({
    action: "LINK_TO_QDN_RESOURCE",
    service: "WEBSITE",
    name: "QortalDemo",
    path: "/minting-leveling/index.html"
});
```

### Get the contents of a list
_Requires user approval_
```
let res = await qortalRequest({
    action: "GET_LIST_ITEMS",
    list_name: "followedNames"
});
```

### Add one or more items to a list
_Requires user approval_
```
let res = await qortalRequest({
    action: "ADD_LIST_ITEMS",
    list_name: "blockedNames",
    items: ["QortalDemo"]
});
```

### Delete a single item from a list
_Requires user approval_.
Items must be deleted one at a time.
```
let res = await qortalRequest({
    action: "DELETE_LIST_ITEM",
    list_name: "blockedNames",
    item: "QortalDemo"
});
```


# Section 4: Examples

## Sample App

Here is a sample application to display the logged-in user's avatar:
```
<html>
<head>
    <script>
        async function showAvatar() {
            try {
                // Get QORT address of logged in account
                let account = await qortalRequest({
                    action: "GET_USER_ACCOUNT"
                });
                let address = account.address;
                console.log("address: " + address);

                // Get names owned by this account
                let names = await qortalRequest({
                    action: "GET_ACCOUNT_NAMES",
                    address: address
                });
                console.log("names: " + JSON.stringify(names));

                if (names.length == 0) {
                    console.log("User has no registered names");
                    return;
                }

                // Download base64-encoded avatar of the first registered name
                let avatar = await qortalRequest({
                    action: "FETCH_QDN_RESOURCE",
                    name: names[0].name,
                    service: "THUMBNAIL",
                    identifier: "qortal_avatar",
                    encoding: "base64"
                });
                console.log("Avatar size: " + avatar.length + " bytes");

                // Display the avatar image on the screen
                document.getElementById("avatar").src = "data:image/png;base64," + avatar;

            } catch(e) {
                console.log("Error: " + JSON.stringify(e));
            }
        }
    </script>
</head>
<body onload="showAvatar()">
    <img width="500" id="avatar" />
</body>
</html>
```


# Section 5: Testing and Development

Publishing an in-development app to mainnet isn't recommended. There are several options for developing and testing a Q-App before publishing to mainnet:

### Preview mode

Select "Preview" in the UI after choosing the zip. This allows for full Q-App testing without the need to publish any data.


### Testnets

For an end-to-end test of Q-App publishing, you can use the official testnet, or set up a single node testnet of your own (often referred to as devnet) on your local machine. See [Single Node Testnet Quick Start Guide](testnet/README.md#quick-start).


### Debugging

It is recommended that you develop and test in a web browser, to allow access to the javascript console. To do this:
1. Open the UI app, then minimise it.
2. In a Chromium-based web browser, visit: http://localhost:12388/
3. Log in to your account and then preview your app/website.
4. Go to `View > Developer > JavaScript Console`. Here you can monitor console logs, errors, and network requests from your app, in the same way as any other web-app.
69
TestNets.md
69
TestNets.md
@@ -1,69 +0,0 @@
|
||||
# How to build a testnet
|
||||
|
||||
## Create testnet blockchain config
|
||||
|
||||
- You can begin by copying the mainnet blockchain config `src/main/resources/blockchain.json`
|
||||
- Insert `"isTestChain": true,` after the opening `{`
|
||||
- Modify testnet genesis block
|
||||
|
||||
### Testnet genesis block
|
||||
|
||||
- Set `timestamp` to a nearby future value, e.g. 15 mins from 'now'
|
||||
This is to give yourself enough time to set up other testnet nodes
|
||||
- Retain the initial `ISSUE_ASSET` transactions!
|
||||
- Add `ACCOUNT_FLAGS` transactions with `"andMask": -1, "orMask": 1, "xorMask": 0` to create founders
|
||||
- Add at least one `REWARD_SHARE` transaction otherwise no-one can mint initial blocks!
|
||||
You will need to calculate `rewardSharePublicKey` (and private key),
|
||||
or make a new account on mainnet and use self-share key values
|
||||
- Add `ACCOUNT_LEVEL` transactions to set initial level of accounts as needed
|
||||
- Add `GENESIS` transactions to add QORT/LEGACY_QORA funds to accounts as needed
|
||||
|
||||
## Testnet `settings.json`
|
||||
|
||||
- Create a new `settings-test.json`
|
||||
- Make sure to add `"isTestNet": true,`
|
||||
- Make sure to reference testnet blockchain config file: `"blockchainConfig": "testchain.json",`
|
||||
- It is a good idea to use a separate database: `"repositoryPath": "db-testnet",`
|
||||
- You might also need to add `"bitcoinNet": "TEST3",` and `"litecoinNet": "TEST3",`
|
||||
|
||||
## Other nodes
|
||||
|
||||
- Copy `testchain.json` and `settings-test.json` to other nodes
|
||||
- Alternatively, you can run multiple nodes on the same machine by:
|
||||
* Copying `settings-test.json` to `settings-test-1.json`
|
||||
* Configure different `repositoryPath`
|
||||
* Configure use of different ports:
|
||||
+ `"apiPort": 22391,`
|
||||
+ `"listenPort": 22392,`
|
||||

## Starting-up

- Start up at least as many nodes as `minBlockchainPeers` (or adjust this value instead)
- It is probably best to perform API call `DELETE /peers/known` first, to clear out any previously-known peers
- Add other nodes via API call `POST /peers <peer-hostname-or-IP>`
- Add a minting private key to node(s) via API call `POST /admin/mintingaccounts <minting-private-key>`
  This key must have a corresponding `REWARD_SHARE` transaction in the testnet genesis block
- Wait for the genesis block timestamp to pass
- A node should mint block 2 approximately 60 seconds after the genesis block timestamp
- Other testnet nodes will sync as long as there are at least `minBlockchainPeers` peers with an "up-to-date" chain
- You can also use API call `POST /admin/forcesync <connected-peer-IP-and-port>` on stuck nodes
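
As a sketch, these calls can be made with `curl` against a node's testnet API port (62391 by default; see Tools below); the hostname, key and peer address are placeholders, and depending on your settings you may also need to supply your node's API key:

```
# Remove any previously-known peers
curl -X DELETE http://localhost:62391/peers/known

# Tell this node about another testnet node (placeholder hostname)
curl -X POST http://localhost:62391/peers -d "testnet-node-2.example.com"

# Add a minting private key that has a REWARD_SHARE entry in the genesis block
curl -X POST http://localhost:62391/admin/mintingaccounts -d "<minting-private-key>"

# Force a stuck node to sync from a specific connected peer
curl -X POST http://localhost:62391/admin/forcesync -d "<connected-peer-IP-and-port>"
```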

## Dealing with stuck chain

Maybe your nodes have been offline and no one has minted a recent testnet block.
Your options are:

- Start a new testnet from scratch
- Fire up your testnet node(s)
- Force one of your nodes to mint by:
    + Setting a debugger breakpoint on `Settings.getMinBlockchainPeers()`
    + When the breakpoint is hit, changing `this.minBlockchainPeers` to zero, then continuing
- Once one of your nodes has minted blocks up to 'now', you can use "forcesync" on the other nodes

## Tools

- `qort` tool, but use the `-t` option for the default testnet API port (62391)
- `qort` tool, but first set a shell variable: `export BASE_URL=some-node-hostname-or-ip:port`
- `qort` tool, but prepend a one-time shell variable: `BASE_URL=some-node-hostname-or-ip:port qort ......`
- `peer-heights`, but use the `-t` option, or the `BASE_URL` shell variable as above

@@ -17,10 +17,10 @@
|
||||
<ROW Property="Manufacturer" Value="Qortal"/>
|
||||
<ROW Property="MsiLogging" MultiBuildValue="DefaultBuild:vp"/>
|
||||
<ROW Property="NTP_GOOD" Value="false"/>
|
||||
<ROW Property="ProductCode" Value="1033:{8CB28EE4-A407-4CED-A7C5-5138BCBF4B13} 1049:{C416F381-0C39-459C-A571-AC686D57B808} 2052:{A3DBA289-D2F0-4A3F-94DC-B89F5A6465DE} 2057:{893B13A5-45B6-4E49-8690-3FE46BCA50DF} " Type="16"/>
|
||||
<ROW Property="ProductCode" Value="1033:{CB85115E-ECCE-4B3D-BB7F-6251A2764922} 1049:{09AC1C62-4E33-4312-826A-38F597ED1B17} 2052:{3CF701B3-E118-4A31-A4B7-156CEA19FBCC} 2057:{468F337D-0EF8-41D1-B5DE-4EEE66BA2AF6} " Type="16"/>
|
||||
<ROW Property="ProductLanguage" Value="2057"/>
|
||||
<ROW Property="ProductName" Value="Qortal"/>
|
||||
<ROW Property="ProductVersion" Value="3.1.0" Type="32"/>
|
||||
<ROW Property="ProductVersion" Value="3.8.5" Type="32"/>
|
||||
<ROW Property="RECONFIG_NTP" Value="true"/>
|
||||
<ROW Property="REMOVE_BLOCKCHAIN" Value="YES" Type="4"/>
|
||||
<ROW Property="REPAIR_BLOCKCHAIN" Value="YES" Type="4"/>
|
||||
@@ -212,7 +212,7 @@
|
||||
<ROW Component="ADDITIONAL_LICENSE_INFO_71" ComponentId="{12A3ADBE-BB7A-496C-8869-410681E6232F}" Directory_="jdk.zipfs_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_71" Type="0"/>
|
||||
<ROW Component="ADDITIONAL_LICENSE_INFO_8" ComponentId="{D53AD95E-CF96-4999-80FC-5812277A7456}" Directory_="java.naming_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_8" Type="0"/>
|
||||
<ROW Component="ADDITIONAL_LICENSE_INFO_9" ComponentId="{6B7EA9B0-5D17-47A8-B78C-FACE86D15E01}" Directory_="java.net.http_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_9" Type="0"/>
|
||||
<ROW Component="AI_CustomARPName" ComponentId="{551D2FA5-BE83-4DA1-A96E-7D15C517F9FF}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
|
||||
<ROW Component="AI_CustomARPName" ComponentId="{094B5D07-2258-4A39-9917-2E2F7F6E210B}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
|
||||
<ROW Component="AI_ExePath" ComponentId="{3644948D-AE0B-41BB-9FAF-A79E70490A08}" Directory_="APPDIR" Attributes="260" KeyPath="AI_ExePath"/>
|
||||
<ROW Component="APPDIR" ComponentId="{680DFDDE-3FB4-47A5-8FF5-934F576C6F91}" Directory_="APPDIR" Attributes="0"/>
|
||||
<ROW Component="AccessBridgeCallbacks.h" ComponentId="{288055D1-1062-47A3-AA44-5601B4E38AED}" Directory_="bridge_Dir" Attributes="0" KeyPath="AccessBridgeCallbacks.h" Type="0"/>
|
||||
@@ -1173,7 +1173,7 @@
|
||||
<ROW Action="AI_STORE_LOCATION" Type="51" Source="ARPINSTALLLOCATION" Target="[APPDIR]"/>
|
||||
<ROW Action="AI_SetPermissions" Type="11265" Source="userAccounts.dll" Target="OnSetPermissions" WithoutSeq="true"/>
|
||||
<ROW Action="CustomizeLog4j2PropertiesScript" Type="3109" Target="Script Text" TargetUnformatted="var actionData = Session.Property("CustomActionData"); var actionDataArray = actionData.split("|"); var appDir = actionDataArray[0]; var dataFolder = actionDataArray[1] + actionDataArray[2] + "\\"; var ForReading = 1, ForWriting = 2, ForAppending = 8; var fso = new ActiveXObject("Scripting.FileSystemObject"); // Make copy fso.CopyFile(appDir + "log4j2.properties", appDir + "log4j2-orig.properties", true); // overwrite // Rewrite %AppDir%\log4j2.properties to update logfile storage path var fin = fso.OpenTextFile(appDir + "log4j2-orig.properties", ForReading, false); // no create var fout = fso.OpenTextFile(appDir + "log4j2.properties", ForWriting, true); // can create // Copy lines with rewriting where necessary while( !fin.AtEndOfStream ) { 	var line = fin.ReadLine(); 	var start = line.indexOf("property.dirname"); 	if (start > 0) { 		// line: # property.dirname = ...appdata... 		// uncomment/replace this line for Windows 		fout.WriteLine( "property.dirname = " + dataFolder.split('\\').join('\\\\') ); 	} else { 		// not found - output verbatim 		fout.WriteLine( line ); 	} } fin.Close(); fout.Close(); " AdditionalSeq="AI_DATA_SETTER_4"/>
|
||||
<ROW Action="CustomizeSettingsJsonScript" Type="3109" Target="Script Text" TargetUnformatted="var actionData = Session.Property("CustomActionData"); var actionDataArray = actionData.split("|"); var appDir = actionDataArray[0]; var dataFolder = actionDataArray[1] + actionDataArray[2] + "\\"; var ForReading = 1, ForWriting = 2, ForAppending = 8; var fso = new ActiveXObject("Scripting.FileSystemObject"); // Create basic %APPDIR%\settings.json with path to real settings.json in dataFolder var fts = fso.OpenTextFile(appDir + "settings.json", ForWriting, true); fts.WriteLine( "{" ); // We need to escape Windows path backslashes to keep JSON valid fts.WriteLine( " \"userPath\": \"" + dataFolder.split('\\').join('\\\\') + "\"" ); fts.WriteLine( "}" ); fts.Close(); // Make copy fso.CopyFile(dataFolder + "settings.json", dataFolder + "settings-orig.json", true); // overwrite // Rewrite settings.json to update repository path var fin = fso.OpenTextFile(dataFolder + "settings-orig.json", ForReading, false); var fout = fso.OpenTextFile(dataFolder + "settings.json", ForWriting, true); // First line should contain opening brace fout.WriteLine( fin.ReadLine() ); // Append our entries fout.WriteLine( " \"repositoryPath\": \"" + dataFolder.split('\\').join('\\\\') + "db\"," ); fout.WriteLine( " \"dataPath\": \"" + dataFolder.split('\\').join('\\\\') + "data\"," ); // copy rest of settings while( !fin.AtEndOfStream ) { 	fout.WriteLine( fin.ReadLine() ); } fin.Close(); fout.Close(); " AdditionalSeq="AI_DATA_SETTER_3"/>
|
||||
<ROW Action="CustomizeSettingsJsonScript" Type="3109" Target="Script Text" TargetUnformatted="var actionData = Session.Property("CustomActionData"); var actionDataArray = actionData.split("|"); var appDir = actionDataArray[0]; var dataFolder = actionDataArray[1] + actionDataArray[2] + "\\"; var ForReading = 1, ForWriting = 2, ForAppending = 8; var fso = new ActiveXObject("Scripting.FileSystemObject"); // Create basic %APPDIR%\settings.json with path to real settings.json in dataFolder var fts = fso.OpenTextFile(appDir + "settings.json", ForWriting, true); fts.WriteLine( "{" ); // We need to escape Windows path backslashes to keep JSON valid fts.WriteLine( " \"userPath\": \"" + dataFolder.split('\\').join('\\\\') + "\"" ); fts.WriteLine( "}" ); fts.Close(); // Make copy fso.CopyFile(dataFolder + "settings.json", dataFolder + "settings-orig.json", true); // overwrite // Rewrite settings.json to update repository path var fin = fso.OpenTextFile(dataFolder + "settings-orig.json", ForReading, false); var fout = fso.OpenTextFile(dataFolder + "settings.json", ForWriting, true); // First line should contain opening brace fout.WriteLine( fin.ReadLine() ); // Append our entries fout.WriteLine( " \"repositoryPath\": \"" + dataFolder.split('\\').join('\\\\') + "db\"," ); fout.WriteLine( " \"dataPath\": \"" + dataFolder.split('\\').join('\\\\') + "data\"," ); fout.WriteLine( " \"walletsPath\": \"" + dataFolder.split('\\').join('\\\\') + "wallets\"," ); fout.WriteLine( " \"listsPath\": \"" + dataFolder.split('\\').join('\\\\') + "lists\"," ); // copy rest of settings while( !fin.AtEndOfStream ) { 	fout.WriteLine( fin.ReadLine() ); } fin.Close(); fout.Close(); " AdditionalSeq="AI_DATA_SETTER_3"/>
|
||||
<ROW Action="DetectRunningProcess" Type="1" Source="aicustact.dll" Target="DetectProcess" Options="3" AdditionalSeq="AI_DATA_SETTER_8"/>
|
||||
<ROW Action="DetectW32Time" Type="1" Source="aicustact.dll" Target="DetectService" Options="3" AdditionalSeq="AI_DATA_SETTER_11"/>
|
||||
<ROW Action="NTP_config" Type="3090" Source="ntpcfg.bat"/>
|
||||
|
||||
BIN
lib/com/dosse/WaifUPnP/1.1/WaifUPnP-1.1.jar
Normal file
Binary file not shown.
9
lib/com/dosse/WaifUPnP/1.1/WaifUPnP-1.1.pom
Normal file
@@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.dosse</groupId>
|
||||
<artifactId>WaifUPnP</artifactId>
|
||||
<version>1.1</version>
|
||||
<description>POM was created from install:install-file</description>
|
||||
</project>
|
||||
12
lib/com/dosse/WaifUPnP/maven-metadata-local.xml
Normal file
@@ -0,0 +1,12 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<metadata>
|
||||
<groupId>com.dosse</groupId>
|
||||
<artifactId>WaifUPnP</artifactId>
|
||||
<versioning>
|
||||
<release>1.1</release>
|
||||
<versions>
|
||||
<version>1.1</version>
|
||||
</versions>
|
||||
<lastUpdated>20220218200127</lastUpdated>
|
||||
</versioning>
|
||||
</metadata>
|
||||
BIN
lib/org/ciyam/AT/1.4.0/AT-1.4.0.jar
Normal file
Binary file not shown.
9
lib/org/ciyam/AT/1.4.0/AT-1.4.0.pom
Normal file
@@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<version>1.4.0</version>
|
||||
<description>POM was created from install:install-file</description>
|
||||
</project>
|
||||
@@ -3,14 +3,15 @@
|
||||
<groupId>org.ciyam</groupId>
|
||||
<artifactId>AT</artifactId>
|
||||
<versioning>
|
||||
<release>1.3.8</release>
|
||||
<release>1.4.0</release>
|
||||
<versions>
|
||||
<version>1.3.4</version>
|
||||
<version>1.3.5</version>
|
||||
<version>1.3.6</version>
|
||||
<version>1.3.7</version>
|
||||
<version>1.3.8</version>
|
||||
<version>1.4.0</version>
|
||||
</versions>
|
||||
<lastUpdated>20200925114415</lastUpdated>
|
||||
<lastUpdated>20221105114346</lastUpdated>
|
||||
</versioning>
|
||||
</metadata>
|
||||
|
||||
62
pom.xml
@@ -3,15 +3,15 @@
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.qortal</groupId>
|
||||
<artifactId>qortal</artifactId>
|
||||
<version>3.1.1</version>
|
||||
<version>4.0.1</version>
|
||||
<packaging>jar</packaging>
|
||||
<properties>
|
||||
<skipTests>true</skipTests>
|
||||
<altcoinj.version>bf9fb80</altcoinj.version>
|
||||
<altcoinj.version>7dc8c6f</altcoinj.version>
|
||||
<bitcoinj.version>0.15.10</bitcoinj.version>
|
||||
<bouncycastle.version>1.64</bouncycastle.version>
|
||||
<bouncycastle.version>1.69</bouncycastle.version>
|
||||
<build.timestamp>${maven.build.timestamp}</build.timestamp>
|
||||
<ciyam-at.version>1.3.8</ciyam-at.version>
|
||||
<ciyam-at.version>1.4.0</ciyam-at.version>
|
||||
<commons-net.version>3.6</commons-net.version>
|
||||
<commons-text.version>1.8</commons-text.version>
|
||||
<commons-io.version>2.6</commons-io.version>
|
||||
@@ -21,6 +21,9 @@
|
||||
<dagger.version>1.2.2</dagger.version>
|
||||
<guava.version>28.1-jre</guava.version>
|
||||
<hsqldb.version>2.5.1</hsqldb.version>
|
||||
<homoglyph.version>1.2.1</homoglyph.version>
|
||||
<icu4j.version>70.1</icu4j.version>
|
||||
<upnp.version>1.1</upnp.version>
|
||||
<jersey.version>2.29.1</jersey.version>
|
||||
<jetty.version>9.4.29.v20200521</jetty.version>
|
||||
<log4j.version>2.17.1</log4j.version>
|
||||
@@ -31,6 +34,9 @@
|
||||
<package-info-maven-plugin.version>1.1.0</package-info-maven-plugin.version>
|
||||
<jsoup.version>1.13.1</jsoup.version>
|
||||
<java-diff-utils.version>4.10</java-diff-utils.version>
|
||||
<grpc.version>1.45.1</grpc.version>
|
||||
<protobuf.version>3.19.4</protobuf.version>
|
||||
<simplemagic.version>1.17</simplemagic.version>
|
||||
</properties>
|
||||
<build>
|
||||
<sourceDirectory>src/main/java</sourceDirectory>
|
||||
@@ -142,6 +148,7 @@
|
||||
tagsSorter: "alpha",
|
||||
operationsSorter:
|
||||
"alpha",
|
||||
validatorUrl: false,
|
||||
</value>
|
||||
</replacement>
|
||||
</replacements>
|
||||
@@ -299,6 +306,7 @@
|
||||
implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
|
||||
<mainClass>org.qortal.controller.Controller</mainClass>
|
||||
<manifestEntries>
|
||||
<Multi-Release>true</Multi-Release>
|
||||
<Class-Path>. ..</Class-Path>
|
||||
</manifestEntries>
|
||||
</transformer>
|
||||
@@ -427,6 +435,12 @@
|
||||
<artifactId>AT</artifactId>
|
||||
<version>${ciyam-at.version}</version>
|
||||
</dependency>
|
||||
<!-- UPnP support -->
|
||||
<dependency>
|
||||
<groupId>com.dosse</groupId>
|
||||
<artifactId>WaifUPnP</artifactId>
|
||||
<version>${upnp.version}</version>
|
||||
</dependency>
|
||||
<!-- Bitcoin support -->
|
||||
<dependency>
|
||||
<groupId>org.bitcoinj</groupId>
|
||||
@@ -435,7 +449,7 @@
|
||||
</dependency>
|
||||
<!-- For Litecoin, etc. support, requires bitcoinj -->
|
||||
<dependency>
|
||||
<groupId>com.github.jjos2372</groupId>
|
||||
<groupId>com.github.qortal</groupId>
|
||||
<artifactId>altcoinj</artifactId>
|
||||
<version>${altcoinj.version}</version>
|
||||
</dependency>
|
||||
@@ -561,7 +575,18 @@
|
||||
<dependency>
|
||||
<groupId>net.codebox</groupId>
|
||||
<artifactId>homoglyph</artifactId>
|
||||
<version>1.2.0</version>
|
||||
<version>${homoglyph.version}</version>
|
||||
</dependency>
|
||||
<!-- Unicode support -->
|
||||
<dependency>
|
||||
<groupId>com.ibm.icu</groupId>
|
||||
<artifactId>icu4j</artifactId>
|
||||
<version>${icu4j.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.ibm.icu</groupId>
|
||||
<artifactId>icu4j-charset</artifactId>
|
||||
<version>${icu4j.version}</version>
|
||||
</dependency>
|
||||
<!-- Jetty -->
|
||||
<dependency>
|
||||
@@ -685,5 +710,30 @@
|
||||
<artifactId>java-diff-utils</artifactId>
|
||||
<version>${java-diff-utils.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-netty</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-protobuf</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-stub</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.protobuf</groupId>
|
||||
<artifactId>protobuf-java</artifactId>
|
||||
<version>${protobuf.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.j256.simplemagic</groupId>
|
||||
<artifactId>simplemagic</artifactId>
|
||||
<version>${simplemagic.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
|
||||
4499
src/main/java/cash/z/wallet/sdk/rpc/CompactFormats.java
Normal file
File diff suppressed because it is too large
1341
src/main/java/cash/z/wallet/sdk/rpc/CompactTxStreamerGrpc.java
Normal file
File diff suppressed because it is too large
3854
src/main/java/cash/z/wallet/sdk/rpc/Darkside.java
Normal file
File diff suppressed because it is too large
1086
src/main/java/cash/z/wallet/sdk/rpc/DarksideStreamerGrpc.java
Normal file
File diff suppressed because it is too large
15106
src/main/java/cash/z/wallet/sdk/rpc/Service.java
Normal file
File diff suppressed because it is too large
100
src/main/java/com/rust/litewalletjni/LiteWalletJni.java
Normal file
@@ -0,0 +1,100 @@
|
||||
/*
|
||||
* Copyright (C) 2009 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* LiteWalletJni code based on https://github.com/PirateNetwork/cordova-plugin-litewallet
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
* Copyright (c) 2020 Zero Currency Coin
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in all
|
||||
* copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
package com.rust.litewalletjni;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.PirateChainWalletController;
|
||||
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
|
||||
public class LiteWalletJni {
|
||||
|
||||
protected static final Logger LOGGER = LogManager.getLogger(LiteWalletJni.class);
|
||||
|
||||
public static native String initlogging();
|
||||
public static native String initnew(final String serveruri, final String params, final String saplingOutputb64, final String saplingSpendb64);
|
||||
public static native String initfromseed(final String serveruri, final String params, final String seed, final String birthday, final String saplingOutputb64, final String saplingSpendb64);
|
||||
public static native String initfromb64(final String serveruri, final String params, final String datab64, final String saplingOutputb64, final String saplingSpendb64);
|
||||
public static native String save();
|
||||
|
||||
public static native String execute(final String cmd, final String args);
|
||||
public static native String getseedphrase();
|
||||
public static native String getseedphrasefromentropyb64(final String entropy64);
|
||||
public static native String checkseedphrase(final String input);
|
||||
|
||||
|
||||
private static boolean loaded = false;
|
||||
|
||||
public static void loadLibrary() {
|
||||
if (loaded) {
|
||||
return;
|
||||
}
|
||||
String osName = System.getProperty("os.name");
|
||||
String osArchitecture = System.getProperty("os.arch");
|
||||
|
||||
LOGGER.info("OS Name: {}", osName);
|
||||
LOGGER.info("OS Architecture: {}", osArchitecture);
|
||||
|
||||
try {
|
||||
String libFileName = PirateChainWalletController.getRustLibFilename();
|
||||
if (libFileName == null) {
|
||||
LOGGER.info("Library not found for OS: {}, arch: {}", osName, osArchitecture);
|
||||
return;
|
||||
}
|
||||
|
||||
Path libPath = Paths.get(PirateChainWalletController.getRustLibOuterDirectory().toString(), libFileName);
|
||||
System.load(libPath.toAbsolutePath().toString());
|
||||
loaded = true;
|
||||
}
|
||||
catch (UnsatisfiedLinkError e) {
|
||||
LOGGER.info("Unable to load library");
|
||||
}
|
||||
}
|
||||
|
||||
public static boolean isLoaded() {
|
||||
return loaded;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import java.nio.file.Paths;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.security.Security;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
@@ -18,6 +19,8 @@ import org.qortal.api.ApiRequest;
|
||||
import org.qortal.controller.AutoUpdate;
|
||||
import org.qortal.settings.Settings;
|
||||
|
||||
import static org.qortal.controller.AutoUpdate.AGENTLIB_JVM_HOLDER_ARG;
|
||||
|
||||
public class ApplyUpdate {
|
||||
|
||||
static {
|
||||
@@ -37,7 +40,7 @@ public class ApplyUpdate {
|
||||
private static final String JAVA_TOOL_OPTIONS_NAME = "JAVA_TOOL_OPTIONS";
|
||||
private static final String JAVA_TOOL_OPTIONS_VALUE = "-XX:MaxRAMFraction=4";
|
||||
|
||||
private static final long CHECK_INTERVAL = 10 * 1000L; // ms
|
||||
private static final long CHECK_INTERVAL = 30 * 1000L; // ms
|
||||
private static final int MAX_ATTEMPTS = 12;
|
||||
|
||||
public static void main(String[] args) {
|
||||
@@ -197,6 +200,11 @@ public class ApplyUpdate {
|
||||
// JVM arguments
|
||||
javaCmd.addAll(ManagementFactory.getRuntimeMXBean().getInputArguments());
|
||||
|
||||
// Reapply any retained, but disabled, -agentlib JVM arg
|
||||
javaCmd = javaCmd.stream()
|
||||
.map(arg -> arg.replace(AGENTLIB_JVM_HOLDER_ARG, "-agentlib"))
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// Call mainClass in JAR
|
||||
javaCmd.addAll(Arrays.asList("-jar", JAR_FILENAME));
|
||||
|
||||
@@ -205,7 +213,7 @@ public class ApplyUpdate {
|
||||
}
|
||||
|
||||
try {
|
||||
LOGGER.info(() -> String.format("Restarting node with: %s", String.join(" ", javaCmd)));
|
||||
LOGGER.info(String.format("Restarting node with: %s", String.join(" ", javaCmd)));
|
||||
|
||||
ProcessBuilder processBuilder = new ProcessBuilder(javaCmd);
|
||||
|
||||
@@ -214,8 +222,15 @@ public class ApplyUpdate {
|
||||
processBuilder.environment().put(JAVA_TOOL_OPTIONS_NAME, JAVA_TOOL_OPTIONS_VALUE);
|
||||
}
|
||||
|
||||
processBuilder.start();
|
||||
} catch (IOException e) {
|
||||
// New process will inherit our stdout and stderr
|
||||
processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
|
||||
processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
|
||||
|
||||
Process process = processBuilder.start();
|
||||
|
||||
// Nothing to pipe to new process, so close output stream (process's stdin)
|
||||
process.getOutputStream().close();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(String.format("Failed to restart node (BAD): %s", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,11 +8,13 @@ import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.controller.LiteNode;
|
||||
import org.qortal.data.account.AccountBalanceData;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.account.RewardShareData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
@XmlAccessorType(XmlAccessType.NONE) // Stops JAX-RS errors when unmarshalling blockchain config
|
||||
@@ -59,7 +61,17 @@ public class Account {
|
||||
// Balance manipulations - assetId is 0 for QORT
|
||||
|
||||
public long getConfirmedBalance(long assetId) throws DataException {
|
||||
AccountBalanceData accountBalanceData = this.repository.getAccountRepository().getBalance(this.address, assetId);
|
||||
AccountBalanceData accountBalanceData;
|
||||
|
||||
if (Settings.getInstance().isLite()) {
|
||||
// Lite nodes request data from peers instead of the local db
|
||||
accountBalanceData = LiteNode.getInstance().fetchAccountBalance(this.address, assetId);
|
||||
}
|
||||
else {
|
||||
// All other node types fetch from the local db
|
||||
accountBalanceData = this.repository.getAccountRepository().getBalance(this.address, assetId);
|
||||
}
|
||||
|
||||
if (accountBalanceData == null)
|
||||
return 0;
|
||||
|
||||
@@ -199,12 +211,24 @@ public class Account {
|
||||
if (level != null && level >= BlockChain.getInstance().getMinAccountLevelToMint())
|
||||
return true;
|
||||
|
||||
if (Account.isFounder(accountData.getFlags()))
|
||||
// Founders can always mint, unless they have a penalty
|
||||
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Returns account's blockMinted (0+) or null if account not found in repository. */
|
||||
public Integer getBlocksMinted() throws DataException {
|
||||
return this.repository.getAccountRepository().getMintedBlockCount(this.address);
|
||||
}
|
||||
|
||||
/** Returns account's blockMintedPenalty or null if account not found in repository. */
|
||||
public Integer getBlocksMintedPenalty() throws DataException {
|
||||
return this.repository.getAccountRepository().getBlocksMintedPenaltyCount(this.address);
|
||||
}
|
||||
|
||||
|
||||
/** Returns whether account can build reward-shares.
|
||||
* <p>
|
||||
* To be able to create reward-shares, the account needs to pass at least one of these tests:<br>
|
||||
@@ -225,7 +249,7 @@ public class Account {
|
||||
if (level != null && level >= BlockChain.getInstance().getMinAccountLevelToRewardShare())
|
||||
return true;
|
||||
|
||||
if (Account.isFounder(accountData.getFlags()))
|
||||
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
@@ -253,7 +277,7 @@ public class Account {
|
||||
/**
|
||||
* Returns 'effective' minting level, or zero if account does not exist/cannot mint.
|
||||
* <p>
|
||||
* For founder accounts, this returns "founderEffectiveMintingLevel" from blockchain config.
|
||||
* For founder accounts with no penalty, this returns "founderEffectiveMintingLevel" from blockchain config.
|
||||
*
|
||||
* @return 0+
|
||||
* @throws DataException
|
||||
@@ -263,7 +287,8 @@ public class Account {
|
||||
if (accountData == null)
|
||||
return 0;
|
||||
|
||||
if (Account.isFounder(accountData.getFlags()))
|
||||
// Founders are assigned a different effective minting level, as long as they have no penalty
|
||||
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
|
||||
return BlockChain.getInstance().getFounderEffectiveMintingLevel();
|
||||
|
||||
return accountData.getLevel();
|
||||
@@ -271,8 +296,6 @@ public class Account {
|
||||
|
||||
/**
|
||||
* Returns 'effective' minting level, or zero if reward-share does not exist.
|
||||
* <p>
|
||||
* For founder accounts, this returns "founderEffectiveMintingLevel" from blockchain config.
|
||||
*
|
||||
* @param repository
|
||||
* @param rewardSharePublicKey
|
||||
@@ -288,5 +311,26 @@ public class Account {
|
||||
Account rewardShareMinter = new Account(repository, rewardShareData.getMinter());
|
||||
return rewardShareMinter.getEffectiveMintingLevel();
|
||||
}
|
||||
/**
|
||||
* Returns 'effective' minting level, with a fix for the zero level.
|
||||
* <p>
|
||||
* For founder accounts with no penalty, this returns "founderEffectiveMintingLevel" from blockchain config.
|
||||
*
|
||||
* @param repository
|
||||
* @param rewardSharePublicKey
|
||||
* @return 0+
|
||||
* @throws DataException
|
||||
*/
|
||||
public static int getRewardShareEffectiveMintingLevelIncludingLevelZero(Repository repository, byte[] rewardSharePublicKey) throws DataException {
|
||||
// Find actual minter and get their effective minting level
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey);
|
||||
if (rewardShareData == null)
|
||||
return 0;
|
||||
|
||||
else if (!rewardShareData.getMinter().equals(rewardShareData.getRecipient())) // Sponsorship reward share
|
||||
return 0;
|
||||
|
||||
Account rewardShareMinter = new Account(repository, rewardShareData.getMinter());
|
||||
return rewardShareMinter.getEffectiveMintingLevel();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,15 +11,15 @@ public class PrivateKeyAccount extends PublicKeyAccount {
|
||||
private final Ed25519PrivateKeyParameters edPrivateKeyParams;
|
||||
|
||||
/**
|
||||
* Create PrivateKeyAccount using byte[32] seed.
|
||||
* Create PrivateKeyAccount using byte[32] private key.
|
||||
*
|
||||
* @param seed
|
||||
* @param privateKey
|
||||
* byte[32] used to create private/public key pair
|
||||
* @throws IllegalArgumentException
|
||||
* if passed invalid seed
|
||||
* if passed invalid privateKey
|
||||
*/
|
||||
public PrivateKeyAccount(Repository repository, byte[] seed) {
|
||||
this(repository, new Ed25519PrivateKeyParameters(seed, 0));
|
||||
public PrivateKeyAccount(Repository repository, byte[] privateKey) {
|
||||
this(repository, new Ed25519PrivateKeyParameters(privateKey, 0));
|
||||
}
|
||||
|
||||
private PrivateKeyAccount(Repository repository, Ed25519PrivateKeyParameters edPrivateKeyParams) {
|
||||
@@ -37,10 +37,6 @@ public class PrivateKeyAccount extends PublicKeyAccount {
|
||||
return this.privateKey;
|
||||
}
|
||||
|
||||
public static byte[] toPublicKey(byte[] seed) {
|
||||
return new Ed25519PrivateKeyParameters(seed, 0).generatePublicKey().getEncoded();
|
||||
}
|
||||
|
||||
public byte[] sign(byte[] message) {
|
||||
return Crypto.sign(this.edPrivateKeyParams, message);
|
||||
}
|
||||
|
||||
367
src/main/java/org/qortal/account/SelfSponsorshipAlgoV1.java
Normal file
@@ -0,0 +1,367 @@
|
||||
package org.qortal.account;
|
||||
|
||||
import org.qortal.api.resource.TransactionsResource;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.naming.NameData;
|
||||
import org.qortal.data.transaction.*;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.transaction.Transaction.TransactionType;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class SelfSponsorshipAlgoV1 {
|
||||
|
||||
private final Repository repository;
|
||||
private final String address;
|
||||
private final AccountData accountData;
|
||||
private final long snapshotTimestamp;
|
||||
private final boolean override;
|
||||
|
||||
private int registeredNameCount = 0;
|
||||
private int suspiciousCount = 0;
|
||||
private int suspiciousPercent = 0;
|
||||
private int consolidationCount = 0;
|
||||
private int bulkIssuanceCount = 0;
|
||||
private int recentSponsorshipCount = 0;
|
||||
|
||||
private List<RewardShareTransactionData> sponsorshipRewardShares = new ArrayList<>();
|
||||
private final Map<String, List<TransactionData>> paymentsByAddress = new HashMap<>();
|
||||
private final Set<String> sponsees = new LinkedHashSet<>();
|
||||
private Set<String> consolidatedAddresses = new LinkedHashSet<>();
|
||||
private final Set<String> zeroTransactionAddreses = new LinkedHashSet<>();
|
||||
private final Set<String> penaltyAddresses = new LinkedHashSet<>();
|
||||
|
||||
public SelfSponsorshipAlgoV1(Repository repository, String address, long snapshotTimestamp, boolean override) throws DataException {
|
||||
this.repository = repository;
|
||||
this.address = address;
|
||||
this.accountData = this.repository.getAccountRepository().getAccount(this.address);
|
||||
this.snapshotTimestamp = snapshotTimestamp;
|
||||
this.override = override;
|
||||
}
|
||||
|
||||
public String getAddress() {
|
||||
return this.address;
|
||||
}
|
||||
|
||||
public Set<String> getPenaltyAddresses() {
|
||||
return this.penaltyAddresses;
|
||||
}
|
||||
|
||||
|
||||
public void run() throws DataException {
|
||||
if (this.accountData == null) {
|
||||
// Nothing to do
|
||||
return;
|
||||
}
|
||||
|
||||
this.fetchSponsorshipRewardShares();
|
||||
if (this.sponsorshipRewardShares.isEmpty()) {
|
||||
// Nothing to do
|
||||
return;
|
||||
}
|
||||
|
||||
this.findConsolidatedRewards();
|
||||
this.findBulkIssuance();
|
||||
this.findRegisteredNameCount();
|
||||
this.findRecentSponsorshipCount();
|
||||
|
||||
int score = this.calculateScore();
|
||||
if (score <= 0 && !override) {
|
||||
return;
|
||||
}
|
||||
|
||||
String newAddress = this.getDestinationAccount(this.address);
|
||||
while (newAddress != null) {
|
||||
// Found destination account
|
||||
this.penaltyAddresses.add(newAddress);
|
||||
|
||||
// Run algo for this address, but in "override" mode because it has already been flagged
|
||||
SelfSponsorshipAlgoV1 algoV1 = new SelfSponsorshipAlgoV1(this.repository, newAddress, this.snapshotTimestamp, true);
|
||||
algoV1.run();
|
||||
this.penaltyAddresses.addAll(algoV1.getPenaltyAddresses());
|
||||
|
||||
newAddress = this.getDestinationAccount(newAddress);
|
||||
}
|
||||
|
||||
this.penaltyAddresses.add(this.address);
|
||||
|
||||
if (this.override || this.recentSponsorshipCount < 20) {
|
||||
this.penaltyAddresses.addAll(this.consolidatedAddresses);
|
||||
this.penaltyAddresses.addAll(this.zeroTransactionAddreses);
|
||||
}
|
||||
else {
|
||||
this.penaltyAddresses.addAll(this.sponsees);
|
||||
}
|
||||
}
|
||||
|
||||
private String getDestinationAccount(String address) throws DataException {
|
||||
List<TransactionData> transferPrivsTransactions = fetchTransferPrivsForAddress(address);
|
||||
if (transferPrivsTransactions.isEmpty()) {
|
||||
// No TRANSFER_PRIVS transactions for this address
|
||||
return null;
|
||||
}
|
||||
|
||||
AccountData accountData = this.repository.getAccountRepository().getAccount(address);
|
||||
if (accountData == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (TransactionData transactionData : transferPrivsTransactions) {
|
||||
TransferPrivsTransactionData transferPrivsTransactionData = (TransferPrivsTransactionData) transactionData;
|
||||
if (Arrays.equals(transferPrivsTransactionData.getSenderPublicKey(), accountData.getPublicKey())) {
|
||||
return transferPrivsTransactionData.getRecipient();
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private void findConsolidatedRewards() throws DataException {
|
||||
List<String> sponseesThatSentRewards = new ArrayList<>();
|
||||
Map<String, Integer> paymentRecipients = new HashMap<>();
|
||||
|
||||
// Collect outgoing payments of each sponsee
|
||||
for (String sponseeAddress : this.sponsees) {
|
||||
|
||||
// Firstly fetch all payments for address, since the functions below depend on this data
|
||||
this.fetchPaymentsForAddress(sponseeAddress);
|
||||
|
||||
// Check if the address has zero relevant transactions
|
||||
if (this.hasZeroTransactions(sponseeAddress)) {
|
||||
this.zeroTransactionAddreses.add(sponseeAddress);
|
||||
}
|
||||
|
||||
// Get payment recipients
|
||||
List<String> allPaymentRecipients = this.fetchOutgoingPaymentRecipientsForAddress(sponseeAddress);
|
||||
if (allPaymentRecipients.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
sponseesThatSentRewards.add(sponseeAddress);
|
||||
|
||||
List<String> addressesPaidByThisSponsee = new ArrayList<>();
|
||||
for (String paymentRecipient : allPaymentRecipients) {
|
||||
if (addressesPaidByThisSponsee.contains(paymentRecipient)) {
|
||||
// We already tracked this association - don't allow multiple to stack up
|
||||
continue;
|
||||
}
|
||||
addressesPaidByThisSponsee.add(paymentRecipient);
|
||||
|
||||
// Increment count for this recipient, or initialize to 1 if not present
|
||||
if (paymentRecipients.computeIfPresent(paymentRecipient, (k, v) -> v + 1) == null) {
|
||||
paymentRecipients.put(paymentRecipient, 1);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Exclude addresses with a low number of payments
|
||||
Map<String, Integer> filteredPaymentRecipients = paymentRecipients.entrySet().stream()
|
||||
.filter(p -> p.getValue() != null && p.getValue() >= 10)
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
|
||||
|
||||
// Now check how many sponsees have sent to this subset of addresses
|
||||
Map<String, Integer> sponseesThatConsolidatedRewards = new HashMap<>();
|
||||
for (String sponseeAddress : sponseesThatSentRewards) {
|
||||
List<String> allPaymentRecipients = this.fetchOutgoingPaymentRecipientsForAddress(sponseeAddress);
|
||||
// Remove any that aren't to one of the flagged recipients (i.e. consolidation)
|
||||
allPaymentRecipients.removeIf(r -> !filteredPaymentRecipients.containsKey(r));
|
||||
|
||||
int count = allPaymentRecipients.size();
|
||||
if (count == 0) {
|
||||
continue;
|
||||
}
|
||||
if (sponseesThatConsolidatedRewards.computeIfPresent(sponseeAddress, (k, v) -> v + count) == null) {
|
||||
sponseesThatConsolidatedRewards.put(sponseeAddress, count);
|
||||
}
|
||||
}
|
||||
|
||||
// Remove sponsees that have only sent a low number of payments to the filtered addresses
|
||||
Map<String, Integer> filteredSponseesThatConsolidatedRewards = sponseesThatConsolidatedRewards.entrySet().stream()
|
||||
.filter(p -> p.getValue() != null && p.getValue() >= 2)
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
|
||||
|
||||
this.consolidationCount = sponseesThatConsolidatedRewards.size();
|
||||
this.consolidatedAddresses = new LinkedHashSet<>(filteredSponseesThatConsolidatedRewards.keySet());
|
||||
this.suspiciousCount = this.consolidationCount + this.zeroTransactionAddreses.size();
|
||||
this.suspiciousPercent = (int)(this.suspiciousCount / (float) this.sponsees.size() * 100);
|
||||
}
|
||||
|
||||
private void findBulkIssuance() {
|
||||
Long lastTimestamp = null;
|
||||
for (RewardShareTransactionData rewardShareTransactionData : sponsorshipRewardShares) {
|
||||
long timestamp = rewardShareTransactionData.getTimestamp();
|
||||
if (timestamp >= this.snapshotTimestamp) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (lastTimestamp != null) {
|
||||
if (timestamp - lastTimestamp < 3*60*1000L) {
|
||||
this.bulkIssuanceCount++;
|
||||
}
|
||||
}
|
||||
lastTimestamp = timestamp;
|
||||
}
|
||||
}
|
||||
|
||||
private void findRegisteredNameCount() throws DataException {
|
||||
int registeredNameCount = 0;
|
||||
for (String sponseeAddress : sponsees) {
|
||||
List<NameData> names = repository.getNameRepository().getNamesByOwner(sponseeAddress);
|
||||
for (NameData name : names) {
|
||||
if (name.getRegistered() < this.snapshotTimestamp) {
|
||||
registeredNameCount++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
this.registeredNameCount = registeredNameCount;
|
||||
}
|
||||
|
||||
private void findRecentSponsorshipCount() {
|
||||
final long referenceTimestamp = this.snapshotTimestamp - (365 * 24 * 60 * 60 * 1000L);
|
||||
int recentSponsorshipCount = 0;
|
||||
for (RewardShareTransactionData rewardShare : sponsorshipRewardShares) {
|
||||
if (rewardShare.getTimestamp() >= referenceTimestamp) {
|
||||
recentSponsorshipCount++;
|
||||
}
|
||||
}
|
||||
this.recentSponsorshipCount = recentSponsorshipCount;
|
||||
}
|
||||
|
||||
private int calculateScore() {
|
||||
final int suspiciousMultiplier = (this.suspiciousCount >= 100) ? this.suspiciousPercent : 1;
|
||||
final int nameMultiplier = (this.sponsees.size() >= 50 && this.registeredNameCount == 0) ? 2 : 1;
|
||||
final int consolidationMultiplier = Math.max(this.consolidationCount, 1);
|
||||
final int bulkIssuanceMultiplier = Math.max(this.bulkIssuanceCount / 2, 1);
|
||||
final int offset = 9;
|
||||
return suspiciousMultiplier * nameMultiplier * consolidationMultiplier * bulkIssuanceMultiplier - offset;
|
||||
}
|
||||
|
||||
private void fetchSponsorshipRewardShares() throws DataException {
|
||||
List<RewardShareTransactionData> sponsorshipRewardShares = new ArrayList<>();
|
||||
|
||||
// Define relevant transactions
|
||||
List<TransactionType> txTypes = List.of(TransactionType.REWARD_SHARE);
|
||||
List<TransactionData> transactionDataList = fetchTransactions(repository, txTypes, this.address, false);
|
||||
|
||||
for (TransactionData transactionData : transactionDataList) {
|
||||
if (transactionData.getType() != TransactionType.REWARD_SHARE) {
|
||||
continue;
|
||||
}
|
||||
|
||||
RewardShareTransactionData rewardShareTransactionData = (RewardShareTransactionData) transactionData;
|
||||
|
||||
// Skip removals
|
||||
if (rewardShareTransactionData.getSharePercent() < 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if not sponsored by this account
|
||||
if (!Arrays.equals(rewardShareTransactionData.getCreatorPublicKey(), accountData.getPublicKey())) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip self shares
|
||||
if (Objects.equals(rewardShareTransactionData.getRecipient(), this.address)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
boolean duplicateFound = false;
|
||||
for (RewardShareTransactionData existingRewardShare : sponsorshipRewardShares) {
|
||||
if (Objects.equals(existingRewardShare.getRecipient(), rewardShareTransactionData.getRecipient())) {
|
||||
// Duplicate
|
||||
duplicateFound = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!duplicateFound) {
|
||||
sponsorshipRewardShares.add(rewardShareTransactionData);
|
||||
this.sponsees.add(rewardShareTransactionData.getRecipient());
|
||||
}
|
||||
}
|
||||
|
||||
this.sponsorshipRewardShares = sponsorshipRewardShares;
|
||||
}
|
||||
|
||||
private List<TransactionData> fetchTransferPrivsForAddress(String address) throws DataException {
|
||||
return fetchTransactions(repository,
|
||||
List.of(TransactionType.TRANSFER_PRIVS),
|
||||
address, true);
|
||||
}
|
||||
|
||||
private void fetchPaymentsForAddress(String address) throws DataException {
|
||||
List<TransactionData> payments = fetchTransactions(repository,
|
||||
Arrays.asList(TransactionType.PAYMENT, TransactionType.TRANSFER_ASSET),
|
||||
address, false);
|
||||
this.paymentsByAddress.put(address, payments);
|
||||
}
|
||||
|
||||
private List<String> fetchOutgoingPaymentRecipientsForAddress(String address) {
|
||||
List<String> outgoingPaymentRecipients = new ArrayList<>();
|
||||
|
||||
List<TransactionData> transactionDataList = this.paymentsByAddress.get(address);
|
||||
if (transactionDataList == null) transactionDataList = new ArrayList<>();
|
||||
transactionDataList.removeIf(t -> t.getTimestamp() >= this.snapshotTimestamp);
|
||||
for (TransactionData transactionData : transactionDataList) {
|
||||
switch (transactionData.getType()) {
|
||||
|
||||
case PAYMENT:
|
||||
PaymentTransactionData paymentTransactionData = (PaymentTransactionData) transactionData;
|
||||
if (!Objects.equals(paymentTransactionData.getRecipient(), address)) {
|
||||
// Outgoing payment from this account
|
||||
outgoingPaymentRecipients.add(paymentTransactionData.getRecipient());
|
||||
}
|
||||
break;
|
||||
|
||||
case TRANSFER_ASSET:
|
||||
TransferAssetTransactionData transferAssetTransactionData = (TransferAssetTransactionData) transactionData;
|
||||
if (transferAssetTransactionData.getAssetId() == Asset.QORT) {
|
||||
if (!Objects.equals(transferAssetTransactionData.getRecipient(), address)) {
|
||||
// Outgoing payment from this account
|
||||
outgoingPaymentRecipients.add(transferAssetTransactionData.getRecipient());
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return outgoingPaymentRecipients;
|
||||
}
|
||||
|
||||
private boolean hasZeroTransactions(String address) {
|
||||
List<TransactionData> transactionDataList = this.paymentsByAddress.get(address);
|
||||
if (transactionDataList == null) {
|
||||
return true;
|
||||
}
|
||||
transactionDataList.removeIf(t -> t.getTimestamp() >= this.snapshotTimestamp);
|
||||
return transactionDataList.size() == 0;
|
||||
}
|
||||
|
||||
private static List<TransactionData> fetchTransactions(Repository repository, List<TransactionType> txTypes, String address, boolean reverse) throws DataException {
|
||||
// Fetch all relevant transactions for this account
|
||||
List<byte[]> signatures = repository.getTransactionRepository()
|
||||
.getSignaturesMatchingCriteria(null, null, null, txTypes,
|
||||
null, null, address, TransactionsResource.ConfirmationStatus.CONFIRMED,
|
||||
null, null, reverse);
|
||||
|
||||
List<TransactionData> transactionDataList = new ArrayList<>();
|
||||
|
||||
for (byte[] signature : signatures) {
|
||||
// Fetch transaction data
|
||||
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
|
||||
if (transactionData == null) {
|
||||
continue;
|
||||
}
|
||||
transactionDataList.add(transactionData);
|
||||
}
|
||||
|
||||
return transactionDataList;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -79,7 +79,7 @@ public enum ApiError {
|
||||
// BUYER_ALREADY_OWNER(411, 422),
|
||||
|
||||
// POLLS
|
||||
// POLL_NO_EXISTS(501, 404),
|
||||
POLL_NO_EXISTS(501, 404),
|
||||
// POLL_ALREADY_EXISTS(502, 422),
|
||||
// DUPLICATE_OPTION(503, 422),
|
||||
// POLL_OPTION_NO_EXISTS(504, 404),
|
||||
|
||||
@@ -3,6 +3,7 @@ package org.qortal.api;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.io.Writer;
|
||||
import java.net.HttpURLConnection;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
@@ -20,14 +21,12 @@ import javax.net.ssl.SNIHostName;
|
||||
import javax.net.ssl.SNIServerName;
|
||||
import javax.net.ssl.SSLParameters;
|
||||
import javax.net.ssl.SSLSocket;
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
import javax.xml.bind.UnmarshalException;
|
||||
import javax.xml.bind.Unmarshaller;
|
||||
import javax.xml.bind.*;
|
||||
import javax.xml.transform.stream.StreamSource;
|
||||
|
||||
import org.eclipse.persistence.exceptions.XMLMarshalException;
|
||||
import org.eclipse.persistence.jaxb.JAXBContextFactory;
|
||||
import org.eclipse.persistence.jaxb.MarshallerProperties;
|
||||
import org.eclipse.persistence.jaxb.UnmarshallerProperties;
|
||||
|
||||
public class ApiRequest {
|
||||
@@ -107,6 +106,36 @@ public class ApiRequest {
|
||||
}
|
||||
}
|
||||
|
||||
private static Marshaller createMarshaller(Class<?> objectClass) {
|
||||
try {
|
||||
// Create JAXB context aware of object's class
|
||||
JAXBContext jc = JAXBContextFactory.createContext(new Class[] { objectClass }, null);
|
||||
|
||||
// Create marshaller
|
||||
Marshaller marshaller = jc.createMarshaller();
|
||||
|
||||
// Set the marshaller media type to JSON
|
||||
marshaller.setProperty(MarshallerProperties.MEDIA_TYPE, "application/json");
|
||||
|
||||
// Tell marshaller not to include JSON root element in the output
|
||||
marshaller.setProperty(MarshallerProperties.JSON_INCLUDE_ROOT, false);
|
||||
|
||||
return marshaller;
|
||||
} catch (JAXBException e) {
|
||||
throw new RuntimeException("Unable to create API marshaller", e);
|
||||
}
|
||||
}
|
||||
|
||||
public static void marshall(Writer writer, Object object) throws IOException {
|
||||
Marshaller marshaller = createMarshaller(object.getClass());
|
||||
|
||||
try {
|
||||
marshaller.marshal(object, writer);
|
||||
} catch (JAXBException e) {
|
||||
throw new IOException("Unable to create marshall object for API", e);
|
||||
}
|
||||
}
|
||||
|
||||
public static String getParamsString(Map<String, String> params) {
|
||||
StringBuilder result = new StringBuilder();
|
||||
|
||||
|
||||
@@ -13,8 +13,8 @@ import java.security.SecureRandom;
|
||||
|
||||
import javax.net.ssl.KeyManagerFactory;
|
||||
import javax.net.ssl.SSLContext;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
|
||||
import org.checkerframework.checker.units.qual.A;
|
||||
import org.eclipse.jetty.http.HttpVersion;
|
||||
import org.eclipse.jetty.rewrite.handler.RedirectPatternRule;
|
||||
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
|
||||
@@ -40,13 +40,8 @@ import org.glassfish.jersey.server.ResourceConfig;
|
||||
import org.glassfish.jersey.servlet.ServletContainer;
|
||||
import org.qortal.api.resource.AnnotationPostProcessor;
|
||||
import org.qortal.api.resource.ApiDefinition;
|
||||
import org.qortal.api.websocket.ActiveChatsWebSocket;
|
||||
import org.qortal.api.websocket.AdminStatusWebSocket;
|
||||
import org.qortal.api.websocket.BlocksWebSocket;
|
||||
import org.qortal.api.websocket.ChatMessagesWebSocket;
|
||||
import org.qortal.api.websocket.PresenceWebSocket;
|
||||
import org.qortal.api.websocket.TradeBotWebSocket;
|
||||
import org.qortal.api.websocket.TradeOffersWebSocket;
|
||||
import org.qortal.api.websocket.*;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.settings.Settings;
|
||||
|
||||
public class ApiService {
|
||||
@@ -57,9 +52,11 @@ public class ApiService {
|
||||
private Server server;
|
||||
private ApiKey apiKey;
|
||||
|
||||
public static final String API_VERSION_HEADER = "X-API-VERSION";
|
||||
|
||||
private ApiService() {
|
||||
this.config = new ResourceConfig();
|
||||
this.config.packages("org.qortal.api.resource");
|
||||
this.config.packages("org.qortal.api.resource", "org.qortal.api.restricted.resource");
|
||||
this.config.register(OpenApiResource.class);
|
||||
this.config.register(ApiDefinition.class);
|
||||
this.config.register(AnnotationPostProcessor.class);
|
||||
@@ -129,13 +126,13 @@ public class ApiService {
|
||||
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
|
||||
new DetectorConnectionFactory(sslConnectionFactory),
|
||||
httpConnectionFactory);
|
||||
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
|
||||
portUnifiedConnector.setHost(Network.getInstance().getBindAddress());
|
||||
portUnifiedConnector.setPort(Settings.getInstance().getApiPort());
|
||||
|
||||
this.server.addConnector(portUnifiedConnector);
|
||||
} else {
|
||||
// Non-SSL
|
||||
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
|
||||
InetAddress bindAddr = InetAddress.getByName(Network.getInstance().getBindAddress());
|
||||
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getApiPort());
|
||||
this.server = new Server(endpoint);
|
||||
}
|
||||
@@ -212,6 +209,9 @@ public class ApiService {
|
||||
context.addServlet(ChatMessagesWebSocket.class, "/websockets/chat/messages");
|
||||
context.addServlet(TradeOffersWebSocket.class, "/websockets/crosschain/tradeoffers");
|
||||
context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot");
|
||||
context.addServlet(TradePresenceWebSocket.class, "/websockets/crosschain/tradepresence");
|
||||
|
||||
// Deprecated
|
||||
context.addServlet(PresenceWebSocket.class, "/websockets/presence");
|
||||
|
||||
// Start server
|
||||
@@ -233,4 +233,19 @@ public class ApiService {
|
||||
this.server = null;
|
||||
}
|
||||
|
||||
public static int getApiVersion(HttpServletRequest request) {
|
||||
// Get API version
|
||||
String apiVersionString = request.getHeader(API_VERSION_HEADER);
|
||||
if (apiVersionString == null) {
// Try query string - this is needed to avoid a CORS preflight. See: https://stackoverflow.com/a/43881141
apiVersionString = request.getParameter("apiVersion");
}

int apiVersion = 1;
if (apiVersionString != null) {
apiVersion = Integer.parseInt(apiVersionString);
}
return apiVersion;
}

}

@@ -3,7 +3,6 @@ package org.qortal.api;
import io.swagger.v3.jaxrs2.integration.resources.OpenApiResource;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
import org.eclipse.jetty.rewrite.handler.RewritePatternRule;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.ErrorHandler;
import org.eclipse.jetty.server.handler.InetAccessHandler;
@@ -16,6 +15,7 @@ import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.network.Network;
import org.qortal.settings.Settings;

import javax.net.ssl.KeyManagerFactory;
@@ -38,7 +38,7 @@ public class DomainMapService {

private DomainMapService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.domainmap.resource");
this.config.packages("org.qortal.api.resource", "org.qortal.api.domainmap.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
@@ -99,13 +99,13 @@ public class DomainMapService {
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
portUnifiedConnector.setHost(Network.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getDomainMapPort());

this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
InetAddress bindAddr = InetAddress.getByName(Network.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getDomainMapPort());
this.server = new Server(endpoint);
}

@@ -15,6 +15,7 @@ import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.network.Network;
import org.qortal.settings.Settings;

import javax.net.ssl.KeyManagerFactory;
@@ -37,7 +38,7 @@ public class GatewayService {

private GatewayService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.gateway.resource");
this.config.packages("org.qortal.api.resource", "org.qortal.api.gateway.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
@@ -98,13 +99,13 @@ public class GatewayService {
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
portUnifiedConnector.setHost(Network.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getGatewayPort());

this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
InetAddress bindAddr = InetAddress.getByName(Network.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getGatewayPort());
this.server = new Server(endpoint);
}

@@ -5,29 +5,74 @@ import org.apache.logging.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.qortal.arbitrary.misc.Service;

import java.util.Objects;

public class HTMLParser {

private static final Logger LOGGER = LogManager.getLogger(HTMLParser.class);

private String linkPrefix;
private String qdnBase;
private String qdnBaseWithPath;
private byte[] data;
private String qdnContext;
private String resourceId;
private Service service;
private String identifier;
private String path;
private String theme;
private boolean usingCustomRouting;

public HTMLParser(String resourceId, String inPath, String prefix, boolean usePrefix, byte[] data) {
String inPathWithoutFilename = inPath.substring(0, inPath.lastIndexOf('/'));
this.linkPrefix = usePrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : "";
public HTMLParser(String resourceId, String inPath, String prefix, boolean usePrefix, byte[] data,
String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting) {
String inPathWithoutFilename = inPath.contains("/") ? inPath.substring(0, inPath.lastIndexOf('/')) : "";
this.qdnBase = usePrefix ? String.format("%s/%s", prefix, resourceId) : "";
this.qdnBaseWithPath = usePrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : "";
this.data = data;
this.qdnContext = qdnContext;
this.resourceId = resourceId;
this.service = service;
this.identifier = identifier;
this.path = inPath;
this.theme = theme;
this.usingCustomRouting = usingCustomRouting;
}

public void addAdditionalHeaderTags() {
String fileContents = new String(data);
Document document = Jsoup.parse(fileContents);
String baseUrl = this.linkPrefix + "/";
Elements head = document.getElementsByTag("head");
if (!head.isEmpty()) {
// Add q-apps script tag
String qAppsScriptElement = String.format("<script src=\"/apps/q-apps.js?time=%d\">", System.currentTimeMillis());
head.get(0).prepend(qAppsScriptElement);

// Add q-apps gateway script tag if in gateway mode
if (Objects.equals(this.qdnContext, "gateway")) {
String qAppsGatewayScriptElement = String.format("<script src=\"/apps/q-apps-gateway.js?time=%d\">", System.currentTimeMillis());
head.get(0).prepend(qAppsGatewayScriptElement);
}

// Escape and add vars
String service = this.service.toString().replace("\"","\\\"");
String name = this.resourceId != null ? this.resourceId.replace("\"","\\\"") : "";
String identifier = this.identifier != null ? this.identifier.replace("\"","\\\"") : "";
String path = this.path != null ? this.path.replace("\"","\\\"") : "";
String theme = this.theme != null ? this.theme.replace("\"","\\\"") : "";
String qdnContextVar = String.format("<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>", this.qdnContext, theme, service, name, identifier, path, this.qdnBase, this.qdnBaseWithPath);
head.get(0).prepend(qdnContextVar);

// Add base href tag
String baseElement = String.format("<base href=\"%s\">", baseUrl);
// Exclude the path if this request was routed back to the index automatically
String baseHref = this.usingCustomRouting ? this.qdnBase : this.qdnBaseWithPath;
String baseElement = String.format("<base href=\"%s/\">", baseHref);
head.get(0).prepend(baseElement);

// Add meta charset tag
String metaCharsetElement = "<meta charset=\"UTF-8\">";
head.get(0).prepend(metaCharsetElement);

}
String html = document.html();
this.data = html.getBytes();
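For orientation: the rewritten addAdditionalHeaderTags() above prepends a UTF-8 charset tag, a <base href> tag, the _qdn* context variables and the q-apps script into the page's <head> before the data is served. A minimal standalone Jsoup sketch of that idea follows; the resource name and the /render prefix are illustrative assumptions, not values taken from the diff.

// Editor's illustrative sketch, not part of the diff above.
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

public class HeadInjectionSketch {
    public static void main(String[] args) {
        String html = "<html><head><title>demo</title></head><body></body></html>";
        Document document = Jsoup.parse(html);

        // Prepend in reverse order so the head ends up: charset, base, script
        document.head().prepend("<script src=\"/apps/q-apps.js?time=" + System.currentTimeMillis() + "\"></script>");
        document.head().prepend("<base href=\"/render/EXAMPLE_NAME/\">"); // hypothetical QDN base path
        document.head().prepend("<meta charset=\"UTF-8\">");

        System.out.println(document.html());
    }
}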
@@ -15,7 +15,21 @@ public abstract class Security {

public static final String API_KEY_HEADER = "X-API-KEY";

/**
* Check API call is allowed, retrieving the API key from the request header or GET/POST parameters where required
* @param request
*/
public static void checkApiCallAllowed(HttpServletRequest request) {
checkApiCallAllowed(request, null);
}

/**
* Check API call is allowed, retrieving the API key first from the passedApiKey parameter, with a fallback
* to the request header or GET/POST parameters when null.
* @param request
* @param passedApiKey - the API key to test, or null if it should be retrieved from the request headers.
*/
public static void checkApiCallAllowed(HttpServletRequest request, String passedApiKey) {
// We may want to allow automatic authentication for local requests, if enabled in settings
boolean localAuthBypassEnabled = Settings.getInstance().isLocalAuthBypassEnabled();
if (localAuthBypassEnabled) {
@@ -38,7 +52,10 @@ public abstract class Security {
}

// We require an API key to be passed
String passedApiKey = request.getHeader(API_KEY_HEADER);
if (passedApiKey == null) {
// API call not passed as a parameter, so try the header
passedApiKey = request.getHeader(API_KEY_HEADER);
}
if (passedApiKey == null) {
// Try query string - this is needed to avoid a CORS preflight. See: https://stackoverflow.com/a/43881141
passedApiKey = request.getParameter("apiKey");
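As this hunk shows, checkApiCallAllowed() now accepts an explicitly passed key and otherwise falls back to the X-API-KEY header and then to the apiKey query parameter (the query-string form exists to avoid a CORS preflight). A hedged client-side sketch follows; the port 12391 and the chosen endpoint are assumptions about a typical local node, not values stated in the diff.

// Editor's illustrative sketch, not part of the diff above.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ApiKeyClientSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String apiKey = "your-api-key-here"; // placeholder

        // Preferred: pass the key in the X-API-KEY request header
        HttpRequest viaHeader = HttpRequest.newBuilder(URI.create("http://localhost:12391/arbitrary/hosted/transactions"))
                .header("X-API-KEY", apiKey)
                .GET()
                .build();

        // Alternative: pass it as a query parameter, avoiding a CORS preflight from browsers
        HttpRequest viaQuery = HttpRequest.newBuilder(URI.create("http://localhost:12391/arbitrary/hosted/transactions?apiKey=" + apiKey))
                .GET()
                .build();

        System.out.println(client.send(viaHeader, HttpResponse.BodyHandlers.ofString()).statusCode());
        System.out.println(client.send(viaQuery, HttpResponse.BodyHandlers.ofString()).statusCode());
    }
}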
@@ -56,7 +73,7 @@ public abstract class Security {
public static void disallowLoopbackRequests(HttpServletRequest request) {
try {
InetAddress remoteAddr = InetAddress.getByName(request.getRemoteAddr());
if (remoteAddr.isLoopbackAddress()) {
if (remoteAddr.isLoopbackAddress() && !Settings.getInstance().isGatewayLoopbackEnabled()) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Local requests not allowed");
}
} catch (UnknownHostException e) {
@@ -84,9 +101,9 @@ public abstract class Security {
}
}

public static void requirePriorAuthorizationOrApiKey(HttpServletRequest request, String resourceId, Service service, String identifier) {
public static void requirePriorAuthorizationOrApiKey(HttpServletRequest request, String resourceId, Service service, String identifier, String apiKey) {
try {
Security.checkApiCallAllowed(request);
Security.checkApiCallAllowed(request, apiKey);

} catch (ApiException e) {
// API call wasn't allowed, but maybe it was pre-authorized

@@ -17,7 +17,7 @@ import java.util.Map;

@Path("/")
@Tag(name = "Gateway")
@Tag(name = "Domain Map")
public class DomainMapResource {

@Context HttpServletRequest request;
@@ -42,16 +42,16 @@ public class DomainMapResource {
// Build synchronously, so that we don't need to make the summary API endpoints available over
// the domain map server. This means that there will be no loading screen, but this is potentially
// preferred in this situation anyway (e.g. to avoid confusing search engine robots).
return this.get(domainMap.get(request.getServerName()), ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", false, false);
return this.get(domainMap.get(request.getServerName()), ResourceIdType.NAME, Service.WEBSITE, null, inPath, null, "", false, false);
}
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}

private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async) {
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
String inPath, String secret58, String prefix, boolean usePrefix, boolean async) {

ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, identifier, inPath,
secret58, prefix, usePrefix, async, "domainMap", request, response, context);
return renderer.render();
}

@@ -2,6 +2,7 @@ package org.qortal.api.gateway.resource;

import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.apache.commons.lang3.StringUtils;
import org.qortal.api.Security;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
@@ -16,6 +17,10 @@ import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

@Path("/")
@@ -76,50 +81,83 @@ public class GatewayResource {

@GET
@Path("{name}/{path:.*}")
@Path("{path:.*}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathByName(@PathParam("name") String name,
@PathParam("path") String inPath) {
public HttpServletResponse getPath(@PathParam("path") String inPath) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", true, true);
}

@GET
@Path("{name}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByName(@PathParam("name") String name) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "", true, true);
}

// Optional /site alternative for backwards support

@GET
@Path("/site/{name}/{path:.*}")
public HttpServletResponse getSitePathByName(@PathParam("name") String name,
@PathParam("path") String inPath) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "/site", true, true);
}

@GET
@Path("/site/{name}")
public HttpServletResponse getSiteIndexByName(@PathParam("name") String name) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "/site", true, true);
return this.parsePath(inPath, "gateway", null, true, true);
}

private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async) {
private HttpServletResponse parsePath(String inPath, String qdnContext, String secret58, boolean usePrefix, boolean async) {

ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
if (inPath == null || inPath.equals("")) {
// Assume not a real file
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}

// Default service is WEBSITE
Service service = Service.WEBSITE;
String name = null;
String identifier = null;
String outPath = "";
List<String> prefixParts = new ArrayList<>();

if (!inPath.contains("/")) {
// Assume entire inPath is a registered name
name = inPath;
}
else {
// Parse the path to determine what we need to load
List<String> parts = new LinkedList<>(Arrays.asList(inPath.split("/")));

// Check if the first element is a service
try {
Service parsedService = Service.valueOf(parts.get(0).toUpperCase());
if (parsedService != null) {
// First element matches a service, so we can assume it is one
service = parsedService;
parts.remove(0);
prefixParts.add(service.name());
}
} catch (IllegalArgumentException e) {
// Not a service
}

if (parts.isEmpty()) {
// We need more than just a service
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}

// Service is removed, so assume first element is now a registered name
name = parts.get(0);
parts.remove(0);

if (!parts.isEmpty()) {
// Name is removed, so check if the first element is now an identifier
ArbitraryResourceStatus status = this.getStatus(service, name, parts.get(0), false);
if (status.getTotalChunkCount() > 0) {
// Matched service, name and identifier combination - so assume this is an identifier and can be removed
identifier = parts.get(0);
parts.remove(0);
prefixParts.add(identifier);
}
}

if (!parts.isEmpty()) {
// outPath can be built by combining any remaining parts
outPath = String.join("/", parts);
}
}

String prefix = StringUtils.join(prefixParts, "/");
if (prefix != null && prefix.length() > 0) {
prefix = "/" + prefix;
}

ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(name, ResourceIdType.NAME, service, identifier, outPath,
secret58, prefix, usePrefix, async, qdnContext, request, response, context);
return renderer.render();
}
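The new parsePath() route above accepts one wildcard path and infers service, registered name, optional identifier and the remaining file path from it; an identifier is only accepted when a status lookup confirms the service/name/identifier combination has chunks. The simplified sketch below reproduces just the splitting order with plain strings, skipping the Service enum and the status lookup, so it is an approximation rather than the real routing code.

// Editor's illustrative sketch, not part of the diff above.
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class GatewayPathSketch {
    public static void main(String[] args) {
        String inPath = "WEBSITE/ExampleName/index.html"; // hypothetical gateway path
        String service = "WEBSITE";                        // default service
        String name = null;
        String outPath = "";

        List<String> parts = new LinkedList<>(Arrays.asList(inPath.split("/")));
        if (!parts.isEmpty() && parts.get(0).equals(parts.get(0).toUpperCase())) {
            service = parts.remove(0);   // stand-in for the Service.valueOf() check in the real code
        }
        if (!parts.isEmpty()) {
            name = parts.remove(0);      // first remaining element is treated as the registered name
        }
        if (!parts.isEmpty()) {
            outPath = String.join("/", parts); // everything else is the path inside the resource
        }
        System.out.printf("service=%s name=%s path=%s%n", service, name, outPath);
    }
}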
src/main/java/org/qortal/api/model/AccountPenaltyStats.java (new file, 56 lines)
@@ -0,0 +1,56 @@
package org.qortal.api.model;

import org.qortal.block.SelfSponsorshipAlgoV1Block;
import org.qortal.data.account.AccountData;
import org.qortal.data.naming.NameData;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import java.util.ArrayList;
import java.util.List;

@XmlAccessorType(XmlAccessType.FIELD)
public class AccountPenaltyStats {

public Integer totalPenalties;
public Integer maxPenalty;
public Integer minPenalty;
public String penaltyHash;

protected AccountPenaltyStats() {
}

public AccountPenaltyStats(Integer totalPenalties, Integer maxPenalty, Integer minPenalty, String penaltyHash) {
this.totalPenalties = totalPenalties;
this.maxPenalty = maxPenalty;
this.minPenalty = minPenalty;
this.penaltyHash = penaltyHash;
}

public static AccountPenaltyStats fromAccounts(List<AccountData> accounts) {
int totalPenalties = 0;
Integer maxPenalty = null;
Integer minPenalty = null;

List<String> addresses = new ArrayList<>();
for (AccountData accountData : accounts) {
int penalty = accountData.getBlocksMintedPenalty();
addresses.add(accountData.getAddress());
totalPenalties++;

// Penalties are expressed as a negative number, so the min and the max are reversed here
if (maxPenalty == null || penalty < maxPenalty) maxPenalty = penalty;
if (minPenalty == null || penalty > minPenalty) minPenalty = penalty;
}

String penaltyHash = SelfSponsorshipAlgoV1Block.getHash(addresses);
return new AccountPenaltyStats(totalPenalties, maxPenalty, minPenalty, penaltyHash);
}

@Override
public String toString() {
return String.format("totalPenalties: %d, maxPenalty: %d, minPenalty: %d, penaltyHash: %s", totalPenalties, maxPenalty, minPenalty, penaltyHash == null ? "null" : penaltyHash);
}
}
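Because penalties are stored as negative values, fromAccounts() above deliberately inverts the comparisons: maxPenalty ends up as the most negative (most severe) value and minPenalty as the value closest to zero. A tiny self-contained check of that convention, using made-up penalty figures:

// Editor's illustrative sketch, not part of the diff above.
public class PenaltyStatsSketch {
    public static void main(String[] args) {
        int[] penalties = { -5000000, -1250000 }; // hypothetical blocksMintedPenalty values
        Integer maxPenalty = null, minPenalty = null;
        for (int penalty : penalties) {
            if (maxPenalty == null || penalty < maxPenalty) maxPenalty = penalty; // most severe (most negative)
            if (minPenalty == null || penalty > minPenalty) minPenalty = penalty; // least severe (closest to zero)
        }
        System.out.println(maxPenalty + " / " + minPenalty); // prints -5000000 / -1250000
    }
}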
@@ -1,7 +1,8 @@
package org.qortal.api.model;

import io.swagger.v3.oas.annotations.media.Schema;
import org.qortal.data.network.PeerChainTipData;
import org.qortal.controller.Controller;
import org.qortal.data.block.BlockSummaryData;
import org.qortal.data.network.PeerData;
import org.qortal.network.Handshake;
import org.qortal.network.Peer;
@@ -36,6 +37,7 @@ public class ConnectedPeer {
public Long lastBlockTimestamp;
public UUID connectionId;
public String age;
public Boolean isTooDivergent;

protected ConnectedPeer() {
}
@@ -63,11 +65,16 @@ public class ConnectedPeer {
this.age = "connecting...";
}

PeerChainTipData peerChainTipData = peer.getChainTipData();
BlockSummaryData peerChainTipData = peer.getChainTipData();
if (peerChainTipData != null) {
this.lastHeight = peerChainTipData.getLastHeight();
this.lastBlockSignature = peerChainTipData.getLastBlockSignature();
this.lastBlockTimestamp = peerChainTipData.getLastBlockTimestamp();
this.lastHeight = peerChainTipData.getHeight();
this.lastBlockSignature = peerChainTipData.getSignature();
this.lastBlockTimestamp = peerChainTipData.getTimestamp();
}

// Only include isTooDivergent decision if we've had the opportunity to request block summaries this peer
if (peer.getLastTooDivergentTime() != null) {
this.isTooDivergent = Controller.wasRecentlyTooDivergent.test(peer);
}
}

src/main/java/org/qortal/api/model/FileProperties.java (new file, 16 lines)
@@ -0,0 +1,16 @@
package org.qortal.api.model;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;

@XmlAccessorType(XmlAccessType.FIELD)
public class FileProperties {

public String filename;
public String mimeType;
public Long size;

public FileProperties() {
}

}
@@ -12,6 +12,7 @@ public class NodeInfo {
public long buildTimestamp;
public String nodeId;
public boolean isTestNet;
public String type;

public NodeInfo() {
}

@@ -4,6 +4,7 @@ import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;

import org.qortal.controller.Controller;
import org.qortal.controller.OnlineAccountsManager;
import org.qortal.controller.Synchronizer;
import org.qortal.network.Network;

@@ -21,12 +22,12 @@ public class NodeStatus {
public final int height;

public NodeStatus() {
this.isMintingPossible = Controller.getInstance().isMintingPossible();
this.isMintingPossible = OnlineAccountsManager.getInstance().hasActiveOnlineAccountSignatures();

this.syncPercent = Synchronizer.getInstance().getSyncPercent();
this.isSynchronizing = this.syncPercent != null;
this.isSynchronizing = Synchronizer.getInstance().isSynchronizing();

this.numberOfConnections = Network.getInstance().getHandshakedPeers().size();
this.numberOfConnections = Network.getInstance().getImmutableHandshakedPeers().size();

this.height = Controller.getInstance().getChainHeight();
}
@@ -0,0 +1,29 @@
package org.qortal.api.model.crosschain;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;

import io.swagger.v3.oas.annotations.media.Schema;

@XmlAccessorType(XmlAccessType.FIELD)
public class DigibyteSendRequest {

@Schema(description = "Digibyte BIP32 extended private key", example = "tprv___________________________________________________________________________________________________________")
public String xprv58;

@Schema(description = "Recipient's Digibyte address ('legacy' P2PKH only)", example = "1DigByteEaterAddressDontSendf59kuE")
public String receivingAddress;

@Schema(description = "Amount of DGB to send", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long digibyteAmount;

@Schema(description = "Transaction fee per byte (optional). Default is 0.00000100 DGB (100 sats) per byte", example = "0.00000100", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public Long feePerByte;

public DigibyteSendRequest() {
}

}
@@ -0,0 +1,32 @@
package org.qortal.api.model.crosschain;

import io.swagger.v3.oas.annotations.media.Schema;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;

@XmlAccessorType(XmlAccessType.FIELD)
public class PirateChainSendRequest {

@Schema(description = "32 bytes of entropy, Base58 encoded", example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV")
public String entropy58;

@Schema(description = "Recipient's Pirate Chain address", example = "zc...")
public String receivingAddress;

@Schema(description = "Amount of ARRR to send", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long arrrAmount;

@Schema(description = "Transaction fee per byte (optional). Default is 0.00000100 ARRR (100 sats) per byte", example = "0.00000100", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public Long feePerByte;

@Schema(description = "Optional memo to include information for the recipient", example = "zc...")
public String memo;

public PirateChainSendRequest() {
}

}
@@ -0,0 +1,29 @@
package org.qortal.api.model.crosschain;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;

import io.swagger.v3.oas.annotations.media.Schema;

@XmlAccessorType(XmlAccessType.FIELD)
public class RavencoinSendRequest {

@Schema(description = "Ravencoin BIP32 extended private key", example = "tprv___________________________________________________________________________________________________________")
public String xprv58;

@Schema(description = "Recipient's Ravencoin address ('legacy' P2PKH only)", example = "1RvnCoinEaterAddressDontSendf59kuE")
public String receivingAddress;

@Schema(description = "Amount of RVN to send", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public long ravencoinAmount;

@Schema(description = "Transaction fee per byte (optional). Default is 0.00000100 RVN (100 sats) per byte", example = "0.00000100", type = "number")
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
public Long feePerByte;

public RavencoinSendRequest() {
}

}
@@ -14,6 +14,7 @@ import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
@@ -27,12 +28,15 @@ import org.qortal.api.ApiErrors;
import org.qortal.api.ApiException;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.model.AccountPenaltyStats;
import org.qortal.api.model.ApiOnlineAccount;
import org.qortal.api.model.RewardShareKeyRequest;
import org.qortal.asset.Asset;
import org.qortal.controller.Controller;
import org.qortal.controller.LiteNode;
import org.qortal.controller.OnlineAccountsManager;
import org.qortal.crypto.Crypto;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.AccountPenaltyData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.network.OnlineAccountData;
import org.qortal.data.network.OnlineAccountLevel;
@@ -109,18 +113,26 @@ public class AddressesResource {
if (!Crypto.isValidAddress(address))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);

byte[] lastReference = null;
AccountData accountData;

try (final Repository repository = RepositoryManager.getRepository()) {
AccountData accountData = repository.getAccountRepository().getAccount(address);
// Not found?
if (accountData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);

lastReference = accountData.getReference();
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
if (Settings.getInstance().isLite()) {
// Lite nodes request data from peers instead of the local db
accountData = LiteNode.getInstance().fetchAccountData(address);
}
else {
// All other node types request data from local db
try (final Repository repository = RepositoryManager.getRepository()) {
accountData = repository.getAccountRepository().getAccount(address);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}

// Not found?
if (accountData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);

byte[] lastReference = accountData.getReference();

if (lastReference == null || lastReference.length == 0)
return "false";
@@ -156,7 +168,7 @@ public class AddressesResource {
)
@ApiErrors({ApiError.PUBLIC_KEY_NOT_FOUND, ApiError.REPOSITORY_ISSUE})
public List<ApiOnlineAccount> getOnlineAccounts() {
List<OnlineAccountData> onlineAccounts = Controller.getInstance().getOnlineAccounts();
List<OnlineAccountData> onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts();

// Map OnlineAccountData entries to OnlineAccount via reward-share data
try (final Repository repository = RepositoryManager.getRepository()) {
@@ -191,14 +203,18 @@ public class AddressesResource {
)
@ApiErrors({ApiError.PUBLIC_KEY_NOT_FOUND, ApiError.REPOSITORY_ISSUE})
public List<OnlineAccountLevel> getOnlineAccountsByLevel() {
List<OnlineAccountData> onlineAccounts = Controller.getInstance().getOnlineAccounts();
List<OnlineAccountData> onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts();

try (final Repository repository = RepositoryManager.getRepository()) {
List<OnlineAccountLevel> onlineAccountLevels = new ArrayList<>();

// Prepopulate all levels
for (int i=0; i<=10; i++)
onlineAccountLevels.add(new OnlineAccountLevel(i, 0));

for (OnlineAccountData onlineAccountData : onlineAccounts) {
try {
final int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, onlineAccountData.getPublicKey());
final int minterLevel = Account.getRewardShareEffectiveMintingLevelIncludingLevelZero(repository, onlineAccountData.getPublicKey());

OnlineAccountLevel onlineAccountLevel = onlineAccountLevels.stream()
.filter(a -> a.getLevel() == minterLevel)
@@ -458,6 +474,54 @@ public class AddressesResource {
}
}

@GET
@Path("/penalties")
@Operation(
summary = "Get addresses with penalties",
description = "Returns a list of accounts with a blocksMintedPenalty",
responses = {
@ApiResponse(
description = "accounts with penalties",
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = AccountPenaltyData.class)))
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public List<AccountPenaltyData> getAccountsWithPenalties() {
try (final Repository repository = RepositoryManager.getRepository()) {

List<AccountData> accounts = repository.getAccountRepository().getPenaltyAccounts();
List<AccountPenaltyData> penalties = accounts.stream().map(a -> new AccountPenaltyData(a.getAddress(), a.getBlocksMintedPenalty())).collect(Collectors.toList());

return penalties;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}

@GET
@Path("/penalties/stats")
@Operation(
summary = "Get stats about current penalties",
responses = {
@ApiResponse(
description = "aggregated stats about accounts with penalties",
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = AccountPenaltyStats.class)))
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
public AccountPenaltyStats getPenaltyStats() {
try (final Repository repository = RepositoryManager.getRepository()) {

List<AccountData> accounts = repository.getAccountRepository().getPenaltyAccounts();
return AccountPenaltyStats.fromAccounts(accounts);

} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}

@POST
@Path("/publicize")
@Operation(
src/main/java/org/qortal/api/resource/AppsResource.java (new file, 83 lines)
@@ -0,0 +1,83 @@
package org.qortal.api.resource;

import com.google.common.io.Resources;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.Hidden;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.api.*;

import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;

@Path("/apps")
@Tag(name = "Apps")
public class AppsResource {

@Context HttpServletRequest request;
@Context HttpServletResponse response;
@Context ServletContext context;

@GET
@Path("/q-apps.js")
@Hidden // For internal Q-App API use only
@Operation(
summary = "Javascript interface for Q-Apps",
responses = {
@ApiResponse(
description = "javascript",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
public String getQAppsJs() {
URL url = Resources.getResource("q-apps/q-apps.js");
try {
return Resources.toString(url, StandardCharsets.UTF_8);
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FILE_NOT_FOUND);
}
}

@GET
@Path("/q-apps-gateway.js")
@Hidden // For internal Q-App API use only
@Operation(
summary = "Gateway-specific interface for Q-Apps",
responses = {
@ApiResponse(
description = "javascript",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
public String getQAppsGatewayJs() {
URL url = Resources.getResource("q-apps/q-apps-gateway.js");
try {
return Resources.toString(url, StandardCharsets.UTF_8);
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FILE_NOT_FOUND);
}
}

}
@@ -1,6 +1,8 @@
package org.qortal.api.resource;

import com.google.common.primitives.Bytes;
import com.j256.simplemagic.ContentInfo;
import com.j256.simplemagic.ContentInfoUtil;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.media.ArraySchema;
@@ -12,10 +14,14 @@ import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;

import java.io.*;
import java.net.FileNameMap;
import java.net.URLConnection;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
@@ -24,25 +30,30 @@ import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.util.encoders.Base64;
import org.qortal.api.*;
import org.qortal.api.model.FileProperties;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.arbitrary.*;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller;
import org.qortal.controller.arbitrary.ArbitraryDataRenderManager;
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.controller.arbitrary.ArbitraryMetadataManager;
import org.qortal.data.account.AccountData;
import org.qortal.data.arbitrary.ArbitraryResourceInfo;
import org.qortal.data.arbitrary.ArbitraryResourceNameInfo;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.data.arbitrary.*;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
@@ -54,8 +65,7 @@ import org.qortal.transaction.Transaction.ValidationResult;
import org.qortal.transform.TransformationException;
import org.qortal.transform.transaction.ArbitraryTransactionTransformer;
import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.ZipUtils;
import org.qortal.utils.*;

@Path("/arbitrary")
@Tag(name = "Arbitrary")
@@ -83,12 +93,17 @@ public class ArbitraryResource {
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<ArbitraryResourceInfo> getResources(
@QueryParam("service") Service service,
@QueryParam("name") String name,
@QueryParam("identifier") String identifier,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus) {
@Parameter(description = "Include followed names only") @QueryParam("followedonly") Boolean followedOnly,
@Parameter(description = "Exclude blocked content") @QueryParam("excludeblocked") Boolean excludeBlocked,
@Parameter(description = "Filter names by list") @QueryParam("namefilter") String nameListFilter,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus,
@Parameter(description = "Include metadata") @QueryParam("includemetadata") Boolean includeMetadata) {

try (final Repository repository = RepositoryManager.getRepository()) {

@@ -103,15 +118,33 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "identifier cannot be specified when requesting a default resource");
}

// Set up name filters if supplied
List<String> names = null;
if (name != null) {
// Filter using single name
names = Arrays.asList(name);
}
else if (nameListFilter != null) {
// Filter using supplied list of names
names = ResourceListManager.getInstance().getStringsInList(nameListFilter);
if (names.isEmpty()) {
// If list is empty (or doesn't exist) we can shortcut with empty response
return new ArrayList<>();
}
}

List<ArbitraryResourceInfo> resources = repository.getArbitraryRepository()
.getArbitraryResources(service, identifier, null, defaultRes, limit, offset, reverse);
.getArbitraryResources(service, identifier, names, defaultRes, followedOnly, excludeBlocked, limit, offset, reverse);

if (resources == null) {
return new ArrayList<>();
}

if (includeStatus != null && includeStatus == true) {
resources = this.addStatusToResources(resources);
if (includeStatus != null && includeStatus) {
resources = ArbitraryTransactionUtils.addStatusToResources(resources);
}
if (includeMetadata != null && includeMetadata) {
resources = ArbitraryTransactionUtils.addMetadataToResources(resources);
}

return resources;
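The expanded getResources() signature above adds followedonly, excludeblocked, namefilter and includemetadata on top of the existing query parameters. A hedged example of how a client might exercise them is sketched below; the host and port are assumptions about a local node, while the parameter names come from the hunk itself.

// Editor's illustrative sketch, not part of the diff above.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ResourceListingSketch {
    public static void main(String[] args) throws Exception {
        String url = "http://localhost:12391/arbitrary/resources"
                + "?service=WEBSITE&includestatus=true&includemetadata=true"
                + "&excludeblocked=true&limit=20&offset=0&reverse=true";
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(HttpRequest.newBuilder(URI.create(url)).GET().build(),
                        HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // JSON array of ArbitraryResourceInfo entries
    }
}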
@@ -135,26 +168,56 @@ public class ArbitraryResource {
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<ArbitraryResourceInfo> searchResources(
@QueryParam("service") Service service,
@QueryParam("query") String query,
@Parameter(description = "Query (searches both name and identifier fields)") @QueryParam("query") String query,
@Parameter(description = "Identifier (searches identifier field only)") @QueryParam("identifier") String identifier,
@Parameter(description = "Name (searches name field only)") @QueryParam("name") List<String> names,
@Parameter(description = "Prefix only (if true, only the beginning of fields are matched)") @QueryParam("prefix") Boolean prefixOnly,
@Parameter(description = "Exact match names only (if true, partial name matches are excluded)") @QueryParam("exactmatchnames") Boolean exactMatchNamesOnly,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@Parameter(description = "Filter names by list (exact matches only)") @QueryParam("namefilter") String nameListFilter,
@Parameter(description = "Include followed names only") @QueryParam("followedonly") Boolean followedOnly,
@Parameter(description = "Exclude blocked content") @QueryParam("excludeblocked") Boolean excludeBlocked,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus,
@Parameter(description = "Include metadata") @QueryParam("includemetadata") Boolean includeMetadata,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus) {
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {

try (final Repository repository = RepositoryManager.getRepository()) {

boolean defaultRes = Boolean.TRUE.equals(defaultResource);
boolean usePrefixOnly = Boolean.TRUE.equals(prefixOnly);

List<String> exactMatchNames = new ArrayList<>();

if (nameListFilter != null) {
// Load names from supplied list of names
exactMatchNames.addAll(ResourceListManager.getInstance().getStringsInList(nameListFilter));

// If list is empty (or doesn't exist) we can shortcut with empty response
if (exactMatchNames.isEmpty()) {
return new ArrayList<>();
}
}

// Move names to exact match list, if requested
if (exactMatchNamesOnly != null && exactMatchNamesOnly && names != null) {
exactMatchNames.addAll(names);
names = null;
}

List<ArbitraryResourceInfo> resources = repository.getArbitraryRepository()
.searchArbitraryResources(service, query, defaultRes, limit, offset, reverse);
.searchArbitraryResources(service, query, identifier, names, usePrefixOnly, exactMatchNames, defaultRes, followedOnly, excludeBlocked, limit, offset, reverse);

if (resources == null) {
return new ArrayList<>();
}

if (includeStatus != null && includeStatus == true) {
resources = this.addStatusToResources(resources);
if (includeStatus != null && includeStatus) {
resources = ArbitraryTransactionUtils.addStatusToResources(resources);
}
if (includeMetadata != null && includeMetadata) {
resources = ArbitraryTransactionUtils.addMetadataToResources(resources);
}

return resources;
@@ -164,62 +227,6 @@ public class ArbitraryResource {
}
}

@GET
@Path("/resources/names")
@Operation(
summary = "List arbitrary resources available on chain, grouped by creator's name",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceInfo.class))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<ArbitraryResourceNameInfo> getResourcesGroupedByName(
@QueryParam("service") Service service,
@QueryParam("identifier") String identifier,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus) {

try (final Repository repository = RepositoryManager.getRepository()) {

// Treat empty identifier as null
if (identifier != null && identifier.isEmpty()) {
identifier = null;
}

// Ensure that "default" and "identifier" parameters cannot coexist
boolean defaultRes = Boolean.TRUE.equals(defaultResource);
if (defaultRes == true && identifier != null) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "identifier cannot be specified when requesting a default resource");
}

List<ArbitraryResourceNameInfo> creatorNames = repository.getArbitraryRepository()
.getArbitraryResourceCreatorNames(service, identifier, defaultRes, limit, offset, reverse);

for (ArbitraryResourceNameInfo creatorName : creatorNames) {
String name = creatorName.name;
if (name != null) {
List<ArbitraryResourceInfo> resources = repository.getArbitraryRepository()
.getArbitraryResources(service, identifier, name, defaultRes, null, null, reverse);

if (includeStatus != null && includeStatus == true) {
resources = this.addStatusToResources(resources);
}
creatorName.resources = resources;
}
}

return creatorNames;

} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}

@GET
@Path("/resource/status/{service}/{name}")
@Operation(
@@ -237,8 +244,33 @@ public class ArbitraryResource {
@PathParam("name") String name,
@QueryParam("build") Boolean build) {

Security.requirePriorAuthorizationOrApiKey(request, name, service, null);
return this.getStatus(service, name, null, build);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorizationOrApiKey(request, name, service, null, apiKey);

return ArbitraryTransactionUtils.getStatus(service, name, null, build);
}

@GET
@Path("/resource/properties/{service}/{name}/{identifier}")
@Operation(
summary = "Get properties of a QDN resource",
description = "This attempts a download of the data if it's not available locally. A filename will only be returned for single file resources. mimeType is only returned when it can be determined.",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = FileProperties.class))
)
}
)
@SecurityRequirement(name = "apiKey")
public FileProperties getResourceProperties(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("identifier") String identifier) {

if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier, apiKey);

return this.getFileProperties(service, name, identifier);
}

@GET
@@ -259,8 +291,10 @@ public class ArbitraryResource {
@PathParam("identifier") String identifier,
@QueryParam("build") Boolean build) {

Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier);
return this.getStatus(service, name, identifier, build);
if (!Settings.getInstance().isQDNAuthBypassEnabled())
Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier, apiKey);

return ArbitraryTransactionUtils.getStatus(service, name, identifier, build);
}

@@ -390,6 +424,28 @@ public class ArbitraryResource {
return Settings.getInstance().isRelayModeEnabled();
}

@GET
@Path("/categories")
@Operation(
summary = "List arbitrary transaction categories",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryCategoryInfo.class))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
public List<ArbitraryCategoryInfo> getCategories() {
List<ArbitraryCategoryInfo> categories = new ArrayList<>();
for (Category category : Category.values()) {
ArbitraryCategoryInfo arbitraryCategory = new ArbitraryCategoryInfo();
arbitraryCategory.id = category.toString();
arbitraryCategory.name = category.getName();
categories.add(arbitraryCategory);
}
return categories;
}

@GET
@Path("/hosted/transactions")
@Operation(
@@ -431,16 +487,28 @@ public class ArbitraryResource {
public List<ArbitraryResourceInfo> getHostedResources(
@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@Parameter(description = "Include status") @QueryParam("includestatus") Boolean includeStatus,
@Parameter(description = "Include metadata") @QueryParam("includemetadata") Boolean includeMetadata,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset) {
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@QueryParam("query") String query) {
Security.checkApiCallAllowed(request);

List<ArbitraryResourceInfo> resources = new ArrayList<>();

try (final Repository repository = RepositoryManager.getRepository()) {

List<ArbitraryTransactionData> transactionDataList;

if (query == null || query.equals("")) {
transactionDataList = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository, limit, offset);
} else {
transactionDataList = ArbitraryDataStorageManager.getInstance().searchHostedTransactions(repository,query, limit, offset);
}

List<ArbitraryTransactionData> transactionDataList = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository, limit, offset);
for (ArbitraryTransactionData transactionData : transactionDataList) {
if (transactionData.getService() == null) {
continue;
}
ArbitraryResourceInfo arbitraryResourceInfo = new ArbitraryResourceInfo();
arbitraryResourceInfo.name = transactionData.getName();
arbitraryResourceInfo.service = transactionData.getService();
@@ -450,8 +518,11 @@ public class ArbitraryResource {
}
}

if (includeStatus != null && includeStatus == true) {
resources = this.addStatusToResources(resources);
if (includeStatus != null && includeStatus) {
resources = ArbitraryTransactionUtils.addStatusToResources(resources);
}
if (includeMetadata != null && includeMetadata) {
resources = ArbitraryTransactionUtils.addMetadataToResources(resources);
}

return resources;
@@ -461,6 +532,8 @@ public class ArbitraryResource {
}
}

@DELETE
@Path("/resource/{service}/{name}/{identifier}")
@Operation(
@@ -479,7 +552,7 @@ public class ArbitraryResource {

Security.checkApiCallAllowed(request);
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
return resource.delete();
return resource.delete(false);
}

@POST
@@ -576,6 +649,7 @@ public class ArbitraryResource {
@PathParam("service") Service service,
@PathParam("name") String name,
@QueryParam("filepath") String filepath,
@QueryParam("encoding") String encoding,
@QueryParam("rebuild") boolean rebuild,
@QueryParam("async") boolean async,
@QueryParam("attempts") Integer attempts) {
@@ -585,7 +659,7 @@ public class ArbitraryResource {
Security.checkApiCallAllowed(request);
}

return this.download(service, name, null, filepath, rebuild, async, attempts);
return this.download(service, name, null, filepath, encoding, rebuild, async, attempts);
}

@GET
@@ -611,16 +685,62 @@ public class ArbitraryResource {
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@QueryParam("filepath") String filepath,
@QueryParam("encoding") String encoding,
@QueryParam("rebuild") boolean rebuild,
@QueryParam("async") boolean async,
@QueryParam("attempts") Integer attempts) {

// Authentication can be bypassed in the settings, for those running public QDN nodes
if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
Security.checkApiCallAllowed(request);
Security.checkApiCallAllowed(request, apiKey);
}

return this.download(service, name, identifier, filepath, rebuild, async, attempts);
return this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts);
}

// Metadata

@GET
@Path("/metadata/{service}/{name}/{identifier}")
@Operation(
summary = "Fetch raw metadata from resource with supplied service, name, identifier, and relative path",
responses = {
@ApiResponse(
description = "Path to file structure containing requested data",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(
implementation = ArbitraryDataTransactionMetadata.class
)
)
)
}
)
@SecurityRequirement(name = "apiKey")
public ArbitraryResourceMetadata getMetadata(@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("identifier") String identifier) {
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);

try {
ArbitraryDataTransactionMetadata transactionMetadata = ArbitraryMetadataManager.getInstance().fetchMetadata(resource, false);
if (transactionMetadata != null) {
ArbitraryResourceMetadata resourceMetadata = ArbitraryResourceMetadata.fromTransactionMetadata(transactionMetadata, true);
if (resourceMetadata != null) {
return resourceMetadata;
}
else {
// The metadata file doesn't contain title, description, category, or tags
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FILE_NOT_FOUND);
}
}
} catch (IllegalArgumentException e) {
// No metadata exists for this resource
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
}

throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FILE_NOT_FOUND);
}

@@ -656,6 +776,12 @@ public class ArbitraryResource {
public String post(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") String serviceString,
@PathParam("name") String name,
@QueryParam("title") String title,
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String path) {
Security.checkApiCallAllowed(request);

@@ -663,7 +789,8 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Path not supplied");
}

return this.upload(Service.valueOf(serviceString), name, null, path, null, null, false);
return this.upload(Service.valueOf(serviceString), name, null, path, null, null, false,
fee, null, title, description, tags, category, preview);
}

@POST
@@ -696,6 +823,12 @@ public class ArbitraryResource {
@PathParam("service") String serviceString,
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@QueryParam("title") String title,
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String path) {
Security.checkApiCallAllowed(request);

@@ -703,7 +836,8 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Path not supplied");
}

return this.upload(Service.valueOf(serviceString), name, identifier, path, null, null, false);
return this.upload(Service.valueOf(serviceString), name, identifier, path, null, null, false,
fee, null, title, description, tags, category, preview);
}

@@ -737,6 +871,13 @@ public class ArbitraryResource {
public String postBase64EncodedData(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
@PathParam("service") String serviceString,
@PathParam("name") String name,
@QueryParam("title") String title,
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("filename") String filename,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64) {
Security.checkApiCallAllowed(request);

@@ -744,7 +885,8 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied");
}

return this.upload(Service.valueOf(serviceString), name, null, null, null, base64, false);
return this.upload(Service.valueOf(serviceString), name, null, null, null, base64, false,
|
||||
fee, filename, title, description, tags, category, preview);
|
||||
}
|
||||
|
||||
@POST
|
||||
@@ -775,6 +917,13 @@ public class ArbitraryResource {
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@PathParam("identifier") String identifier,
|
||||
@QueryParam("title") String title,
|
||||
@QueryParam("description") String description,
|
||||
@QueryParam("tags") List<String> tags,
|
||||
@QueryParam("category") Category category,
|
||||
@QueryParam("filename") String filename,
|
||||
@QueryParam("fee") Long fee,
|
||||
@QueryParam("preview") Boolean preview,
|
||||
String base64) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
@@ -782,7 +931,8 @@ public class ArbitraryResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied");
|
||||
}
|
||||
|
||||
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64, false);
|
||||
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64, false,
|
||||
fee, filename, title, description, tags, category, preview);
|
||||
}
|
||||
|
||||
|
||||
@@ -815,6 +965,12 @@ public class ArbitraryResource {
|
||||
public String postZippedData(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@QueryParam("title") String title,
|
||||
@QueryParam("description") String description,
|
||||
@QueryParam("tags") List<String> tags,
|
||||
@QueryParam("category") Category category,
|
||||
@QueryParam("fee") Long fee,
|
||||
@QueryParam("preview") Boolean preview,
|
||||
String base64Zip) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
@@ -822,7 +978,8 @@ public class ArbitraryResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied");
|
||||
}
|
||||
|
||||
return this.upload(Service.valueOf(serviceString), name, null, null, null, base64Zip, true);
|
||||
return this.upload(Service.valueOf(serviceString), name, null, null, null, base64Zip, true,
|
||||
fee, null, title, description, tags, category, preview);
|
||||
}
|
||||
|
||||
@POST
|
||||
@@ -853,6 +1010,12 @@ public class ArbitraryResource {
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@PathParam("identifier") String identifier,
|
||||
@QueryParam("title") String title,
|
||||
@QueryParam("description") String description,
|
||||
@QueryParam("tags") List<String> tags,
|
||||
@QueryParam("category") Category category,
|
||||
@QueryParam("fee") Long fee,
|
||||
@QueryParam("preview") Boolean preview,
|
||||
String base64Zip) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
@@ -860,7 +1023,8 @@ public class ArbitraryResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data not supplied");
|
||||
}
|
||||
|
||||
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64Zip, true);
|
||||
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64Zip, true,
|
||||
fee, null, title, description, tags, category, preview);
|
||||
}
|
||||
|
||||
|
||||
@@ -896,6 +1060,13 @@ public class ArbitraryResource {
|
||||
public String postString(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@QueryParam("title") String title,
|
||||
@QueryParam("description") String description,
|
||||
@QueryParam("tags") List<String> tags,
|
||||
@QueryParam("category") Category category,
|
||||
@QueryParam("filename") String filename,
|
||||
@QueryParam("fee") Long fee,
|
||||
@QueryParam("preview") Boolean preview,
|
||||
String string) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
@@ -903,7 +1074,8 @@ public class ArbitraryResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data string not supplied");
|
||||
}
|
||||
|
||||
return this.upload(Service.valueOf(serviceString), name, null, null, string, null, false);
|
||||
return this.upload(Service.valueOf(serviceString), name, null, null, string, null, false,
|
||||
fee, filename, title, description, tags, category, preview);
|
||||
}
|
||||
|
||||
@POST
|
||||
@@ -936,6 +1108,13 @@ public class ArbitraryResource {
|
||||
@PathParam("service") String serviceString,
|
||||
@PathParam("name") String name,
|
||||
@PathParam("identifier") String identifier,
|
||||
@QueryParam("title") String title,
|
||||
@QueryParam("description") String description,
|
||||
@QueryParam("tags") List<String> tags,
|
||||
@QueryParam("category") Category category,
|
||||
@QueryParam("filename") String filename,
|
||||
@QueryParam("fee") Long fee,
|
||||
@QueryParam("preview") Boolean preview,
|
||||
String string) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
@@ -943,13 +1122,49 @@ public class ArbitraryResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data string not supplied");
|
||||
}
|
||||
|
||||
return this.upload(Service.valueOf(serviceString), name, identifier, null, string, null, false);
|
||||
return this.upload(Service.valueOf(serviceString), name, identifier, null, string, null, false,
|
||||
fee, filename, title, description, tags, category, preview);
|
||||
}
|
||||
|
||||
|
||||
// Shared methods
|
||||
|
||||
private String upload(Service service, String name, String identifier, String path, String string, String base64, boolean zipped) {
|
||||
private String preview(String directoryPath, Service service) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
ArbitraryTransactionData.Method method = ArbitraryTransactionData.Method.PUT;
|
||||
ArbitraryTransactionData.Compression compression = ArbitraryTransactionData.Compression.ZIP;
|
||||
|
||||
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(Paths.get(directoryPath),
|
||||
null, service, null, method, compression,
|
||||
null, null, null, null);
|
||||
try {
|
||||
arbitraryDataWriter.save();
|
||||
} catch (IOException | DataException | InterruptedException | MissingDataException e) {
|
||||
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
|
||||
} catch (RuntimeException e) {
|
||||
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_DATA, e.getMessage());
|
||||
}
|
||||
|
||||
ArbitraryDataFile arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile();
|
||||
if (arbitraryDataFile != null) {
|
||||
String digest58 = arbitraryDataFile.digest58();
|
||||
if (digest58 != null) {
|
||||
// Pre-authorize resource
|
||||
ArbitraryDataResource resource = new ArbitraryDataResource(digest58, null, null, null);
|
||||
ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource);
|
||||
|
||||
return "/render/hash/" + digest58 + "?secret=" + Base58.encode(arbitraryDataFile.getSecret());
|
||||
}
|
||||
}
|
||||
return "Unable to generate preview URL";
|
||||
}
|
||||
|
||||
private String upload(Service service, String name, String identifier,
|
||||
String path, String string, String base64, boolean zipped, Long fee, String filename,
|
||||
String title, String description, List<String> tags, Category category,
|
||||
Boolean preview) {
|
||||
// Fetch public key from registered name
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
NameData nameData = repository.getNameRepository().fromName(name);
|
||||
@@ -958,7 +1173,8 @@ public class ArbitraryResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, error);
|
||||
}
|
||||
|
||||
if (!Controller.getInstance().isUpToDate()) {
|
||||
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
|
||||
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
|
||||
}
|
||||
|
||||
@@ -972,7 +1188,12 @@ public class ArbitraryResource {
|
||||
if (path == null) {
|
||||
// See if we have a string instead
|
||||
if (string != null) {
|
||||
File tempFile = File.createTempFile("qortal-", ".tmp");
|
||||
if (filename == null) {
|
||||
// Use current time as filename
|
||||
filename = String.format("qortal-%d", NTP.getTime());
|
||||
}
|
||||
java.nio.file.Path tempDirectory = Files.createTempDirectory("qortal-");
|
||||
File tempFile = Paths.get(tempDirectory.toString(), filename).toFile();
|
||||
tempFile.deleteOnExit();
|
||||
BufferedWriter writer = new BufferedWriter(new FileWriter(tempFile.toPath().toString()));
|
||||
writer.write(string);
|
||||
@@ -982,7 +1203,12 @@ public class ArbitraryResource {
|
||||
}
|
||||
// ... or base64 encoded raw data
|
||||
else if (base64 != null) {
|
||||
File tempFile = File.createTempFile("qortal-", ".tmp");
|
||||
if (filename == null) {
|
||||
// Use current time as filename
|
||||
filename = String.format("qortal-%d", NTP.getTime());
|
||||
}
|
||||
java.nio.file.Path tempDirectory = Files.createTempDirectory("qortal-");
|
||||
File tempFile = Paths.get(tempDirectory.toString(), filename).toFile();
|
||||
tempFile.deleteOnExit();
|
||||
Files.write(tempFile.toPath(), Base64.decode(base64));
|
||||
path = tempFile.toPath().toString();
|
||||
@@ -1011,9 +1237,20 @@ public class ArbitraryResource {
|
||||
}
|
||||
}
|
||||
|
||||
// Finish here if user has requested a preview
|
||||
if (preview != null && preview == true) {
|
||||
return this.preview(path, service);
|
||||
}
|
||||
|
||||
// Default to zero fee if not specified
|
||||
if (fee == null) {
|
||||
fee = 0L;
|
||||
}
|
||||
|
||||
try {
|
||||
ArbitraryDataTransactionBuilder transactionBuilder = new ArbitraryDataTransactionBuilder(
|
||||
repository, publicKey58, Paths.get(path), name, null, service, identifier
|
||||
repository, publicKey58, fee, Paths.get(path), name, null, service, identifier,
|
||||
title, description, tags, category
|
||||
);
|
||||
|
||||
transactionBuilder.build();
|
||||
@@ -1031,7 +1268,7 @@ public class ArbitraryResource {
|
||||
}
|
||||
}
|
||||
|
||||
private HttpServletResponse download(Service service, String name, String identifier, String filepath, boolean rebuild, boolean async, Integer maxAttempts) {
|
||||
private HttpServletResponse download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts) {
|
||||
|
||||
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
|
||||
try {
|
||||
@@ -1084,13 +1321,50 @@ public class ArbitraryResource {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: limit file size that can be read into memory
|
||||
java.nio.file.Path path = Paths.get(outputPath.toString(), filepath);
|
||||
if (!Files.exists(path)) {
|
||||
String message = String.format("No file exists at filepath: %s", filepath);
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, message);
|
||||
}
|
||||
byte[] data = Files.readAllBytes(path);
|
||||
|
||||
byte[] data;
|
||||
int fileSize = (int)path.toFile().length();
|
||||
int length = fileSize;
|
||||
|
||||
// Parse "Range" header
|
||||
Integer rangeStart = null;
|
||||
Integer rangeEnd = null;
|
||||
String range = request.getHeader("Range");
|
||||
if (range != null) {
|
||||
range = range.replace("bytes=", "");
|
||||
String[] parts = range.split("-");
|
||||
rangeStart = (parts != null && parts.length > 0) ? Integer.parseInt(parts[0]) : null;
|
||||
rangeEnd = (parts != null && parts.length > 1) ? Integer.parseInt(parts[1]) : fileSize;
|
||||
}
|
||||
|
||||
if (rangeStart != null && rangeEnd != null) {
|
||||
// We have a range, so update the requested length
|
||||
length = rangeEnd - rangeStart;
|
||||
}
|
||||
|
||||
if (length < fileSize && encoding == null) {
|
||||
// Partial content requested, and not encoding the data
|
||||
response.setStatus(206);
|
||||
response.addHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd-1, fileSize));
|
||||
data = FilesystemUtils.readFromFile(path.toString(), rangeStart, length);
|
||||
}
|
||||
else {
|
||||
// Full content requested (or encoded data)
|
||||
response.setStatus(200);
|
||||
data = Files.readAllBytes(path); // TODO: limit file size that can be read into memory
|
||||
}
|
||||
|
||||
// Encode the data if requested
|
||||
if (encoding != null && Objects.equals(encoding.toLowerCase(), "base64")) {
|
||||
data = Base64.encode(data);
|
||||
}
|
||||
|
||||
response.addHeader("Accept-Ranges", "bytes");
|
||||
response.setContentType(context.getMimeType(path.toString()));
|
||||
response.setContentLength(data.length);
|
||||
response.getOutputStream().write(data);
|
||||
@@ -1102,37 +1376,44 @@ public class ArbitraryResource {
|
||||
}
|
||||
}
|
||||
|
||||
private FileProperties getFileProperties(Service service, String name, String identifier) {
|
||||
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
|
||||
try {
|
||||
arbitraryDataReader.loadSynchronously(false);
|
||||
java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
|
||||
if (outputPath == null) {
|
||||
// Assume the resource doesn't exist
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, "File not found");
|
||||
}
|
||||
|
||||
private ArbitraryResourceStatus getStatus(Service service, String name, String identifier, Boolean build) {
|
||||
FileProperties fileProperties = new FileProperties();
|
||||
fileProperties.size = FileUtils.sizeOfDirectory(outputPath.toFile());
|
||||
|
||||
// If "build=true" has been specified in the query string, build the resource before returning its status
|
||||
if (build != null && build == true) {
|
||||
ArbitraryDataReader reader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, null);
|
||||
try {
|
||||
if (!reader.isBuilding()) {
|
||||
reader.loadSynchronously(false);
|
||||
String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal");
|
||||
if (files.length == 1) {
|
||||
String filename = files[0];
|
||||
java.nio.file.Path filePath = Paths.get(outputPath.toString(), files[0]);
|
||||
ContentInfoUtil util = new ContentInfoUtil();
|
||||
ContentInfo info = util.findMatch(filePath.toFile());
|
||||
String mimeType;
|
||||
if (info != null) {
|
||||
// Attempt to extract MIME type from file contents
|
||||
mimeType = info.getMimeType();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
// No need to handle exception, as it will be reflected in the status
|
||||
else {
|
||||
// Fall back to using the filename
|
||||
FileNameMap fileNameMap = URLConnection.getFileNameMap();
|
||||
mimeType = fileNameMap.getContentTypeFor(filename);
|
||||
}
|
||||
fileProperties.filename = filename;
|
||||
fileProperties.mimeType = mimeType;
|
||||
}
|
||||
}
|
||||
|
||||
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
|
||||
return resource.getStatus(false);
|
||||
}
|
||||
return fileProperties;
|
||||
|
||||
private List<ArbitraryResourceInfo> addStatusToResources(List<ArbitraryResourceInfo> resources) {
|
||||
// Determine and add the status of each resource
|
||||
List<ArbitraryResourceInfo> updatedResources = new ArrayList<>();
|
||||
for (ArbitraryResourceInfo resourceInfo : resources) {
|
||||
ArbitraryDataResource resource = new ArbitraryDataResource(resourceInfo.name, ResourceIdType.NAME,
|
||||
resourceInfo.service, resourceInfo.identifier);
|
||||
ArbitraryResourceStatus status = resource.getStatus(true);
|
||||
if (status != null) {
|
||||
resourceInfo.status = status;
|
||||
}
|
||||
updatedResources.add(resourceInfo);
|
||||
} catch (Exception e) {
|
||||
LOGGER.debug(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
|
||||
}
|
||||
return updatedResources;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,6 +48,7 @@ import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
@Path("/blocks")
|
||||
@Tag(name = "Blocks")
|
||||
@@ -114,7 +115,7 @@ public class BlocksResource {
|
||||
@Path("/signature/{signature}/data")
|
||||
@Operation(
|
||||
summary = "Fetch serialized, base58 encoded block data using base58 signature",
|
||||
description = "Returns serialized data for the block that matches the given signature",
|
||||
description = "Returns serialized data for the block that matches the given signature, and an optional block serialization version",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "the block data",
|
||||
@@ -125,7 +126,7 @@ public class BlocksResource {
|
||||
@ApiErrors({
|
||||
ApiError.INVALID_SIGNATURE, ApiError.BLOCK_UNKNOWN, ApiError.INVALID_DATA, ApiError.REPOSITORY_ISSUE
|
||||
})
|
||||
public String getSerializedBlockData(@PathParam("signature") String signature58) {
|
||||
public String getSerializedBlockData(@PathParam("signature") String signature58, @QueryParam("version") Integer version) {
|
||||
// Decode signature
|
||||
byte[] signature;
|
||||
try {
|
||||
@@ -136,20 +137,44 @@ public class BlocksResource {
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Default to version 1
|
||||
if (version == null) {
|
||||
version = 1;
|
||||
}
|
||||
|
||||
// Check the database first
|
||||
BlockData blockData = repository.getBlockRepository().fromSignature(signature);
|
||||
if (blockData != null) {
|
||||
Block block = new Block(repository, blockData);
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
|
||||
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
|
||||
bytes.write(BlockTransformer.toBytes(block));
|
||||
|
||||
switch (version) {
|
||||
case 1:
|
||||
bytes.write(BlockTransformer.toBytes(block));
|
||||
break;
|
||||
|
||||
case 2:
|
||||
bytes.write(BlockTransformer.toBytesV2(block));
|
||||
break;
|
||||
|
||||
default:
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
|
||||
return Base58.encode(bytes.toByteArray());
|
||||
}
|
||||
|
||||
// Not found, so try the block archive
|
||||
byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
|
||||
if (bytes != null) {
|
||||
return Base58.encode(bytes);
|
||||
Triple<byte[], Integer, Integer> serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
|
||||
if (serializedBlock != null) {
|
||||
byte[] bytes = serializedBlock.getA();
|
||||
Integer serializationVersion = serializedBlock.getB();
|
||||
if (version != serializationVersion) {
|
||||
// TODO: we could quite easily reserialize the block with the requested version
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Block is not stored using requested serialization version.");
|
||||
}
|
||||
return Base58.encode(bytes);
|
||||
}
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
@@ -613,13 +638,16 @@ public class BlocksResource {
|
||||
@ApiErrors({
|
||||
ApiError.REPOSITORY_ISSUE
|
||||
})
|
||||
public List<BlockData> getBlockRange(@PathParam("height") int height, @Parameter(
|
||||
ref = "count"
|
||||
) @QueryParam("count") int count) {
|
||||
public List<BlockData> getBlockRange(@PathParam("height") int height,
|
||||
@Parameter(ref = "count") @QueryParam("count") int count,
|
||||
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse,
|
||||
@QueryParam("includeOnlineSignatures") Boolean includeOnlineSignatures) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<BlockData> blocks = new ArrayList<>();
|
||||
boolean shouldReverse = (reverse != null && reverse == true);
|
||||
|
||||
for (/* count already set */; count > 0; --count, ++height) {
|
||||
int i = 0;
|
||||
while (i < count) {
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(height);
|
||||
if (blockData == null) {
|
||||
// Not found - try the archive
|
||||
@@ -629,8 +657,14 @@ public class BlocksResource {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (includeOnlineSignatures == null || includeOnlineSignatures == false) {
|
||||
blockData.setOnlineAccountsSignatures(null);
|
||||
}
|
||||
|
||||
blocks.add(blockData);
|
||||
|
||||
height = shouldReverse ? height - 1 : height + 1;
|
||||
i++;
|
||||
}
|
||||
|
||||
return blocks;
|
||||
|
||||
@@ -40,6 +40,8 @@ import org.qortal.utils.Base58;
|
||||
|
||||
import com.google.common.primitives.Bytes;
|
||||
|
||||
import static org.qortal.data.chat.ChatMessage.Encoding;
|
||||
|
||||
@Path("/chat")
|
||||
@Tag(name = "Chat")
|
||||
public class ChatResource {
|
||||
@@ -69,6 +71,11 @@ public class ChatResource {
|
||||
public List<ChatMessage> searchChat(@QueryParam("before") Long before, @QueryParam("after") Long after,
|
||||
@QueryParam("txGroupId") Integer txGroupId,
|
||||
@QueryParam("involving") List<String> involvingAddresses,
|
||||
@QueryParam("reference") String reference,
|
||||
@QueryParam("chatreference") String chatReference,
|
||||
@QueryParam("haschatreference") Boolean hasChatReference,
|
||||
@QueryParam("sender") String sender,
|
||||
@QueryParam("encoding") Encoding encoding,
|
||||
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
|
||||
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
|
||||
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
|
||||
@@ -87,18 +94,63 @@ public class ChatResource {
|
||||
if (after != null && after < 1500000000000L)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
byte[] referenceBytes = null;
|
||||
if (reference != null)
|
||||
referenceBytes = Base58.decode(reference);
|
||||
|
||||
byte[] chatReferenceBytes = null;
|
||||
if (chatReference != null)
|
||||
chatReferenceBytes = Base58.decode(chatReference);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getChatRepository().getMessagesMatchingCriteria(
|
||||
before,
|
||||
after,
|
||||
txGroupId,
|
||||
referenceBytes,
|
||||
chatReferenceBytes,
|
||||
hasChatReference,
|
||||
involvingAddresses,
|
||||
sender,
|
||||
encoding,
|
||||
limit, offset, reverse);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/message/{signature}")
|
||||
@Operation(
|
||||
summary = "Find chat message by signature",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "CHAT message",
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
implementation = ChatMessage.class
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
|
||||
public ChatMessage getMessageBySignature(@PathParam("signature") String signature58, @QueryParam("encoding") Encoding encoding) {
|
||||
byte[] signature = Base58.decode(signature58);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
ChatTransactionData chatTransactionData = (ChatTransactionData) repository.getTransactionRepository().fromSignature(signature);
|
||||
if (chatTransactionData == null) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Message not found");
|
||||
}
|
||||
|
||||
return repository.getChatRepository().toChatMessage(chatTransactionData, encoding);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/active/{address}")
|
||||
@Operation(
|
||||
@@ -116,12 +168,12 @@ public class ChatResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
|
||||
public ActiveChats getActiveChats(@PathParam("address") String address) {
|
||||
public ActiveChats getActiveChats(@PathParam("address") String address, @QueryParam("encoding") Encoding encoding) {
|
||||
if (address == null || !Crypto.isValidAddress(address))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getChatRepository().getActiveChats(address);
|
||||
return repository.getChatRepository().getActiveChats(address, encoding);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import java.util.List;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.HeaderParam;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
@@ -35,6 +36,37 @@ public class CrossChainBitcoinResource {
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@GET
|
||||
@Path("/height")
|
||||
@Operation(
|
||||
summary = "Returns current Bitcoin block height",
|
||||
description = "Returns the height of the most recent block in the Bitcoin chain.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
type = "number"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
public String getBitcoinHeight() {
|
||||
Bitcoin bitcoin = Bitcoin.getInstance();
|
||||
|
||||
try {
|
||||
Integer height = bitcoin.getBlockchainHeight();
|
||||
if (height == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return height.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/walletbalance")
|
||||
@Operation(
|
||||
@@ -68,7 +100,7 @@ public class CrossChainBitcoinResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
Long balance = bitcoin.getWalletBalanceFromTransactions(key58);
|
||||
Long balance = bitcoin.getWalletBalance(key58);
|
||||
if (balance == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
@@ -118,11 +150,50 @@ public class CrossChainBitcoinResource {
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/unusedaddress")
|
||||
@Operation(
|
||||
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getUnusedBitcoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Bitcoin bitcoin = Bitcoin.getInstance();
|
||||
|
||||
if (!bitcoin.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
return bitcoin.getUnusedReceiveAddress(key58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/send")
|
||||
@Operation(
|
||||
summary = "Sends BTC from hierarchical, deterministic BIP32 wallet to specific address",
|
||||
description = "Currently only supports 'legacy' P2PKH Bitcoin addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
|
||||
description = "Currently supports 'legacy' P2PKH Bitcoin addresses and Native SegWit (P2WPKH) addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
|
||||
@@ -0,0 +1,248 @@
|
||||
package org.qortal.api.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.ArraySchema;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.HeaderParam;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import org.bitcoinj.core.Transaction;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiErrors;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.Security;
|
||||
import org.qortal.api.model.crosschain.DigibyteSendRequest;
|
||||
import org.qortal.crosschain.Digibyte;
|
||||
import org.qortal.crosschain.ForeignBlockchainException;
|
||||
import org.qortal.crosschain.SimpleTransaction;
|
||||
|
||||
@Path("/crosschain/dgb")
|
||||
@Tag(name = "Cross-Chain (Digibyte)")
|
||||
public class CrossChainDigibyteResource {
|
||||
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@GET
|
||||
@Path("/height")
|
||||
@Operation(
|
||||
summary = "Returns current Digibyte block height",
|
||||
description = "Returns the height of the most recent block in the Digibyte chain.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
type = "number"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
public String getDigibyteHeight() {
|
||||
Digibyte digibyte = Digibyte.getInstance();
|
||||
|
||||
try {
|
||||
Integer height = digibyte.getBlockchainHeight();
|
||||
if (height == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return height.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/walletbalance")
|
||||
@Operation(
|
||||
summary = "Returns DGB balance for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "balance (satoshis)"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getDigibyteWalletBalance(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Digibyte digibyte = Digibyte.getInstance();
|
||||
|
||||
if (!digibyte.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
Long balance = digibyte.getWalletBalance(key58);
|
||||
if (balance == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return balance.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/wallettransactions")
|
||||
@Operation(
|
||||
summary = "Returns transactions for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public List<SimpleTransaction> getDigibyteWalletTransactions(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Digibyte digibyte = Digibyte.getInstance();
|
||||
|
||||
if (!digibyte.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
return digibyte.getWalletTransactions(key58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/unusedaddress")
|
||||
@Operation(
|
||||
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getUnusedDigibyteReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Digibyte digibyte = Digibyte.getInstance();
|
||||
|
||||
if (!digibyte.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
return digibyte.getUnusedReceiveAddress(key58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/send")
|
||||
@Operation(
|
||||
summary = "Sends DGB from hierarchical, deterministic BIP32 wallet to specific address",
|
||||
description = "Currently supports 'legacy' P2PKH Digibyte addresses and Native SegWit (P2WPKH) addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(
|
||||
implementation = DigibyteSendRequest.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "transaction hash"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String sendBitcoin(@HeaderParam(Security.API_KEY_HEADER) String apiKey, DigibyteSendRequest digibyteSendRequest) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (digibyteSendRequest.digibyteAmount <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
if (digibyteSendRequest.feePerByte != null && digibyteSendRequest.feePerByte <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
Digibyte digibyte = Digibyte.getInstance();
|
||||
|
||||
if (!digibyte.isValidAddress(digibyteSendRequest.receivingAddress))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
if (!digibyte.isValidDeterministicKey(digibyteSendRequest.xprv58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
Transaction spendTransaction = digibyte.buildSpend(digibyteSendRequest.xprv58,
|
||||
digibyteSendRequest.receivingAddress,
|
||||
digibyteSendRequest.digibyteAmount,
|
||||
digibyteSendRequest.feePerByte);
|
||||
|
||||
if (spendTransaction == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE);
|
||||
|
||||
try {
|
||||
digibyte.broadcastTransaction(spendTransaction);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
|
||||
return spendTransaction.getTxId().toString();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -21,6 +21,7 @@ import org.qortal.crosschain.SimpleTransaction;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.HeaderParam;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
@@ -33,6 +34,37 @@ public class CrossChainDogecoinResource {
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@GET
|
||||
@Path("/height")
|
||||
@Operation(
|
||||
summary = "Returns current Dogecoin block height",
|
||||
description = "Returns the height of the most recent block in the Dogecoin chain.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
type = "number"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
public String getDogecoinHeight() {
|
||||
Dogecoin dogecoin = Dogecoin.getInstance();
|
||||
|
||||
try {
|
||||
Integer height = dogecoin.getBlockchainHeight();
|
||||
if (height == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return height.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/walletbalance")
|
||||
@Operation(
|
||||
@@ -66,7 +98,7 @@ public class CrossChainDogecoinResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
Long balance = dogecoin.getWalletBalanceFromTransactions(key58);
|
||||
Long balance = dogecoin.getWalletBalance(key58);
|
||||
if (balance == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
@@ -116,6 +148,45 @@ public class CrossChainDogecoinResource {
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/unusedaddress")
|
||||
@Operation(
|
||||
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getUnusedDogecoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Dogecoin dogecoin = Dogecoin.getInstance();
|
||||
|
||||
if (!dogecoin.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
return dogecoin.getUnusedReceiveAddress(key58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/send")
|
||||
@Operation(
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
package org.qortal.api.resource;
|
||||
|
||||
import com.google.common.hash.HashCode;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
@@ -9,6 +10,8 @@ import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.*;
|
||||
@@ -284,6 +287,12 @@ public class CrossChainHtlcResource {
|
||||
continue;
|
||||
}
|
||||
|
||||
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
|
||||
if (Objects.equals(bitcoiny.getCurrencyCode(), "ARRR")) {
|
||||
LOGGER.info("Skipping AT {} because ARRR is currently unsupported", atAddress);
|
||||
continue;
|
||||
}
|
||||
|
||||
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atData);
|
||||
if (crossChainTradeData == null) {
|
||||
LOGGER.info("Couldn't find crosschain trade data for AT {}", atAddress);
|
||||
@@ -363,10 +372,6 @@ public class CrossChainHtlcResource {
|
||||
// Use secret-A to redeem P2SH-A
|
||||
|
||||
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
|
||||
if (bitcoiny.getClass() == Bitcoin.class) {
|
||||
LOGGER.info("Redeeming a Bitcoin HTLC is not yet supported");
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
|
||||
int lockTime = crossChainTradeData.lockTimeA;
|
||||
byte[] redeemScriptA = BitcoinyHTLC.buildScript(crossChainTradeData.partnerForeignPKH, lockTime, crossChainTradeData.creatorForeignPKH, crossChainTradeData.hashOfSecretA);
|
||||
@@ -574,70 +579,108 @@ public class CrossChainHtlcResource {
|
||||
// If the AT is "finished" then it will have a zero balance
|
||||
// In these cases we should avoid HTLC refunds if tbe QORT haven't been returned to the seller
|
||||
if (atData.getIsFinished() && crossChainTradeData.mode != AcctMode.REFUNDED && crossChainTradeData.mode != AcctMode.CANCELLED) {
|
||||
LOGGER.info(String.format("Skipping AT %s because the QORT has already been redemed", atAddress));
|
||||
LOGGER.info(String.format("Skipping AT %s because the QORT has already been redeemed by the buyer", atAddress));
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
|
||||
List<TradeBotData> allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData();
|
||||
TradeBotData tradeBotData = allTradeBotData.stream().filter(tradeBotDataItem -> tradeBotDataItem.getAtAddress().equals(atAddress)).findFirst().orElse(null);
|
||||
if (tradeBotData == null)
|
||||
List<TradeBotData> tradeBotDataList = allTradeBotData.stream().filter(tradeBotDataItem -> tradeBotDataItem.getAtAddress().equals(atAddress)).collect(Collectors.toList());
|
||||
if (tradeBotDataList == null || tradeBotDataList.isEmpty())
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
|
||||
if (bitcoiny.getClass() == Bitcoin.class) {
|
||||
LOGGER.info("Refunding a Bitcoin HTLC is not yet supported");
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
// Loop through all matching entries for this AT address, as there might be more than one
|
||||
for (TradeBotData tradeBotData : tradeBotDataList) {
|
||||
|
||||
int lockTime = tradeBotData.getLockTimeA();
|
||||
if (tradeBotData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
// We can't refund P2SH-A until lockTime-A has passed
|
||||
if (NTP.getTime() <= lockTime * 1000L)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_TOO_SOON);
|
||||
Bitcoiny bitcoiny = (Bitcoiny) acct.getBlockchain();
|
||||
int lockTime = tradeBotData.getLockTimeA();
|
||||
|
||||
// We can't refund P2SH-A until median block time has passed lockTime-A (see BIP113)
|
||||
int medianBlockTime = bitcoiny.getMedianBlockTime();
|
||||
if (medianBlockTime <= lockTime)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_TOO_SOON);
|
||||
// We can't refund P2SH-A until lockTime-A has passed
|
||||
if (NTP.getTime() <= lockTime * 1000L)
|
||||
continue;
|
||||
|
||||
byte[] redeemScriptA = BitcoinyHTLC.buildScript(tradeBotData.getTradeForeignPublicKeyHash(), lockTime, crossChainTradeData.creatorForeignPKH, tradeBotData.getHashOfSecret());
|
||||
String p2shAddressA = bitcoiny.deriveP2shAddress(redeemScriptA);
|
||||
LOGGER.info(String.format("Refunding P2SH address: %s", p2shAddressA));
|
||||
// We can't refund P2SH-A until median block time has passed lockTime-A (see BIP113)
|
||||
int medianBlockTime = bitcoiny.getMedianBlockTime();
|
||||
if (medianBlockTime <= lockTime)
|
||||
continue;
|
||||
|
||||
// Fee for redeem/refund is subtracted from P2SH-A balance.
|
||||
long feeTimestamp = calcFeeTimestamp(lockTime, crossChainTradeData.tradeTimeout);
|
||||
long p2shFee = bitcoiny.getP2shFee(feeTimestamp);
|
||||
long minimumAmountA = crossChainTradeData.expectedForeignAmount + p2shFee;
|
||||
BitcoinyHTLC.Status htlcStatusA = BitcoinyHTLC.determineHtlcStatus(bitcoiny.getBlockchainProvider(), p2shAddressA, minimumAmountA);
|
||||
// Fee for redeem/refund is subtracted from P2SH-A balance.
|
||||
long feeTimestamp = calcFeeTimestamp(lockTime, crossChainTradeData.tradeTimeout);
|
||||
long p2shFee = bitcoiny.getP2shFee(feeTimestamp);
|
||||
long minimumAmountA = crossChainTradeData.expectedForeignAmount + p2shFee;
|
||||
|
||||
switch (htlcStatusA) {
|
||||
case UNFUNDED:
|
||||
case FUNDING_IN_PROGRESS:
|
||||
// Still waiting for P2SH-A to be funded...
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_TOO_SOON);
|
||||
// Create redeem script based on destination chain
|
||||
byte[] redeemScriptA;
|
||||
String p2shAddressA;
|
||||
BitcoinyHTLC.Status htlcStatusA;
|
||||
if (Objects.equals(bitcoiny.getCurrencyCode(), "ARRR")) {
|
||||
redeemScriptA = PirateChainHTLC.buildScript(tradeBotData.getTradeForeignPublicKey(), lockTime, crossChainTradeData.creatorForeignPKH, tradeBotData.getHashOfSecret());
|
||||
p2shAddressA = PirateChain.getInstance().deriveP2shAddressBPrefix(redeemScriptA);
|
||||
htlcStatusA = PirateChainHTLC.determineHtlcStatus(bitcoiny.getBlockchainProvider(), p2shAddressA, minimumAmountA);
|
||||
} else {
|
||||
redeemScriptA = BitcoinyHTLC.buildScript(tradeBotData.getTradeForeignPublicKeyHash(), lockTime, crossChainTradeData.creatorForeignPKH, tradeBotData.getHashOfSecret());
|
||||
p2shAddressA = bitcoiny.deriveP2shAddress(redeemScriptA);
|
||||
htlcStatusA = BitcoinyHTLC.determineHtlcStatus(bitcoiny.getBlockchainProvider(), p2shAddressA, minimumAmountA);
|
||||
}
|
||||
LOGGER.info(String.format("Refunding P2SH address: %s", p2shAddressA));
|
||||
|
||||
case REDEEM_IN_PROGRESS:
|
||||
case REDEEMED:
|
||||
case REFUND_IN_PROGRESS:
|
||||
case REFUNDED:
|
||||
// Too late!
|
||||
return false;
|
||||
switch (htlcStatusA) {
|
||||
case UNFUNDED:
|
||||
case FUNDING_IN_PROGRESS:
|
||||
// Still waiting for P2SH-A to be funded...
|
||||
continue;
|
||||
|
||||
case FUNDED:{
|
||||
Coin refundAmount = Coin.valueOf(crossChainTradeData.expectedForeignAmount);
|
||||
ECKey refundKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
|
||||
List<TransactionOutput> fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA);
|
||||
case REDEEM_IN_PROGRESS:
|
||||
case REDEEMED:
|
||||
case REFUND_IN_PROGRESS:
|
||||
case REFUNDED:
|
||||
// Too late!
|
||||
continue;
|
||||
|
||||
// Validate the destination foreign blockchain address
|
||||
Address receiving = Address.fromString(bitcoiny.getNetworkParameters(), receiveAddress);
|
||||
if (receiving.getOutputScriptType() != Script.ScriptType.P2PKH)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
case FUNDED: {
|
||||
Coin refundAmount = Coin.valueOf(crossChainTradeData.expectedForeignAmount);
|
||||
|
||||
Transaction p2shRefundTransaction = BitcoinyHTLC.buildRefundTransaction(bitcoiny.getNetworkParameters(), refundAmount, refundKey,
|
||||
fundingOutputs, redeemScriptA, lockTime, receiving.getHash());
|
||||
if (Objects.equals(bitcoiny.getCurrencyCode(), "ARRR")) {
|
||||
// Pirate Chain custom integration
|
||||
|
||||
bitcoiny.broadcastTransaction(p2shRefundTransaction);
|
||||
return true;
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
String p2shAddressT3 = pirateChain.deriveP2shAddress(redeemScriptA);
|
||||
|
||||
// Get funding txid
|
||||
String fundingTxidHex = PirateChainHTLC.getUnspentFundingTxid(pirateChain.getBlockchainProvider(), p2shAddressA, minimumAmountA);
|
||||
if (fundingTxidHex == null) {
|
||||
throw new ForeignBlockchainException("Missing funding txid when refunding P2SH");
|
||||
}
|
||||
String fundingTxid58 = Base58.encode(HashCode.fromString(fundingTxidHex).asBytes());
|
||||
|
||||
byte[] privateKey = tradeBotData.getTradePrivateKey();
|
||||
String privateKey58 = Base58.encode(privateKey);
|
||||
String redeemScript58 = Base58.encode(redeemScriptA);
|
||||
|
||||
String txid = PirateChain.getInstance().refundP2sh(p2shAddressT3,
|
||||
receiveAddress, refundAmount.value, redeemScript58, fundingTxid58, lockTime, privateKey58);
|
||||
LOGGER.info("Refund txid: {}", txid);
|
||||
} else {
|
||||
// ElectrumX coins
|
||||
|
||||
ECKey refundKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
|
||||
List<TransactionOutput> fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA);
|
||||
|
||||
// Validate the destination foreign blockchain address
|
||||
Address receiving = Address.fromString(bitcoiny.getNetworkParameters(), receiveAddress);
|
||||
if (receiving.getOutputScriptType() != Script.ScriptType.P2PKH)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
Transaction p2shRefundTransaction = BitcoinyHTLC.buildRefundTransaction(bitcoiny.getNetworkParameters(), refundAmount, refundKey,
|
||||
fundingOutputs, redeemScriptA, lockTime, receiving.getHash());
|
||||
|
||||
bitcoiny.broadcastTransaction(p2shRefundTransaction);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
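The refund logic above gates every action on the HTLC status of P2SH-A: while the P2SH is unfunded or still funding, the bot keeps waiting; once funded, it builds and broadcasts the refund transaction; any redeem or refund state means it is too late to act. A minimal sketch of that decision table, using the BitcoinyHTLC.Status values that appear in the diff (the helper class and Action enum are illustrative only, not part of the Qortal codebase):

import org.qortal.crosschain.BitcoinyHTLC;

public class HtlcRefundGate {

    // Illustrative action values - not a Qortal API
    public enum Action { WAIT, REFUND, GIVE_UP }

    public static Action actionFor(BitcoinyHTLC.Status htlcStatus) {
        switch (htlcStatus) {
            case UNFUNDED:
            case FUNDING_IN_PROGRESS:
                // Still waiting for P2SH-A to be funded
                return Action.WAIT;

            case FUNDED:
                // Safe to build and broadcast the refund transaction
                return Action.REFUND;

            case REDEEM_IN_PROGRESS:
            case REDEEMED:
            case REFUND_IN_PROGRESS:
            case REFUNDED:
            default:
                // Too late to refund
                return Action.GIVE_UP;
        }
    }
}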
||||
@@ -14,6 +14,7 @@ import java.util.List;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.HeaderParam;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
@@ -35,6 +36,37 @@ public class CrossChainLitecoinResource {
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@GET
|
||||
@Path("/height")
|
||||
@Operation(
|
||||
summary = "Returns current Litecoin block height",
|
||||
description = "Returns the height of the most recent block in the Litecoin chain.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
type = "number"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
public String getLitecoinHeight() {
|
||||
Litecoin litecoin = Litecoin.getInstance();
|
||||
|
||||
try {
|
||||
Integer height = litecoin.getBlockchainHeight();
|
||||
if (height == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return height.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
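For reference, the new /height endpoint is an unauthenticated GET that returns the block height as plain text. A minimal client sketch using java.net.http; the /crosschain/ltc base path and the default API port 12391 are assumptions (the class-level @Path annotation and node settings are outside this hunk):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class LitecoinHeightClient {
    public static void main(String[] args) throws Exception {
        // Assumed base URL - adjust host/port to your node's API settings
        URI uri = URI.create("http://localhost:12391/crosschain/ltc/height");

        HttpRequest request = HttpRequest.newBuilder(uri).GET().build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // Body is the height as a plain-text number
        System.out.println("Litecoin height: " + response.body());
    }
}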
||||
@POST
|
||||
@Path("/walletbalance")
|
||||
@Operation(
|
||||
@@ -68,7 +100,7 @@ public class CrossChainLitecoinResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
Long balance = litecoin.getWalletBalanceFromTransactions(key58);
|
||||
Long balance = litecoin.getWalletBalance(key58);
|
||||
if (balance == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
@@ -118,11 +150,50 @@ public class CrossChainLitecoinResource {
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/unusedaddress")
|
||||
@Operation(
|
||||
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getUnusedLitecoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Litecoin litecoin = Litecoin.getInstance();
|
||||
|
||||
if (!litecoin.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
return litecoin.getUnusedReceiveAddress(key58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
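The /unusedaddress endpoint is API-key protected and takes the BIP32 key as a plain-text body. A hedged client sketch; the X-API-KEY header name (the value of Security.API_KEY_HEADER) and the /crosschain/ltc base path are assumptions not shown in this hunk:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class LitecoinUnusedAddressClient {
    public static void main(String[] args) throws Exception {
        String key58 = "tpub-or-xpub-in-base58";   // placeholder BIP32 'm' public key
        String apiKey = "your-api-key";            // node API key

        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:12391/crosschain/ltc/unusedaddress"))
                .header("X-API-KEY", apiKey)           // assumed header name
                .header("Content-Type", "text/plain")
                .POST(HttpRequest.BodyPublishers.ofString(key58))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("First unused address: " + response.body());
    }
}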
||||
@POST
|
||||
@Path("/send")
|
||||
@Operation(
|
||||
summary = "Sends LTC from hierarchical, deterministic BIP32 wallet to specific address",
|
||||
description = "Currently only supports 'legacy' P2PKH Litecoin addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
|
||||
description = "Currently supports 'legacy' P2PKH Litecoin addresses and Native SegWit (P2WPKH) addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
|
||||
src/main/java/org/qortal/api/resource/CrossChainPirateChainResource.java (new file)
@@ -0,0 +1,261 @@
|
||||
package org.qortal.api.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.ArraySchema;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiErrors;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.Security;
|
||||
import org.qortal.api.model.crosschain.PirateChainSendRequest;
|
||||
import org.qortal.crosschain.ForeignBlockchainException;
|
||||
import org.qortal.crosschain.PirateChain;
|
||||
import org.qortal.crosschain.SimpleTransaction;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.HeaderParam;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import java.util.List;
|
||||
|
||||
@Path("/crosschain/arrr")
|
||||
@Tag(name = "Cross-Chain (Pirate Chain)")
|
||||
public class CrossChainPirateChainResource {
|
||||
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@GET
|
||||
@Path("/height")
|
||||
@Operation(
|
||||
summary = "Returns current PirateChain block height",
|
||||
description = "Returns the height of the most recent block in the PirateChain chain.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
type = "number"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
public String getPirateChainHeight() {
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
try {
|
||||
Integer height = pirateChain.getBlockchainHeight();
|
||||
if (height == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return height.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/walletbalance")
|
||||
@Operation(
|
||||
summary = "Returns ARRR balance",
|
||||
description = "Supply 32 bytes of entropy, Base58 encoded",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "32 bytes of entropy, Base58 encoded",
|
||||
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "balance (satoshis)"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getPirateChainWalletBalance(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
try {
|
||||
Long balance = pirateChain.getWalletBalance(entropy58);
|
||||
if (balance == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return balance.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
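All of the ARRR wallet endpoints above take the same request body: 32 bytes of entropy, Base58 encoded. A small sketch of producing such a value, assuming org.qortal.utils.Base58 (already used elsewhere in these diffs) is on the classpath; treat the result as a wallet seed and keep it private:

import java.security.SecureRandom;

import org.qortal.utils.Base58;

public class ArrrWalletEntropy {
    public static void main(String[] args) {
        // 32 bytes of entropy back the Pirate Chain light-wallet seed
        byte[] entropy = new byte[32];
        new SecureRandom().nextBytes(entropy);

        // Base58 encode for use as the text/plain request body
        String entropy58 = Base58.encode(entropy);
        System.out.println(entropy58);
    }
}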
||||
@POST
|
||||
@Path("/wallettransactions")
|
||||
@Operation(
|
||||
summary = "Returns transactions",
|
||||
description = "Supply 32 bytes of entropy, Base58 encoded",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "32 bytes of entropy, Base58 encoded",
|
||||
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public List<SimpleTransaction> getPirateChainWalletTransactions(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
try {
|
||||
return pirateChain.getWalletTransactions(entropy58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/send")
|
||||
@Operation(
|
||||
summary = "Sends ARRR from wallet",
|
||||
description = "Currently supports 'legacy' P2PKH PirateChain addresses and Native SegWit (P2WPKH) addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "32 bytes of entropy, Base58 encoded",
|
||||
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "transaction hash"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String sendBitcoin(@HeaderParam(Security.API_KEY_HEADER) String apiKey, PirateChainSendRequest pirateChainSendRequest) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (pirateChainSendRequest.arrrAmount <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
if (pirateChainSendRequest.feePerByte != null && pirateChainSendRequest.feePerByte <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
try {
|
||||
return pirateChain.sendCoins(pirateChainSendRequest);
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
// TODO
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
|
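The /send handler above consumes a PirateChainSendRequest. Only arrrAmount and feePerByte are visible in this hunk; entropy58 and receivingAddress below are assumed field names for illustration, so verify them against the model class before use. The /crosschain/arrr base path comes from the class @Path annotation; port and API-key header are assumptions as before:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ArrrSendClient {
    public static void main(String[] args) throws Exception {
        // arrrAmount and feePerByte appear in the validation code above;
        // entropy58 and receivingAddress are assumed field names
        String body = "{"
                + "\"entropy58\": \"5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV\","
                + "\"receivingAddress\": \"zs1examplePlaceholderAddress\","
                + "\"arrrAmount\": 100000000,"
                + "\"feePerByte\": 110"
                + "}";

        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:12391/crosschain/arrr/send"))
                .header("X-API-KEY", "your-api-key")       // assumed header name
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("ARRR transaction: " + response.body());
    }
}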
||||
@POST
|
||||
@Path("/walletaddress")
|
||||
@Operation(
|
||||
summary = "Returns main wallet address",
|
||||
description = "Supply 32 bytes of entropy, Base58 encoded",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "32 bytes of entropy, Base58 encoded",
|
||||
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getPirateChainWalletAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
try {
|
||||
return pirateChain.getWalletAddress(entropy58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@POST
|
||||
@Path("/syncstatus")
|
||||
@Operation(
|
||||
summary = "Returns synchronization status",
|
||||
description = "Supply 32 bytes of entropy, Base58 encoded",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "32 bytes of entropy, Base58 encoded",
|
||||
example = "5oSXF53qENtdUyKhqSxYzP57m6RhVFP9BJKRr9E5kRGV"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getPirateChainSyncStatus(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String entropy58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
PirateChain pirateChain = PirateChain.getInstance();
|
||||
|
||||
try {
|
||||
return pirateChain.getSyncStatus(entropy58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
src/main/java/org/qortal/api/resource/CrossChainRavencoinResource.java (new file)
@@ -0,0 +1,248 @@
|
||||
package org.qortal.api.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.ArraySchema;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.HeaderParam;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import org.bitcoinj.core.Transaction;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiErrors;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.Security;
|
||||
import org.qortal.api.model.crosschain.RavencoinSendRequest;
|
||||
import org.qortal.crosschain.Ravencoin;
|
||||
import org.qortal.crosschain.ForeignBlockchainException;
|
||||
import org.qortal.crosschain.SimpleTransaction;
|
||||
|
||||
@Path("/crosschain/rvn")
|
||||
@Tag(name = "Cross-Chain (Ravencoin)")
|
||||
public class CrossChainRavencoinResource {
|
||||
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@GET
|
||||
@Path("/height")
|
||||
@Operation(
|
||||
summary = "Returns current Ravencoin block height",
|
||||
description = "Returns the height of the most recent block in the Ravencoin chain.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(
|
||||
schema = @Schema(
|
||||
type = "number"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
public String getRavencoinHeight() {
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
try {
|
||||
Integer height = ravencoin.getBlockchainHeight();
|
||||
if (height == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return height.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/walletbalance")
|
||||
@Operation(
|
||||
summary = "Returns RVN balance for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "balance (satoshis)"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getRavencoinWalletBalance(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
if (!ravencoin.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
Long balance = ravencoin.getWalletBalance(key58);
|
||||
if (balance == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
|
||||
return balance.toString();
|
||||
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/wallettransactions")
|
||||
@Operation(
|
||||
summary = "Returns transactions for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public List<SimpleTransaction> getRavencoinWalletTransactions(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
if (!ravencoin.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
return ravencoin.getWalletTransactions(key58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/unusedaddress")
|
||||
@Operation(
|
||||
summary = "Returns first unused address for hierarchical, deterministic BIP32 wallet",
|
||||
description = "Supply BIP32 'm' private/public key in base58, starting with 'xprv'/'xpub' for mainnet, 'tprv'/'tpub' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string",
|
||||
description = "BIP32 'm' private/public key in base58",
|
||||
example = "tpubD6NzVbkrYhZ4XTPc4btCZ6SMgn8CxmWkj6VBVZ1tfcJfMq4UwAjZbG8U74gGSypL9XBYk2R2BLbDBe8pcEyBKM1edsGQEPKXNbEskZozeZc"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(array = @ArraySchema( schema = @Schema( implementation = SimpleTransaction.class ) ) )
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String getUnusedRavencoinReceiveAddress(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String key58) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
if (!ravencoin.isValidDeterministicKey(key58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
try {
|
||||
return ravencoin.getUnusedReceiveAddress(key58);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/send")
|
||||
@Operation(
|
||||
summary = "Sends RVN from hierarchical, deterministic BIP32 wallet to specific address",
|
||||
description = "Currently only supports 'legacy' P2PKH Ravencoin addresses. Supply BIP32 'm' private key in base58, starting with 'xprv' for mainnet, 'tprv' for testnet",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(
|
||||
implementation = RavencoinSendRequest.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string", description = "transaction hash"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String sendBitcoin(@HeaderParam(Security.API_KEY_HEADER) String apiKey, RavencoinSendRequest ravencoinSendRequest) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (ravencoinSendRequest.ravencoinAmount <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
if (ravencoinSendRequest.feePerByte != null && ravencoinSendRequest.feePerByte <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
if (!ravencoin.isValidAddress(ravencoinSendRequest.receivingAddress))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
if (!ravencoin.isValidDeterministicKey(ravencoinSendRequest.xprv58))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
|
||||
|
||||
Transaction spendTransaction = ravencoin.buildSpend(ravencoinSendRequest.xprv58,
|
||||
ravencoinSendRequest.receivingAddress,
|
||||
ravencoinSendRequest.ravencoinAmount,
|
||||
ravencoinSendRequest.feePerByte);
|
||||
|
||||
if (spendTransaction == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE);
|
||||
|
||||
try {
|
||||
ravencoin.broadcastTransaction(spendTransaction);
|
||||
} catch (ForeignBlockchainException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
|
||||
}
|
||||
|
||||
return spendTransaction.getTxId().toString();
|
||||
}
|
||||
|
||||
}
|
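For the Ravencoin /send endpoint, every field name used below appears in the handler above (xprv58, receivingAddress, ravencoinAmount, feePerByte), and the /crosschain/rvn base path comes from the class @Path annotation. Port and API-key header remain assumptions:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RavencoinSendClient {
    public static void main(String[] args) throws Exception {
        // Field names taken from the RavencoinSendRequest usage in the handler above
        String body = "{"
                + "\"xprv58\": \"tprv-placeholder\","            // BIP32 'm' private key, base58
                + "\"receivingAddress\": \"RPlaceholderAddr\","  // legacy P2PKH Ravencoin address
                + "\"ravencoinAmount\": 100000000,"              // amount in satoshis
                + "\"feePerByte\": 1125"                         // optional; must be positive if supplied
                + "}";

        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:12391/crosschain/rvn/send"))
                .header("X-API-KEY", "your-api-key")             // assumed header name
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("RVN txid: " + response.body());
    }
}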
||||
@@ -25,6 +25,7 @@ import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.Security;
|
||||
import org.qortal.api.model.CrossChainCancelRequest;
|
||||
import org.qortal.api.model.CrossChainTradeSummary;
|
||||
import org.qortal.controller.tradebot.TradeBot;
|
||||
import org.qortal.crosschain.SupportedBlockchain;
|
||||
import org.qortal.crosschain.ACCT;
|
||||
import org.qortal.crosschain.AcctMode;
|
||||
@@ -120,6 +121,8 @@ public class CrossChainResource {
|
||||
crossChainTrades = crossChainTrades.subList(0, upperLimit);
|
||||
}
|
||||
|
||||
crossChainTrades.stream().forEach(CrossChainResource::decorateTradeDataWithPresence);
|
||||
|
||||
return crossChainTrades;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
@@ -151,7 +154,11 @@ public class CrossChainResource {
|
||||
if (acct == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
return acct.populateTradeData(repository, atData);
|
||||
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atData);
|
||||
|
||||
decorateTradeDataWithPresence(crossChainTradeData);
|
||||
|
||||
return crossChainTradeData;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@@ -486,4 +493,7 @@ public class CrossChainResource {
|
||||
}
|
||||
}
|
||||
|
||||
private static void decorateTradeDataWithPresence(CrossChainTradeData crossChainTradeData) {
|
||||
TradeBot.getInstance().decorateTradeDataWithPresence(crossChainTradeData);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import io.swagger.v3.oas.annotations.security.SecurityRequirement;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
@@ -38,10 +39,14 @@ import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.at.ATData;
|
||||
import org.qortal.data.crosschain.CrossChainTradeData;
|
||||
import org.qortal.data.crosschain.TradeBotData;
|
||||
import org.qortal.data.transaction.MessageTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
@Path("/crosschain/tradebot")
|
||||
@Tag(name = "Cross-Chain (Trade-Bot)")
|
||||
@@ -137,7 +142,8 @@ public class CrossChainTradeBotResource {
|
||||
if (tradeBotCreateRequest.qortAmount <= 0 || tradeBotCreateRequest.fundingQortAmount <= 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ORDER_SIZE_TOO_SMALL);
|
||||
|
||||
if (!Controller.getInstance().isUpToDate())
|
||||
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
|
||||
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
@@ -153,7 +159,7 @@ public class CrossChainTradeBotResource {
|
||||
|
||||
return Base58.encode(unsignedBytes);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -198,7 +204,8 @@ public class CrossChainTradeBotResource {
|
||||
if (tradeBotRespondRequest.receivingAddress == null || !Crypto.isValidAddress(tradeBotRespondRequest.receivingAddress))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
if (!Controller.getInstance().isUpToDate())
|
||||
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
|
||||
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
|
||||
|
||||
// Extract data from cross-chain trading AT
|
||||
@@ -220,6 +227,17 @@ public class CrossChainTradeBotResource {
|
||||
if (crossChainTradeData.mode != AcctMode.OFFERING)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
// Check if there is a buy or a cancel request in progress for this trade
|
||||
List<Transaction.TransactionType> txTypes = List.of(Transaction.TransactionType.MESSAGE);
|
||||
List<TransactionData> unconfirmed = repository.getTransactionRepository().getUnconfirmedTransactions(txTypes, null, 0, 0, false);
|
||||
for (TransactionData transactionData : unconfirmed) {
|
||||
MessageTransactionData messageTransactionData = (MessageTransactionData) transactionData;
|
||||
if (Objects.equals(messageTransactionData.getRecipient(), atAddress)) {
|
||||
// There is a pending request for this trade, so block this buy attempt to reduce the risk of refunds
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Trade has an existing buy request or is pending cancellation.");
|
||||
}
|
||||
}
|
||||
|
||||
AcctTradeBot.ResponseResult result = TradeBot.getInstance().startResponse(repository, atData, acct, crossChainTradeData,
|
||||
tradeBotRespondRequest.foreignKey, tradeBotRespondRequest.receivingAddress);
|
||||
|
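The guard added above rejects a buy attempt whenever any unconfirmed MESSAGE transaction already targets the trade's AT address, since that indicates a buy or cancellation is pending. The same check, extracted as a standalone helper for clarity (a sketch; the repository call matches the one used above, but this helper is not part of the codebase):

import java.util.List;
import java.util.Objects;

import org.qortal.data.transaction.MessageTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.transaction.Transaction;

public class PendingTradeMessageCheck {

    /** Returns true if an unconfirmed MESSAGE transaction is already addressed to this trade AT. */
    public static boolean hasPendingTradeMessage(Repository repository, String atAddress) throws DataException {
        List<Transaction.TransactionType> txTypes = List.of(Transaction.TransactionType.MESSAGE);

        // Same unconfirmed-transaction query as the resource method above (no creator filter, no paging)
        List<TransactionData> unconfirmed = repository.getTransactionRepository()
                .getUnconfirmedTransactions(txTypes, null, 0, 0, false);

        return unconfirmed.stream()
                .map(transactionData -> (MessageTransactionData) transactionData)
                .anyMatch(messageTransactionData -> Objects.equals(messageTransactionData.getRecipient(), atAddress));
    }
}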
||||
@@ -237,7 +255,7 @@ public class CrossChainTradeBotResource {
|
||||
return "false";
|
||||
}
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -98,7 +98,15 @@ public class GroupsResource {
|
||||
ref = "reverse"
|
||||
) @QueryParam("reverse") Boolean reverse) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getGroupRepository().getAllGroups(limit, offset, reverse);
|
||||
List<GroupData> allGroupData = repository.getGroupRepository().getAllGroups(limit, offset, reverse);
|
||||
allGroupData.forEach(groupData -> {
|
||||
try {
|
||||
groupData.memberCount = repository.getGroupRepository().countGroupMembers(groupData.getGroupId());
|
||||
} catch (DataException e) {
|
||||
// Exclude memberCount for this group
|
||||
}
|
||||
});
|
||||
return allGroupData;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@@ -150,7 +158,15 @@ public class GroupsResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getGroupRepository().getGroupsWithMember(member);
|
||||
List<GroupData> allGroupData = repository.getGroupRepository().getGroupsWithMember(member);
|
||||
allGroupData.forEach(groupData -> {
|
||||
try {
|
||||
groupData.memberCount = repository.getGroupRepository().countGroupMembers(groupData.getGroupId());
|
||||
} catch (DataException e) {
|
||||
// Exclude memberCount for this group
|
||||
}
|
||||
});
|
||||
return allGroupData;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@@ -177,6 +193,7 @@ public class GroupsResource {
|
||||
if (groupData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.GROUP_UNKNOWN);
|
||||
|
||||
groupData.memberCount = repository.getGroupRepository().countGroupMembers(groupId);
|
||||
return groupData;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
@@ -922,4 +939,4 @@ public class GroupsResource {
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ import org.qortal.api.ApiErrors;
|
||||
import org.qortal.api.ApiException;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.model.NameSummary;
|
||||
import org.qortal.controller.LiteNode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.naming.NameData;
|
||||
import org.qortal.data.transaction.BuyNameTransactionData;
|
||||
@@ -101,7 +102,14 @@ public class NamesResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<NameData> names = repository.getNameRepository().getNamesByOwner(address, limit, offset, reverse);
|
||||
List<NameData> names;
|
||||
|
||||
if (Settings.getInstance().isLite()) {
|
||||
names = LiteNode.getInstance().fetchAccountNames(address);
|
||||
}
|
||||
else {
|
||||
names = repository.getNameRepository().getNamesByOwner(address, limit, offset, reverse);
|
||||
}
|
||||
|
||||
return names.stream().map(NameSummary::new).collect(Collectors.toList());
|
||||
} catch (DataException e) {
|
||||
@@ -126,10 +134,18 @@ public class NamesResource {
|
||||
@ApiErrors({ApiError.NAME_UNKNOWN, ApiError.REPOSITORY_ISSUE})
|
||||
public NameData getName(@PathParam("name") String name) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
NameData nameData = repository.getNameRepository().fromName(name);
|
||||
NameData nameData;
|
||||
|
||||
if (nameData == null)
|
||||
if (Settings.getInstance().isLite()) {
|
||||
nameData = LiteNode.getInstance().fetchNameData(name);
|
||||
}
|
||||
else {
|
||||
nameData = repository.getNameRepository().fromName(name);
|
||||
}
|
||||
|
||||
if (nameData == null) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NAME_UNKNOWN);
|
||||
}
|
||||
|
||||
return nameData;
|
||||
} catch (ApiException e) {
|
||||
|
||||
@@ -20,6 +20,11 @@ import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import org.apache.logging.log4j.Level;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.core.config.Configuration;
|
||||
import org.apache.logging.log4j.core.config.LoggerConfig;
|
||||
import org.apache.logging.log4j.core.LoggerContext;
|
||||
import org.qortal.api.*;
|
||||
import org.qortal.api.model.ConnectedPeer;
|
||||
import org.qortal.api.model.PeersSummary;
|
||||
@@ -61,7 +66,7 @@ public class PeersResource {
|
||||
}
|
||||
)
|
||||
public List<ConnectedPeer> getPeers() {
|
||||
return Network.getInstance().getConnectedPeers().stream().map(ConnectedPeer::new).collect(Collectors.toList());
|
||||
return Network.getInstance().getImmutableConnectedPeers().stream().map(ConnectedPeer::new).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@GET
|
||||
@@ -127,9 +132,29 @@ public class PeersResource {
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public ExecuteProduceConsume.StatsSnapshot getEngineStats(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
|
||||
public ExecuteProduceConsume.StatsSnapshot getEngineStats(@HeaderParam(Security.API_KEY_HEADER) String apiKey, @QueryParam("newLoggingLevel") Level newLoggingLevel) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (newLoggingLevel != null) {
|
||||
final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
|
||||
final Configuration config = ctx.getConfiguration();
|
||||
|
||||
String epcClassName = "org.qortal.network.Network.NetworkProcessor";
|
||||
LoggerConfig loggerConfig = config.getLoggerConfig(epcClassName);
|
||||
LoggerConfig specificConfig = loggerConfig;
|
||||
|
||||
// We need a specific configuration for this logger,
|
||||
// otherwise we would change the level of all other loggers
|
||||
// having the original configuration as parent as well
|
||||
if (!loggerConfig.getName().equals(epcClassName)) {
|
||||
specificConfig = new LoggerConfig(epcClassName, newLoggingLevel, true);
|
||||
specificConfig.setParent(loggerConfig);
|
||||
config.addLogger(epcClassName, specificConfig);
|
||||
}
|
||||
specificConfig.setLevel(newLoggingLevel);
|
||||
ctx.updateLoggers();
|
||||
}
|
||||
|
||||
return Network.getInstance().getStatsSnapshot();
|
||||
}
|
||||
|
||||
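The change above lets the engine-stats endpoint also raise or lower the log level of the Network.NetworkProcessor logger at runtime via the newLoggingLevel query parameter. A hedged usage sketch; the /peers/enginestats path is an assumption (the method-level @Path annotation is outside this hunk) and the endpoint still requires the API key:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EngineStatsLoggingClient {
    public static void main(String[] args) throws Exception {
        // newLoggingLevel accepts a Log4j2 level name such as DEBUG, INFO or TRACE
        URI uri = URI.create("http://localhost:12391/peers/enginestats?newLoggingLevel=DEBUG");

        HttpRequest request = HttpRequest.newBuilder(uri)
                .header("X-API-KEY", "your-api-key")   // assumed header name
                .GET()
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());           // stats snapshot
    }
}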
@@ -304,7 +329,7 @@ public class PeersResource {
|
||||
PeerAddress peerAddress = PeerAddress.fromString(targetPeerAddress);
|
||||
InetSocketAddress resolvedAddress = peerAddress.toSocketAddress();
|
||||
|
||||
List<Peer> peers = Network.getInstance().getHandshakedPeers();
|
||||
List<Peer> peers = Network.getInstance().getImmutableHandshakedPeers();
|
||||
Peer targetPeer = peers.stream().filter(peer -> peer.getResolvedAddress().equals(resolvedAddress)).findFirst().orElse(null);
|
||||
|
||||
if (targetPeer == null)
|
||||
@@ -352,9 +377,9 @@ public class PeersResource {
|
||||
public PeersSummary peersSummary() {
|
||||
PeersSummary peersSummary = new PeersSummary();
|
||||
|
||||
List<Peer> connectedPeers = Network.getInstance().getConnectedPeers().stream().collect(Collectors.toList());
|
||||
List<Peer> connectedPeers = Network.getInstance().getImmutableConnectedPeers().stream().collect(Collectors.toList());
|
||||
for (Peer peer : connectedPeers) {
|
||||
if (peer.isOutbound()) {
|
||||
if (!peer.isOutbound()) {
|
||||
peersSummary.inboundConnections++;
|
||||
}
|
||||
else {
|
||||
|
||||
src/main/java/org/qortal/api/resource/PollsResource.java (new file, 197 lines)
@@ -0,0 +1,197 @@
|
||||
package org.qortal.api.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiErrors;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.data.transaction.CreatePollTransactionData;
|
||||
import org.qortal.data.transaction.PaymentTransactionData;
|
||||
import org.qortal.data.transaction.VoteOnPollTransactionData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.transaction.CreatePollTransactionTransformer;
|
||||
import org.qortal.transform.transaction.PaymentTransactionTransformer;
|
||||
import org.qortal.transform.transaction.VoteOnPollTransactionTransformer;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Parameter;
|
||||
import io.swagger.v3.oas.annotations.media.ArraySchema;
|
||||
import java.util.List;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.QueryParam;
|
||||
import org.qortal.api.ApiException;
|
||||
import org.qortal.data.voting.PollData;
|
||||
|
||||
@Path("/polls")
|
||||
@Tag(name = "Polls")
|
||||
public class PollsResource {
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@GET
|
||||
@Operation(
|
||||
summary = "List all polls",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "poll info",
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
array = @ArraySchema(schema = @Schema(implementation = PollData.class))
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.REPOSITORY_ISSUE})
|
||||
public List<PollData> getAllPolls(@Parameter(
|
||||
ref = "limit"
|
||||
) @QueryParam("limit") Integer limit, @Parameter(
|
||||
ref = "offset"
|
||||
) @QueryParam("offset") Integer offset, @Parameter(
|
||||
ref = "reverse"
|
||||
) @QueryParam("reverse") Boolean reverse) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<PollData> allPollData = repository.getVotingRepository().getAllPolls(limit, offset, reverse);
|
||||
return allPollData;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/{pollName}")
|
||||
@Operation(
|
||||
summary = "Info on poll",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "poll info",
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(implementation = PollData.class)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.REPOSITORY_ISSUE})
|
||||
public PollData getPollData(@PathParam("pollName") String pollName) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
PollData pollData = repository.getVotingRepository().fromPollName(pollName);
|
||||
if (pollData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.POLL_NO_EXISTS);
|
||||
|
||||
return pollData;
|
||||
} catch (ApiException e) {
|
||||
throw e;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/create")
|
||||
@Operation(
|
||||
summary = "Build raw, unsigned, CREATE_POLL transaction",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(
|
||||
implementation = CreatePollTransactionData.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "raw, unsigned, CREATE_POLL transaction encoded in Base58",
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.NON_PRODUCTION, ApiError.TRANSACTION_INVALID, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE})
|
||||
public String CreatePoll(CreatePollTransactionData transactionData) {
|
||||
if (Settings.getInstance().isApiRestricted())
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NON_PRODUCTION);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
Transaction transaction = Transaction.fromData(repository, transactionData);
|
||||
|
||||
Transaction.ValidationResult result = transaction.isValidUnconfirmed();
|
||||
if (result != Transaction.ValidationResult.OK)
|
||||
throw TransactionsResource.createTransactionInvalidException(request, result);
|
||||
|
||||
byte[] bytes = CreatePollTransactionTransformer.toBytes(transactionData);
|
||||
return Base58.encode(bytes);
|
||||
} catch (TransformationException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/vote")
|
||||
@Operation(
|
||||
summary = "Build raw, unsigned, VOTE_ON_POLL transaction",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(
|
||||
implementation = VoteOnPollTransactionData.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "raw, unsigned, VOTE_ON_POLL transaction encoded in Base58",
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.NON_PRODUCTION, ApiError.TRANSACTION_INVALID, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE})
|
||||
public String VoteOnPoll(VoteOnPollTransactionData transactionData) {
|
||||
if (Settings.getInstance().isApiRestricted())
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NON_PRODUCTION);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
Transaction transaction = Transaction.fromData(repository, transactionData);
|
||||
|
||||
Transaction.ValidationResult result = transaction.isValidUnconfirmed();
|
||||
if (result != Transaction.ValidationResult.OK)
|
||||
throw TransactionsResource.createTransactionInvalidException(request, result);
|
||||
|
||||
byte[] bytes = VoteOnPollTransactionTransformer.toBytes(transactionData);
|
||||
return Base58.encode(bytes);
|
||||
} catch (TransformationException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
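The new PollsResource is mounted at /polls, and the two GET endpoints need no API key. A minimal client sketch (default port assumed; the poll name is a placeholder):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PollsClient {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // List the first 20 polls, in reverse order
        HttpRequest listRequest = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/polls?limit=20&reverse=true")).GET().build();
        System.out.println(client.send(listRequest, HttpResponse.BodyHandlers.ofString()).body());

        // Fetch a single poll by name; an unknown name yields a POLL_NO_EXISTS error
        HttpRequest pollRequest = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/polls/examplePollName")).GET().build();
        System.out.println(client.send(pollRequest, HttpResponse.BodyHandlers.ofString()).body());
    }
}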
||||
@@ -9,29 +9,27 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.QueryParam;
|
||||
import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiErrors;
|
||||
import org.qortal.api.ApiException;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.*;
|
||||
import org.qortal.api.model.SimpleTransactionSignRequest;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.LiteNode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.globalization.Translator;
|
||||
import org.qortal.repository.DataException;
|
||||
@@ -250,14 +248,29 @@ public class TransactionsResource {
|
||||
ApiError.REPOSITORY_ISSUE
|
||||
})
|
||||
public List<TransactionData> getUnconfirmedTransactions(@Parameter(
|
||||
description = "A list of transaction types"
|
||||
) @QueryParam("txType") List<TransactionType> txTypes, @Parameter(
|
||||
description = "Transaction creator's base58 encoded public key"
|
||||
) @QueryParam("creator") String creatorPublicKey58, @Parameter(
|
||||
ref = "limit"
|
||||
) @QueryParam("limit") Integer limit, @Parameter(
|
||||
ref = "offset"
|
||||
) @QueryParam("offset") Integer offset, @Parameter(
|
||||
ref = "reverse"
|
||||
) @QueryParam("reverse") Boolean reverse) {
|
||||
|
||||
// Decode public key if supplied
|
||||
byte[] creatorPublicKey = null;
|
||||
if (creatorPublicKey58 != null) {
|
||||
try {
|
||||
creatorPublicKey = Base58.decode(creatorPublicKey58);
|
||||
} catch (NumberFormatException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY, e);
|
||||
}
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getTransactionRepository().getUnconfirmedTransactions(limit, offset, reverse);
|
||||
return repository.getTransactionRepository().getUnconfirmedTransactions(txTypes, creatorPublicKey, limit, offset, reverse);
|
||||
} catch (ApiException e) {
|
||||
throw e;
|
||||
} catch (DataException e) {
|
||||
@@ -366,6 +379,73 @@ public class TransactionsResource {
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/address/{address}")
|
||||
@Operation(
|
||||
summary = "Returns transactions for given address",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "transactions",
|
||||
content = @Content(
|
||||
array = @ArraySchema(
|
||||
schema = @Schema(
|
||||
implementation = TransactionData.class
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
|
||||
public List<TransactionData> getAddressTransactions(@PathParam("address") String address,
|
||||
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
|
||||
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
|
||||
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
|
||||
if (!Crypto.isValidAddress(address)) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
}
|
||||
|
||||
if (limit == null) {
|
||||
limit = 0;
|
||||
}
|
||||
if (offset == null) {
|
||||
offset = 0;
|
||||
}
|
||||
|
||||
List<TransactionData> transactions;
|
||||
|
||||
if (Settings.getInstance().isLite()) {
|
||||
// Fetch from network
|
||||
transactions = LiteNode.getInstance().fetchAccountTransactions(address, limit, offset);
|
||||
|
||||
// Sort the data, since we can't guarantee the order that a peer sent it in
|
||||
if (reverse) {
|
||||
transactions.sort(Comparator.comparingLong(TransactionData::getTimestamp).reversed());
|
||||
} else {
|
||||
transactions.sort(Comparator.comparingLong(TransactionData::getTimestamp));
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Fetch from local db
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null,
|
||||
null, null, null, address, TransactionsResource.ConfirmationStatus.CONFIRMED, limit, offset, reverse);
|
||||
|
||||
// Expand signatures to transactions
|
||||
transactions = new ArrayList<>(signatures.size());
|
||||
for (byte[] signature : signatures) {
|
||||
transactions.add(repository.getTransactionRepository().fromSignature(signature));
|
||||
}
|
||||
} catch (ApiException e) {
|
||||
throw e;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
return transactions;
|
||||
}
|
||||
|
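A matching client call for the new per-address transaction listing; the /transactions base path is an assumption inferred from the class name (its @Path annotation is outside these hunks), and the address is a placeholder:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class AddressTransactionsClient {
    public static void main(String[] args) throws Exception {
        String address = "QexampleAddressPlaceholder";

        // limit and offset default to 0 when omitted; reverse=true sorts newest first
        URI uri = URI.create("http://localhost:12391/transactions/address/" + address + "?limit=20&reverse=true");

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());   // JSON array of TransactionData
    }
}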
||||
@GET
|
||||
@Path("/unitfee")
|
||||
@Operation(
|
||||
@@ -624,7 +704,7 @@ public class TransactionsResource {
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "true if accepted, false otherwise",
|
||||
description = "For API version 1, this returns true if accepted.\nFor API version 2, the transactionData is returned as a JSON string if accepted.",
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
@@ -637,8 +717,13 @@ public class TransactionsResource {
|
||||
@ApiErrors({
|
||||
ApiError.BLOCKCHAIN_NEEDS_SYNC, ApiError.INVALID_SIGNATURE, ApiError.INVALID_DATA, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE
|
||||
})
|
||||
public String processTransaction(String rawBytes58) {
|
||||
if (!Controller.getInstance().isUpToDate())
|
||||
public String processTransaction(String rawBytes58, @HeaderParam(ApiService.API_VERSION_HEADER) String apiVersionHeader) {
|
||||
int apiVersion = ApiService.getApiVersion(request);
|
||||
|
||||
// Only allow a transaction to be processed if our latest block is less than 60 minutes old
|
||||
// If older than this, we should first wait until the blockchain is synced
|
||||
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
|
||||
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
|
||||
|
||||
byte[] rawBytes = Base58.decode(rawBytes58);
|
||||
@@ -660,7 +745,7 @@ public class TransactionsResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_SIGNATURE);
|
||||
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS))
|
||||
if (!blockchainLock.tryLock(60, TimeUnit.SECONDS))
|
||||
throw createTransactionInvalidException(request, ValidationResult.NO_BLOCKCHAIN_LOCK);
|
||||
|
||||
try {
|
||||
@@ -671,13 +756,27 @@ public class TransactionsResource {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
|
||||
return "true";
|
||||
switch (apiVersion) {
|
||||
case 1:
|
||||
return "true";
|
||||
|
||||
case 2:
|
||||
default:
|
||||
// Marshall transactionData to string
|
||||
StringWriter stringWriter = new StringWriter();
|
||||
ApiRequest.marshall(stringWriter, transactionData);
|
||||
return stringWriter.toString();
|
||||
}
|
||||
|
||||
|
||||
} catch (NumberFormatException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA, e);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
} catch (InterruptedException e) {
|
||||
throw createTransactionInvalidException(request, ValidationResult.NO_BLOCKCHAIN_LOCK);
|
||||
} catch (IOException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
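With API version 2, the transaction-processing endpoint echoes the accepted transaction back as marshalled JSON instead of the bare string "true". A hedged sketch of handling both shapes on the client side; the /transactions/process path and the plain-text Base58 body are assumptions based on the method shown, and the version header name is whatever ApiService.API_VERSION_HEADER resolves to:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ProcessTransactionClient {
    public static void main(String[] args) throws Exception {
        String rawBytes58 = "signed-transaction-bytes-in-base58";   // placeholder

        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:12391/transactions/process"))
                .header("Content-Type", "text/plain")
                .POST(HttpRequest.BodyPublishers.ofString(rawBytes58))
                .build();

        String body = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString()).body();

        if ("true".equals(body)) {
            // API version 1 behaviour: plain acceptance flag
            System.out.println("Transaction accepted");
        } else {
            // API version 2 behaviour: the accepted TransactionData as a JSON string
            System.out.println("Accepted transaction: " + body);
        }
    }
}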
||||
@@ -1,4 +1,4 @@
|
||||
package org.qortal.api.resource;
|
||||
package org.qortal.api.restricted.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.Parameter;
|
||||
@@ -20,6 +20,7 @@ import java.time.LocalDate;
|
||||
import java.time.LocalTime;
|
||||
import java.time.OffsetDateTime;
|
||||
import java.time.ZoneOffset;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
@@ -31,11 +32,13 @@ import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import org.apache.commons.lang3.reflect.FieldUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.core.LoggerContext;
|
||||
import org.apache.logging.log4j.core.appender.RollingFileAppender;
|
||||
import org.checkerframework.checker.units.qual.A;
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.api.*;
|
||||
@@ -43,9 +46,11 @@ import org.qortal.api.model.ActivitySummary;
|
||||
import org.qortal.api.model.NodeInfo;
|
||||
import org.qortal.api.model.NodeStatus;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.controller.AutoUpdate;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.Synchronizer;
|
||||
import org.qortal.controller.Synchronizer.SynchronizationResult;
|
||||
import org.qortal.controller.repository.BlockArchiveRebuilder;
|
||||
import org.qortal.data.account.MintingAccountData;
|
||||
import org.qortal.data.account.RewardShareData;
|
||||
import org.qortal.network.Network;
|
||||
@@ -120,10 +125,23 @@ public class AdminResource {
|
||||
nodeInfo.buildTimestamp = Controller.getInstance().getBuildTimestamp();
|
||||
nodeInfo.nodeId = Network.getInstance().getOurNodeId();
|
||||
nodeInfo.isTestNet = Settings.getInstance().isTestNet();
|
||||
nodeInfo.type = getNodeType();
|
||||
|
||||
return nodeInfo;
|
||||
}
|
||||
|
||||
private String getNodeType() {
|
||||
if (Settings.getInstance().isLite()) {
|
||||
return "lite";
|
||||
}
|
||||
else if (Settings.getInstance().isTopOnly()) {
|
||||
return "topOnly";
|
||||
}
|
||||
else {
|
||||
return "full";
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/status")
|
||||
@Operation(
|
||||
@@ -140,6 +158,53 @@ public class AdminResource {
|
||||
return nodeStatus;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/settings")
|
||||
@Operation(
|
||||
summary = "Fetch node settings",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = Settings.class))
|
||||
)
|
||||
}
|
||||
)
|
||||
public Settings settings() {
|
||||
Settings nodeSettings = Settings.getInstance();
|
||||
|
||||
return nodeSettings;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/settings/{setting}")
|
||||
@Operation(
|
||||
summary = "Fetch a single node setting",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
|
||||
)
|
||||
}
|
||||
)
|
||||
public String setting(@PathParam("setting") String setting) {
|
||||
try {
|
||||
Object settingValue = FieldUtils.readField(Settings.getInstance(), setting, true);
|
||||
if (settingValue == null) {
|
||||
return "null";
|
||||
}
|
||||
else if (settingValue instanceof String[]) {
|
||||
JSONArray array = new JSONArray(settingValue);
|
||||
return array.toString(4);
|
||||
}
|
||||
else if (settingValue instanceof List) {
|
||||
JSONArray array = new JSONArray((List<Object>) settingValue);
|
||||
return array.toString(4);
|
||||
}
|
||||
|
||||
return settingValue.toString();
|
||||
} catch (IllegalAccessException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/stop")
|
||||
@Operation(
|
||||
@@ -170,6 +235,37 @@ public class AdminResource {
|
||||
return "true";
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/restart")
|
||||
@Operation(
|
||||
summary = "Restart",
|
||||
description = "Restart",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "\"true\"",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String restart(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
new Thread(() -> {
|
||||
// Short sleep to allow HTTP response body to be emitted
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException e) {
|
||||
// Not important
|
||||
}
|
||||
|
||||
AutoUpdate.attemptRestart();
|
||||
|
||||
}).start();
|
||||
|
||||
return "true";
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/summary")
|
||||
@Operation(
|
||||
@@ -210,6 +306,42 @@ public class AdminResource {
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/summary/alltime")
|
||||
@Operation(
|
||||
summary = "Summary of activity since genesis",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(schema = @Schema(implementation = ActivitySummary.class))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.REPOSITORY_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public ActivitySummary allTimeSummary(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
ActivitySummary summary = new ActivitySummary();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int startHeight = 1;
|
||||
long start = repository.getBlockRepository().fromHeight(startHeight).getTimestamp();
|
||||
int endHeight = repository.getBlockRepository().getBlockchainHeight();
|
||||
|
||||
summary.setBlockCount(endHeight - startHeight);
|
||||
|
||||
summary.setTransactionCountByType(repository.getTransactionRepository().getTransactionSummary(startHeight + 1, endHeight));
|
||||
|
||||
summary.setAssetsIssued(repository.getAssetRepository().getRecentAssetIds(start).size());
|
||||
|
||||
summary.setNamesRegistered (repository.getNameRepository().getRecentNames(start).size());
|
||||
|
||||
return summary;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/enginestats")
|
||||
@Operation(
|
||||
@@ -315,6 +447,7 @@ public class AdminResource {
|
||||
|
||||
repository.getAccountRepository().save(mintingAccountData);
|
||||
repository.saveChanges();
|
||||
repository.exportNodeLocalData();//after adding new minting account let's persist it to the backup MintingAccounts.json
|
||||
} catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY, e);
|
||||
} catch (DataException e) {
|
||||
@@ -355,6 +488,7 @@ public class AdminResource {
|
||||
return "false";
|
||||
|
||||
repository.saveChanges();
|
||||
repository.exportNodeLocalData();//after removing new minting account let's persist it to the backup MintingAccounts.json
|
||||
} catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY, e);
|
||||
} catch (DataException e) {
|
||||
@@ -380,6 +514,10 @@ public class AdminResource {
|
||||
) @QueryParam("limit") Integer limit, @Parameter(
|
||||
ref = "offset"
|
||||
) @QueryParam("offset") Integer offset, @Parameter(
|
||||
name = "tail",
|
||||
description = "Fetch most recent log lines",
|
||||
schema = @Schema(type = "boolean")
|
||||
) @QueryParam("tail") Boolean tail, @Parameter(
|
||||
ref = "reverse"
|
||||
) @QueryParam("reverse") Boolean reverse) {
|
||||
LoggerContext loggerContext = (LoggerContext) LogManager.getContext();
|
||||
@@ -395,6 +533,13 @@ public class AdminResource {
|
||||
if (reverse != null && reverse)
|
||||
logLines = Lists.reverse(logLines);
|
||||
|
||||
// Tail mode - return the last X lines (where X = limit)
|
||||
if (tail != null && tail) {
|
||||
if (limit != null && limit > 0) {
|
||||
offset = logLines.size() - limit;
|
||||
}
|
||||
}
|
||||
|
||||
// offset out of bounds?
|
||||
if (offset != null && (offset < 0 || offset >= logLines.size()))
|
||||
return "";
|
||||
@@ -415,7 +560,7 @@ public class AdminResource {
|
||||
|
||||
limit = Math.min(limit, logLines.size());
|
||||
|
||||
logLines.subList(limit - 1, logLines.size()).clear();
|
||||
logLines.subList(limit, logLines.size()).clear();
|
||||
|
||||
return String.join("\n", logLines);
|
||||
} catch (IOException e) {
|
||||
@@ -512,7 +657,7 @@ public class AdminResource {
|
||||
PeerAddress peerAddress = PeerAddress.fromString(targetPeerAddress);
|
||||
InetSocketAddress resolvedAddress = peerAddress.toSocketAddress();
|
||||
|
||||
List<Peer> peers = Network.getInstance().getHandshakedPeers();
|
||||
List<Peer> peers = Network.getInstance().getImmutableHandshakedPeers();
|
||||
Peer targetPeer = peers.stream().filter(peer -> peer.getResolvedAddress().equals(resolvedAddress)).findFirst().orElse(null);
|
||||
|
||||
if (targetPeer == null)
|
||||
@@ -546,7 +691,7 @@ public class AdminResource {
|
||||
@Path("/repository/data")
|
||||
@Operation(
|
||||
summary = "Export sensitive/node-local data from repository.",
|
||||
description = "Exports data to .script files on local machine"
|
||||
description = "Exports data to .json files on local machine"
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_DATA, ApiError.REPOSITORY_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
@@ -587,10 +732,6 @@ public class AdminResource {
|
||||
public String importRepository(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String filename) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
// Hard-coded because it's too dangerous to allow user-supplied filenames in weaker security contexts
|
||||
if (Settings.getInstance().getApiKey() == null)
|
||||
filename = "qortal-backup/TradeBotStates.json";
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
|
||||
@@ -677,6 +818,64 @@ public class AdminResource {
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/repository/archive/rebuild")
|
||||
@Operation(
|
||||
summary = "Rebuild archive",
|
||||
description = "Rebuilds archive files, using the specified serialization version",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "number", example = "2"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "\"true\"",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.REPOSITORY_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String rebuildArchive(@HeaderParam(Security.API_KEY_HEADER) String apiKey, Integer serializationVersion) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
// Default serialization version to value specified in settings
|
||||
if (serializationVersion == null) {
|
||||
serializationVersion = Settings.getInstance().getDefaultArchiveVersion();
|
||||
}
|
||||
|
||||
try {
|
||||
// We don't actually need to lock the blockchain here, but we'll do it anyway so that
|
||||
// the node can focus on rebuilding rather than synchronizing / minting.
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
|
||||
blockchainLock.lockInterruptibly();
|
||||
|
||||
try {
|
||||
BlockArchiveRebuilder blockArchiveRebuilder = new BlockArchiveRebuilder(serializationVersion);
|
||||
blockArchiveRebuilder.start();
|
||||
|
||||
return "true";
|
||||
|
||||
} catch (IOException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
|
||||
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// We couldn't lock blockchain to perform rebuild
|
||||
return "false";
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@DELETE
|
||||
@Path("/repository")
|
||||
@Operation(
|
||||
@@ -707,6 +906,49 @@ public class AdminResource {
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/repository/importarchivedtrades")
|
||||
@Operation(
|
||||
summary = "Imports archived trades from TradeBotStatesArchive.json",
|
||||
description = "This can be used to recover trades that exist in the archive only, which may be needed if a<br />" +
|
||||
"problem occurred during the proof-of-work computation stage of a buy request.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.REPOSITORY_ISSUE})
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public boolean importArchivedTrades(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
|
||||
blockchainLock.lockInterruptibly();
|
||||
|
||||
try {
|
||||
repository.importDataFromFile("qortal-backup/TradeBotStatesArchive.json");
|
||||
repository.saveChanges();
|
||||
|
||||
return true;
|
||||
|
||||
} catch (IOException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
|
||||
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// We couldn't lock blockchain to perform import
|
||||
return false;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@POST
|
||||
@Path("/apikey/generate")
|
||||
@@ -1,4 +1,4 @@
|
||||
package org.qortal.api.resource;
|
||||
package org.qortal.api.restricted.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
@@ -60,7 +60,7 @@ public class BootstrapResource {
|
||||
bootstrap.validateBlockchain();
|
||||
return bootstrap.create();
|
||||
|
||||
} catch (DataException | InterruptedException | IOException e) {
|
||||
} catch (Exception e) {
|
||||
LOGGER.info("Unable to create bootstrap", e);
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package org.qortal.api.resource;
|
||||
package org.qortal.api.restricted.resource;
|
||||
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
@@ -8,7 +8,6 @@ import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import java.io.*;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.Map;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
@@ -28,8 +27,8 @@ import org.qortal.arbitrary.exception.MissingDataException;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataRenderManager;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData.*;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile.*;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
|
||||
@@ -43,58 +42,6 @@ public class RenderResource {
|
||||
@Context HttpServletResponse response;
|
||||
@Context ServletContext context;
|
||||
|
||||
@POST
|
||||
@Path("/preview")
|
||||
@Operation(
|
||||
summary = "Generate preview URL based on a user-supplied path and service",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string", example = "/Users/user/Documents/MyStaticWebsite"
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "a temporary URL to preview the website",
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string"
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public String preview(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String directoryPath) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
Method method = Method.PUT;
|
||||
Compression compression = Compression.ZIP;
|
||||
|
||||
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(Paths.get(directoryPath), null, Service.WEBSITE, null, method, compression);
|
||||
try {
|
||||
arbitraryDataWriter.save();
|
||||
} catch (IOException | DataException | InterruptedException | MissingDataException e) {
|
||||
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE);
|
||||
} catch (RuntimeException e) {
|
||||
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
|
||||
}
|
||||
|
||||
ArbitraryDataFile arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile();
|
||||
if (arbitraryDataFile != null) {
|
||||
String digest58 = arbitraryDataFile.digest58();
|
||||
if (digest58 != null) {
|
||||
return "http://localhost:12393/render/hash/" + digest58 + "?secret=" + Base58.encode(arbitraryDataFile.getSecret());
|
||||
}
|
||||
}
|
||||
return "Unable to generate preview URL";
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/authorize/{resourceId}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
@@ -136,34 +83,46 @@ public class RenderResource {
|
||||
@GET
|
||||
@Path("/signature/{signature}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature) {
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, "/", null, "/render/signature", true, true);
|
||||
public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature,
|
||||
@QueryParam("theme") String theme) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/signature/{signature}/{path:.*}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath) {
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, inPath,null, "/render/signature", true, true);
|
||||
public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath,
|
||||
@QueryParam("theme") String theme) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/hash/{hash}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58) {
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, "/", secret58, "/render/hash", true, false);
|
||||
public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58,
|
||||
@QueryParam("theme") String theme) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/hash/{hash}/{path:.*}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getPathByHash(@PathParam("hash") String hash58, @PathParam("path") String inPath,
|
||||
@QueryParam("secret") String secret58) {
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, inPath, secret58, "/render/hash", true, false);
|
||||
@QueryParam("secret") String secret58,
|
||||
@QueryParam("theme") String theme) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme);
|
||||
}
|
||||
|
||||
@GET
|
||||
@@ -171,29 +130,41 @@ public class RenderResource {
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getPathByName(@PathParam("service") Service service,
|
||||
@PathParam("name") String name,
|
||||
@PathParam("path") String inPath) {
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
@PathParam("path") String inPath,
|
||||
@QueryParam("identifier") String identifier,
|
||||
@QueryParam("theme") String theme) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
|
||||
String prefix = String.format("/render/%s", service);
|
||||
return this.get(name, ResourceIdType.NAME, service, inPath, null, prefix, true, true);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("{service}/{name}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getIndexByName(@PathParam("service") Service service,
|
||||
@PathParam("name") String name) {
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
@PathParam("name") String name,
|
||||
@QueryParam("identifier") String identifier,
|
||||
@QueryParam("theme") String theme) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
|
||||
String prefix = String.format("/render/%s", service);
|
||||
return this.get(name, ResourceIdType.NAME, service, "/", null, prefix, true, true);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme);
|
||||
}
|
||||
|
||||
|
||||
|
||||
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
|
||||
String secret58, String prefix, boolean usePrefix, boolean async) {
|
||||
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
|
||||
String inPath, String secret58, String prefix, boolean usePrefix, boolean async, String theme) {
|
||||
|
||||
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
|
||||
secret58, prefix, usePrefix, async, request, response, context);
|
||||
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, identifier, inPath,
|
||||
secret58, prefix, usePrefix, async, "render", request, response, context);
|
||||
|
||||
if (theme != null) {
|
||||
renderer.setTheme(theme);
|
||||
}
|
||||
return renderer.render();
|
||||
}
|
||||
|
||||
@@ -2,7 +2,9 @@ package org.qortal.api.websocket;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import org.eclipse.jetty.websocket.api.Session;
|
||||
@@ -21,6 +23,8 @@ import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
|
||||
import static org.qortal.data.chat.ChatMessage.Encoding;
|
||||
|
||||
@WebSocket
|
||||
@SuppressWarnings("serial")
|
||||
public class ActiveChatsWebSocket extends ApiWebSocket {
|
||||
@@ -62,7 +66,9 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
|
||||
|
||||
@OnWebSocketMessage
|
||||
public void onWebSocketMessage(Session session, String message) {
|
||||
/* ignored */
|
||||
if (Objects.equals(message, "ping")) {
|
||||
session.getRemote().sendStringByFuture("pong");
|
||||
}
|
||||
}
|
||||
|
||||
private void onNotify(Session session, ChatTransactionData chatTransactionData, String ourAddress, AtomicReference<String> previousOutput) {
|
||||
@@ -75,7 +81,7 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress);
|
||||
ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session));
|
||||
|
||||
StringWriter stringWriter = new StringWriter();
|
||||
|
||||
@@ -93,4 +99,12 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
|
||||
}
|
||||
}
|
||||
|
||||
private Encoding getTargetEncoding(Session session) {
|
||||
// Default to Base58 if not specified, for backwards support
|
||||
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
|
||||
List<String> encodingList = queryParams.get("encoding");
|
||||
String encoding = (encodingList != null && encodingList.size() == 1) ? encodingList.get(0) : "BASE58";
|
||||
return Encoding.valueOf(encoding);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -2,10 +2,7 @@ package org.qortal.api.websocket;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.*;
|
||||
|
||||
import org.eclipse.jetty.websocket.api.Session;
|
||||
import org.eclipse.jetty.websocket.api.WebSocketException;
|
||||
@@ -22,6 +19,8 @@ import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
|
||||
import static org.qortal.data.chat.ChatMessage.Encoding;
|
||||
|
||||
@WebSocket
|
||||
@SuppressWarnings("serial")
|
||||
public class ChatMessagesWebSocket extends ApiWebSocket {
|
||||
@@ -35,6 +34,16 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
|
||||
@Override
|
||||
public void onWebSocketConnect(Session session) {
|
||||
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
|
||||
Encoding encoding = getTargetEncoding(session);
|
||||
|
||||
List<String> limitList = queryParams.get("limit");
|
||||
Integer limit = (limitList != null && limitList.size() == 1) ? Integer.parseInt(limitList.get(0)) : null;
|
||||
|
||||
List<String> offsetList = queryParams.get("offset");
|
||||
Integer offset = (offsetList != null && offsetList.size() == 1) ? Integer.parseInt(offsetList.get(0)) : null;
|
||||
|
||||
List<String> reverseList = queryParams.get("offset");
|
||||
Boolean reverse = (reverseList != null && reverseList.size() == 1) ? Boolean.getBoolean(reverseList.get(0)) : null;
|
||||
|
||||
List<String> txGroupIds = queryParams.get("txGroupId");
|
||||
if (txGroupIds != null && txGroupIds.size() == 1) {
|
||||
@@ -46,7 +55,12 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
|
||||
null,
|
||||
txGroupId,
|
||||
null,
|
||||
null, null, null);
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
encoding,
|
||||
limit, offset, reverse);
|
||||
|
||||
sendMessages(session, chatMessages);
|
||||
} catch (DataException e) {
|
||||
@@ -69,11 +83,16 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<ChatMessage> chatMessages = repository.getChatRepository().getMessagesMatchingCriteria(
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
involvingAddresses,
|
||||
null, null, null);
|
||||
null,
|
||||
encoding,
|
||||
limit, offset, reverse);
|
||||
|
||||
sendMessages(session, chatMessages);
|
||||
} catch (DataException e) {
|
||||
@@ -99,7 +118,9 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
|
||||
|
||||
@OnWebSocketMessage
|
||||
public void onWebSocketMessage(Session session, String message) {
|
||||
/* ignored */
|
||||
if (Objects.equals(message, "ping")) {
|
||||
session.getRemote().sendStringByFuture("pong");
|
||||
}
|
||||
}
|
||||
|
||||
private void onNotify(Session session, ChatTransactionData chatTransactionData, int txGroupId) {
|
||||
@@ -147,7 +168,7 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
|
||||
// Convert ChatTransactionData to ChatMessage
|
||||
ChatMessage chatMessage;
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
chatMessage = repository.getChatRepository().toChatMessage(chatTransactionData);
|
||||
chatMessage = repository.getChatRepository().toChatMessage(chatTransactionData, getTargetEncoding(session));
|
||||
} catch (DataException e) {
|
||||
// No output this time?
|
||||
return;
|
||||
@@ -156,4 +177,12 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
|
||||
sendMessages(session, Collections.singletonList(chatMessage));
|
||||
}
|
||||
|
||||
private Encoding getTargetEncoding(Session session) {
|
||||
// Default to Base58 if not specified, for backwards support
|
||||
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
|
||||
List<String> encodingList = queryParams.get("encoding");
|
||||
String encoding = (encodingList != null && encodingList.size() == 1) ? encodingList.get(0) : "BASE58";
|
||||
return Encoding.valueOf(encoding);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
|
||||
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
|
||||
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.Synchronizer;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.transaction.PresenceTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
@@ -99,13 +100,13 @@ public class PresenceWebSocket extends ApiWebSocket implements Listener {
|
||||
|
||||
@Override
|
||||
public void listen(Event event) {
|
||||
// We use NewBlockEvent as a proxy for 1-minute timer
|
||||
if (!(event instanceof Controller.NewTransactionEvent) && !(event instanceof Controller.NewBlockEvent))
|
||||
// We use Synchronizer.NewChainTipEvent as a proxy for 1-minute timer
|
||||
if (!(event instanceof Controller.NewTransactionEvent) && !(event instanceof Synchronizer.NewChainTipEvent))
|
||||
return;
|
||||
|
||||
removeOldEntries();
|
||||
|
||||
if (event instanceof Controller.NewBlockEvent)
|
||||
if (event instanceof Synchronizer.NewChainTipEvent)
|
||||
// We only wanted a chance to cull old entries
|
||||
return;
|
||||
|
||||
|
||||
@@ -2,10 +2,7 @@ package org.qortal.api.websocket;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.eclipse.jetty.websocket.api.Session;
|
||||
@@ -85,6 +82,7 @@ public class TradeBotWebSocket extends ApiWebSocket implements Listener {
|
||||
@Override
|
||||
public void onWebSocketConnect(Session session) {
|
||||
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
|
||||
final boolean excludeInitialData = queryParams.get("excludeInitialData") != null;
|
||||
|
||||
List<String> foreignBlockchains = queryParams.get("foreignBlockchain");
|
||||
final String foreignBlockchain = foreignBlockchains == null ? null : foreignBlockchains.get(0);
|
||||
@@ -98,15 +96,22 @@ public class TradeBotWebSocket extends ApiWebSocket implements Listener {
|
||||
// save session's preferred blockchain (if any)
|
||||
sessionBlockchain.put(session, foreignBlockchain);
|
||||
|
||||
// Send all known trade-bot entries
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<TradeBotData> tradeBotEntries = repository.getCrossChainRepository().getAllTradeBotData();
|
||||
|
||||
// Optional filtering
|
||||
if (foreignBlockchain != null)
|
||||
tradeBotEntries = tradeBotEntries.stream()
|
||||
.filter(tradeBotData -> tradeBotData.getForeignBlockchain().equals(foreignBlockchain))
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// Maybe send all known trade-bot entries
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
List<TradeBotData> tradeBotEntries = new ArrayList<>();
|
||||
|
||||
// We might need to exclude the initial data from the response
|
||||
if (!excludeInitialData) {
|
||||
tradeBotEntries = repository.getCrossChainRepository().getAllTradeBotData();
|
||||
|
||||
// Optional filtering
|
||||
if (foreignBlockchain != null)
|
||||
tradeBotEntries = tradeBotEntries.stream()
|
||||
.filter(tradeBotData -> tradeBotData.getForeignBlockchain().equals(foreignBlockchain))
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
if (!sendEntries(session, tradeBotEntries)) {
|
||||
session.close(4002, "websocket issue");
|
||||
|
||||
@@ -23,6 +23,7 @@ import org.eclipse.jetty.websocket.api.annotations.WebSocket;
|
||||
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
|
||||
import org.qortal.api.model.CrossChainOfferSummary;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.Synchronizer;
|
||||
import org.qortal.crosschain.SupportedBlockchain;
|
||||
import org.qortal.crosschain.ACCT;
|
||||
import org.qortal.crosschain.AcctMode;
|
||||
@@ -80,10 +81,10 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
|
||||
|
||||
@Override
|
||||
public void listen(Event event) {
|
||||
if (!(event instanceof Controller.NewBlockEvent))
|
||||
if (!(event instanceof Synchronizer.NewChainTipEvent))
|
||||
return;
|
||||
|
||||
BlockData blockData = ((Controller.NewBlockEvent) event).getBlockData();
|
||||
BlockData blockData = ((Synchronizer.NewChainTipEvent) event).getNewChainTip();
|
||||
|
||||
// Process any new info
|
||||
|
||||
@@ -172,6 +173,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
|
||||
public void onWebSocketConnect(Session session) {
|
||||
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
|
||||
final boolean includeHistoric = queryParams.get("includeHistoric") != null;
|
||||
final boolean excludeInitialData = queryParams.get("excludeInitialData") != null;
|
||||
|
||||
List<String> foreignBlockchains = queryParams.get("foreignBlockchain");
|
||||
final String foreignBlockchain = foreignBlockchains == null ? null : foreignBlockchains.get(0);
|
||||
@@ -188,20 +190,23 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
|
||||
|
||||
List<CrossChainOfferSummary> crossChainOfferSummaries = new ArrayList<>();
|
||||
|
||||
synchronized (cachedInfoByBlockchain) {
|
||||
Collection<CachedOfferInfo> cachedInfos;
|
||||
// We might need to exclude the initial data from the response
|
||||
if (!excludeInitialData) {
|
||||
synchronized (cachedInfoByBlockchain) {
|
||||
Collection<CachedOfferInfo> cachedInfos;
|
||||
|
||||
if (foreignBlockchain == null)
|
||||
// No preferred blockchain, so iterate through all of them
|
||||
cachedInfos = cachedInfoByBlockchain.values();
|
||||
else
|
||||
cachedInfos = Collections.singleton(cachedInfoByBlockchain.computeIfAbsent(foreignBlockchain, k -> new CachedOfferInfo()));
|
||||
if (foreignBlockchain == null)
|
||||
// No preferred blockchain, so iterate through all of them
|
||||
cachedInfos = cachedInfoByBlockchain.values();
|
||||
else
|
||||
cachedInfos = Collections.singleton(cachedInfoByBlockchain.computeIfAbsent(foreignBlockchain, k -> new CachedOfferInfo()));
|
||||
|
||||
for (CachedOfferInfo cachedInfo : cachedInfos) {
|
||||
crossChainOfferSummaries.addAll(cachedInfo.currentSummaries.values());
|
||||
for (CachedOfferInfo cachedInfo : cachedInfos) {
|
||||
crossChainOfferSummaries.addAll(cachedInfo.currentSummaries.values());
|
||||
|
||||
if (includeHistoric)
|
||||
crossChainOfferSummaries.addAll(cachedInfo.historicSummaries.values());
|
||||
if (includeHistoric)
|
||||
crossChainOfferSummaries.addAll(cachedInfo.historicSummaries.values());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,141 @@
|
||||
package org.qortal.api.websocket;
|
||||
|
||||
import org.eclipse.jetty.websocket.api.Session;
|
||||
import org.eclipse.jetty.websocket.api.annotations.*;
|
||||
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.tradebot.TradeBot;
|
||||
import org.qortal.data.network.TradePresenceData;
|
||||
import org.qortal.event.Event;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.event.Listener;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.util.*;
|
||||
|
||||
@WebSocket
|
||||
@SuppressWarnings("serial")
|
||||
public class TradePresenceWebSocket extends ApiWebSocket implements Listener {
|
||||
|
||||
/** Map key is public key in base58, map value is trade presence */
|
||||
private static final Map<String, TradePresenceData> currentEntries = Collections.synchronizedMap(new HashMap<>());
|
||||
|
||||
@Override
|
||||
public void configure(WebSocketServletFactory factory) {
|
||||
factory.register(TradePresenceWebSocket.class);
|
||||
|
||||
populateCurrentInfo();
|
||||
|
||||
EventBus.INSTANCE.addListener(this::listen);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void listen(Event event) {
|
||||
// XXX - Suggest we change this to something like Synchronizer.NewChainTipEvent?
|
||||
// We use NewBlockEvent as a proxy for 1-minute timer
|
||||
if (!(event instanceof TradeBot.TradePresenceEvent) && !(event instanceof Controller.NewBlockEvent))
|
||||
return;
|
||||
|
||||
removeOldEntries();
|
||||
|
||||
if (event instanceof Controller.NewBlockEvent)
|
||||
// We only wanted a chance to cull old entries
|
||||
return;
|
||||
|
||||
TradePresenceData tradePresence = ((TradeBot.TradePresenceEvent) event).getTradePresenceData();
|
||||
|
||||
boolean somethingChanged = mergePresence(tradePresence);
|
||||
|
||||
if (!somethingChanged)
|
||||
// nothing changed
|
||||
return;
|
||||
|
||||
List<TradePresenceData> tradePresences = Collections.singletonList(tradePresence);
|
||||
|
||||
// Notify sessions
|
||||
for (Session session : getSessions()) {
|
||||
sendTradePresences(session, tradePresences);
|
||||
}
|
||||
}
|
||||
|
||||
@OnWebSocketConnect
|
||||
@Override
|
||||
public void onWebSocketConnect(Session session) {
|
||||
Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
|
||||
final boolean excludeInitialData = queryParams.get("excludeInitialData") != null;
|
||||
|
||||
List<TradePresenceData> tradePresences = new ArrayList<>();
|
||||
|
||||
// We might need to exclude the initial data from the response
|
||||
if (!excludeInitialData) {
|
||||
synchronized (currentEntries) {
|
||||
tradePresences = List.copyOf(currentEntries.values());
|
||||
}
|
||||
}
|
||||
|
||||
if (!sendTradePresences(session, tradePresences)) {
|
||||
session.close(4002, "websocket issue");
|
||||
return;
|
||||
}
|
||||
|
||||
super.onWebSocketConnect(session);
|
||||
}
|
||||
|
||||
@OnWebSocketClose
|
||||
@Override
|
||||
public void onWebSocketClose(Session session, int statusCode, String reason) {
|
||||
// clean up
|
||||
super.onWebSocketClose(session, statusCode, reason);
|
||||
}
|
||||
|
||||
@OnWebSocketError
|
||||
public void onWebSocketError(Session session, Throwable throwable) {
|
||||
/* ignored */
|
||||
}
|
||||
|
||||
@OnWebSocketMessage
|
||||
public void onWebSocketMessage(Session session, String message) {
|
||||
/* ignored */
|
||||
}
|
||||
|
||||
private boolean sendTradePresences(Session session, List<TradePresenceData> tradePresences) {
|
||||
try {
|
||||
StringWriter stringWriter = new StringWriter();
|
||||
marshall(stringWriter, tradePresences);
|
||||
|
||||
String output = stringWriter.toString();
|
||||
session.getRemote().sendStringByFuture(output);
|
||||
} catch (IOException e) {
|
||||
// No output this time?
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private static void populateCurrentInfo() {
|
||||
// We want ALL trade presences
|
||||
TradeBot.getInstance().getAllTradePresences().stream()
|
||||
.forEach(TradePresenceWebSocket::mergePresence);
|
||||
}
|
||||
|
||||
/** Merge trade presence into cache of current entries, returns true if cache was updated. */
|
||||
private static boolean mergePresence(TradePresenceData tradePresence) {
|
||||
// Put/replace for this publickey making sure we keep newest timestamp
|
||||
String pubKey58 = Base58.encode(tradePresence.getPublicKey());
|
||||
|
||||
TradePresenceData newEntry = currentEntries.compute(pubKey58, (k, v) -> v == null || v.getTimestamp() < tradePresence.getTimestamp() ? tradePresence : v);
|
||||
|
||||
return newEntry == tradePresence;
|
||||
}
|
||||
|
||||
private static void removeOldEntries() {
|
||||
long now = NTP.getTime();
|
||||
|
||||
currentEntries.values().removeIf(v -> v.getTimestamp() < now);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package org.qortal.arbitrary;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.arbitrary.exception.DataNotPublishedException;
|
||||
import org.qortal.arbitrary.exception.MissingDataException;
|
||||
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataCache;
|
||||
import org.qortal.arbitrary.misc.Service;
|
||||
@@ -88,7 +89,7 @@ public class ArbitraryDataBuilder {
|
||||
if (latestPut == null) {
|
||||
String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s",
|
||||
this.name, this.service, this.identifierString());
|
||||
throw new DataException(message);
|
||||
throw new DataNotPublishedException(message);
|
||||
}
|
||||
this.latestPutTransaction = latestPut;
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
@@ -15,7 +16,6 @@ import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.util.*;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Arrays.stream;
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
@@ -53,7 +53,8 @@ public class ArbitraryDataFile {
|
||||
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFile.class);
|
||||
|
||||
public static final long MAX_FILE_SIZE = 500 * 1024 * 1024; // 500MiB
|
||||
public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1MiB
|
||||
protected static final int MAX_CHUNK_SIZE = 1 * 1024 * 1024; // 1MiB
|
||||
public static final int CHUNK_SIZE = 512 * 1024; // 0.5MiB
|
||||
public static int SHORT_DIGEST_LENGTH = 8;
|
||||
|
||||
protected Path filePath;
|
||||
@@ -72,36 +73,44 @@ public class ArbitraryDataFile {
|
||||
}
|
||||
|
||||
public ArbitraryDataFile(String hash58, byte[] signature) throws DataException {
|
||||
this.createDataDirectory();
|
||||
this.filePath = ArbitraryDataFile.getOutputFilePath(hash58, signature, false);
|
||||
this.chunks = new ArrayList<>();
|
||||
this.hash58 = hash58;
|
||||
this.signature = signature;
|
||||
}
|
||||
|
||||
public ArbitraryDataFile(byte[] fileContent, byte[] signature) throws DataException {
|
||||
public ArbitraryDataFile(byte[] fileContent, byte[] signature, boolean useTemporaryFile) throws DataException {
|
||||
if (fileContent == null) {
|
||||
LOGGER.error("fileContent is null");
|
||||
return;
|
||||
}
|
||||
|
||||
this.chunks = new ArrayList<>();
|
||||
this.hash58 = Base58.encode(Crypto.digest(fileContent));
|
||||
this.signature = signature;
|
||||
LOGGER.trace(String.format("File digest: %s, size: %d bytes", this.hash58, fileContent.length));
|
||||
|
||||
Path outputFilePath = getOutputFilePath(this.hash58, signature, true);
|
||||
Path outputFilePath;
|
||||
if (useTemporaryFile) {
|
||||
try {
|
||||
outputFilePath = Files.createTempFile("qortalRawData", null);
|
||||
outputFilePath.toFile().deleteOnExit();
|
||||
}
|
||||
catch (IOException e) {
|
||||
throw new DataException(String.format("Unable to write data with hash %s to temporary file: %s", this.hash58, e.getMessage()));
|
||||
}
|
||||
}
|
||||
else {
|
||||
outputFilePath = getOutputFilePath(this.hash58, signature, true);
|
||||
}
|
||||
|
||||
File outputFile = outputFilePath.toFile();
|
||||
try (FileOutputStream outputStream = new FileOutputStream(outputFile)) {
|
||||
outputStream.write(fileContent);
|
||||
this.filePath = outputFilePath;
|
||||
// Verify hash
|
||||
if (!this.hash58.equals(this.digest58())) {
|
||||
LOGGER.error("Hash {} does not match file digest {}", this.hash58, this.digest58());
|
||||
this.delete();
|
||||
throw new DataException("Data file digest validation failed");
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new DataException("Unable to write data to file");
|
||||
this.delete();
|
||||
throw new DataException(String.format("Unable to write data with hash %s: %s", this.hash58, e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,9 +119,47 @@ public class ArbitraryDataFile {
|
||||
}
|
||||
|
||||
public static ArbitraryDataFile fromHash(byte[] hash, byte[] signature) throws DataException {
|
||||
if (hash == null) {
|
||||
return null;
|
||||
}
|
||||
return ArbitraryDataFile.fromHash58(Base58.encode(hash), signature);
|
||||
}
|
||||
|
||||
public static ArbitraryDataFile fromRawData(byte[] data, byte[] signature) throws DataException {
|
||||
if (data == null) {
|
||||
return null;
|
||||
}
|
||||
return new ArbitraryDataFile(data, signature, true);
|
||||
}
|
||||
|
||||
public static ArbitraryDataFile fromTransactionData(ArbitraryTransactionData transactionData) throws DataException {
|
||||
ArbitraryDataFile arbitraryDataFile = null;
|
||||
byte[] signature = transactionData.getSignature();
|
||||
byte[] data = transactionData.getData();
|
||||
|
||||
if (data == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Create data file
|
||||
switch (transactionData.getDataType()) {
|
||||
case DATA_HASH:
|
||||
arbitraryDataFile = ArbitraryDataFile.fromHash(data, signature);
|
||||
break;
|
||||
|
||||
case RAW_DATA:
|
||||
arbitraryDataFile = ArbitraryDataFile.fromRawData(data, signature);
|
||||
break;
|
||||
}
|
||||
|
||||
// Set metadata hash
|
||||
if (arbitraryDataFile != null) {
|
||||
arbitraryDataFile.setMetadataHash(transactionData.getMetadataHash());
|
||||
}
|
||||
|
||||
return arbitraryDataFile;
|
||||
}
|
||||
|
||||
public static ArbitraryDataFile fromPath(Path path, byte[] signature) {
|
||||
if (path == null) {
|
||||
return null;
|
||||
@@ -146,19 +193,6 @@ public class ArbitraryDataFile {
|
||||
return ArbitraryDataFile.fromPath(Paths.get(file.getPath()), signature);
|
||||
}
|
||||
|
||||
private boolean createDataDirectory() {
|
||||
// Create the data directory if it doesn't exist
|
||||
String dataPath = Settings.getInstance().getDataPath();
|
||||
Path dataDirectory = Paths.get(dataPath);
|
||||
try {
|
||||
Files.createDirectories(dataDirectory);
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Unable to create data directory");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private Path copyToDataDirectory(Path sourcePath, byte[] signature) throws DataException {
|
||||
if (this.hash58 == null || this.filePath == null) {
|
||||
return null;
|
||||
@@ -275,6 +309,11 @@ public class ArbitraryDataFile {
|
||||
this.chunks = new ArrayList<>();
|
||||
|
||||
if (file != null) {
|
||||
if (file.exists() && file.length() <= chunkSize) {
|
||||
// No need to split into chunks if we're already below the chunk size
|
||||
return 0;
|
||||
}
|
||||
|
||||
try (FileInputStream fileInputStream = new FileInputStream(file);
|
||||
BufferedInputStream bis = new BufferedInputStream(fileInputStream)) {
|
||||
|
||||
@@ -366,6 +405,21 @@ public class ArbitraryDataFile {
|
||||
return false;
|
||||
}
|
||||
|
||||
public boolean delete(int attempts) {
|
||||
// Keep trying to delete the data until it is deleted, or we reach 10 attempts
|
||||
for (int i=0; i<attempts; i++) {
|
||||
if (this.delete()) {
|
||||
return true;
|
||||
}
|
||||
try {
|
||||
Thread.sleep(1000L);
|
||||
} catch (InterruptedException e) {
|
||||
// Fall through to exit method
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public boolean deleteAllChunks() {
|
||||
boolean success = false;
|
||||
|
||||
@@ -388,12 +442,15 @@ public class ArbitraryDataFile {
|
||||
return false;
|
||||
}
|
||||
|
||||
public boolean deleteAll() {
|
||||
public boolean deleteAll(boolean deleteMetadata) {
|
||||
// Delete the complete file
|
||||
boolean fileDeleted = this.delete();
|
||||
|
||||
// Delete the metadata file
|
||||
boolean metadataDeleted = this.deleteMetadata();
|
||||
// Delete the metadata file if requested
|
||||
boolean metadataDeleted = false;
|
||||
if (deleteMetadata) {
|
||||
metadataDeleted = this.deleteMetadata();
|
||||
}
|
||||
|
||||
// Delete the individual chunks
|
||||
boolean chunksDeleted = this.deleteAllChunks();
|
||||
@@ -473,6 +530,14 @@ public class ArbitraryDataFile {
|
||||
|
||||
// Read the metadata
|
||||
List<byte[]> chunks = metadata.getChunks();
|
||||
|
||||
// If the chunks array is empty, then this resource has no chunks,
|
||||
// so we must return false to avoid confusing the caller.
|
||||
if (chunks.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Otherwise, we need to check each chunk individually
|
||||
for (byte[] chunkHash : chunks) {
|
||||
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
|
||||
if (!chunk.exists()) {
|
||||
@@ -604,6 +669,22 @@ public class ArbitraryDataFile {
|
||||
return this.chunks.size();
|
||||
}
|
||||
|
||||
public int fileCount() {
|
||||
int fileCount = this.chunkCount();
|
||||
|
||||
if (fileCount == 0) {
|
||||
// Transactions without any chunks can already be treated as a complete file
|
||||
fileCount++;
|
||||
}
|
||||
|
||||
if (this.getMetadataHash() != null) {
|
||||
// Add the metadata file
|
||||
fileCount++;
|
||||
}
|
||||
|
||||
return fileCount;
|
||||
}
|
||||
|
||||
public List<ArbitraryDataFileChunk> getChunks() {
|
||||
return this.chunks;
|
||||
}
|
||||
@@ -771,6 +852,10 @@ public class ArbitraryDataFile {
|
||||
this.loadMetadata();
|
||||
}
|
||||
|
||||
public ArbitraryDataTransactionMetadata getMetadata() {
|
||||
return this.metadata;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return this.shortHash58();
|
||||
|
||||
@@ -18,7 +18,7 @@ public class ArbitraryDataFileChunk extends ArbitraryDataFile {
|
||||
}
|
||||
|
||||
public ArbitraryDataFileChunk(byte[] fileContent, byte[] signature) throws DataException {
|
||||
super(fileContent, signature);
|
||||
super(fileContent, signature, false);
|
||||
}
|
||||
|
||||
public static ArbitraryDataFileChunk fromHash58(String hash58, byte[] signature) throws DataException {
|
||||
@@ -40,8 +40,8 @@ public class ArbitraryDataFileChunk extends ArbitraryDataFile {
|
||||
try {
|
||||
// Validate the file size (chunks have stricter limits)
|
||||
long fileSize = Files.size(this.filePath);
|
||||
if (fileSize > CHUNK_SIZE) {
|
||||
LOGGER.error(String.format("DataFileChunk is too large: %d bytes (max chunk size: %d bytes)", fileSize, CHUNK_SIZE));
|
||||
if (fileSize > MAX_CHUNK_SIZE) {
|
||||
LOGGER.error(String.format("DataFileChunk is too large: %d bytes (max chunk size: %d bytes)", fileSize, MAX_CHUNK_SIZE));
|
||||
return ValidationResult.FILE_TOO_LARGE;
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import org.apache.commons.io.FileUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import org.qortal.arbitrary.exception.DataNotPublishedException;
|
||||
import org.qortal.arbitrary.exception.MissingDataException;
|
||||
import org.qortal.arbitrary.misc.Service;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;
|
||||
@@ -18,10 +19,7 @@ import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile.*;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transform.Transformer;
|
||||
import org.qortal.utils.ArbitraryTransactionUtils;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.FilesystemUtils;
|
||||
import org.qortal.utils.ZipUtils;
|
||||
import org.qortal.utils.*;
|
||||
|
||||
import javax.crypto.BadPaddingException;
|
||||
import javax.crypto.IllegalBlockSizeException;
|
||||
@@ -59,6 +57,9 @@ public class ArbitraryDataReader {
|
||||
private int layerCount;
|
||||
private byte[] latestSignature;
|
||||
|
||||
// The resource being read
|
||||
ArbitraryDataResource arbitraryDataResource = null;
|
||||
|
||||
public ArbitraryDataReader(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
|
||||
// Ensure names are always lowercase
|
||||
if (resourceIdType == ResourceIdType.NAME) {
|
||||
@@ -115,6 +116,11 @@ public class ArbitraryDataReader {
|
||||
return new ArbitraryDataBuildQueueItem(this.resourceId, this.resourceIdType, this.service, this.identifier);
|
||||
}
|
||||
|
||||
private ArbitraryDataResource createArbitraryDataResource() {
|
||||
return new ArbitraryDataResource(this.resourceId, this.resourceIdType, this.service, this.identifier);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* loadAsynchronously
|
||||
*
|
||||
@@ -162,6 +168,8 @@ public class ArbitraryDataReader {
|
||||
return;
|
||||
}
|
||||
|
||||
this.arbitraryDataResource = this.createArbitraryDataResource();
|
||||
|
||||
this.preExecute();
|
||||
this.deleteExistingFiles();
|
||||
this.fetch();
|
||||
@@ -169,9 +177,18 @@ public class ArbitraryDataReader {
|
||||
this.uncompress();
|
||||
this.validate();
|
||||
|
||||
} catch (DataException e) {
|
||||
} catch (DataNotPublishedException e) {
|
||||
if (e.getMessage() != null) {
|
||||
// Log the message only, to avoid spamming the logs with a full stack trace
|
||||
LOGGER.debug("DataNotPublishedException when trying to load QDN resource: {}", e.getMessage());
|
||||
}
|
||||
this.deleteWorkingDirectory();
|
||||
throw new DataException(e.getMessage());
|
||||
throw e;
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("DataException when trying to load QDN resource", e);
|
||||
this.deleteWorkingDirectory();
|
||||
throw e;
|
||||
|
||||
} finally {
|
||||
this.postExecute();
|
||||
@@ -208,8 +225,13 @@ public class ArbitraryDataReader {
|
||||
* serve a cached version of the resource for subsequent requests.
|
||||
* @throws IOException
|
||||
*/
|
||||
private void deleteWorkingDirectory() throws IOException {
|
||||
FilesystemUtils.safeDeleteDirectory(this.workingPath, true);
|
||||
private void deleteWorkingDirectory() {
|
||||
try {
|
||||
FilesystemUtils.safeDeleteDirectory(this.workingPath, true);
|
||||
} catch (IOException e) {
|
||||
// Ignore failures as this isn't an essential step
|
||||
LOGGER.info("Unable to delete working path {}: {}", this.workingPath, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private void createUncompressedDirectory() throws DataException {
|
||||
@@ -337,11 +359,6 @@ public class ArbitraryDataReader {
|
||||
throw new DataException(String.format("Transaction data not found for signature %s", this.resourceId));
|
||||
}
|
||||
|
||||
// Load hashes
|
||||
byte[] digest = transactionData.getData();
|
||||
byte[] metadataHash = transactionData.getMetadataHash();
|
||||
byte[] signature = transactionData.getSignature();
|
||||
|
||||
// Load secret
|
||||
byte[] secret = transactionData.getSecret();
|
||||
if (secret != null) {
|
||||
@@ -349,16 +366,14 @@ public class ArbitraryDataReader {
|
||||
}
|
||||
|
||||
// Load data file(s)
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
|
||||
ArbitraryTransactionUtils.checkAndRelocateMiscFiles(transactionData);
|
||||
arbitraryDataFile.setMetadataHash(metadataHash);
|
||||
|
||||
if (!arbitraryDataFile.allFilesExist()) {
|
||||
if (ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName())) {
|
||||
if (ListUtils.isNameBlocked(transactionData.getName())) {
|
||||
throw new DataException(
|
||||
String.format("Unable to request missing data for file %s because the name is blocked", arbitraryDataFile));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// Ask the arbitrary data manager to fetch data for this transaction
|
||||
String message;
|
||||
if (this.canRequestMissingFiles) {
|
||||
@@ -369,8 +384,7 @@ public class ArbitraryDataReader {
|
||||
} else {
|
||||
message = String.format("Unable to reissue request for missing file %s for signature %s due to rate limit. Please try again later.", arbitraryDataFile, Base58.encode(transactionData.getSignature()));
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
message = String.format("Missing data for file %s", arbitraryDataFile);
|
||||
}
|
||||
|
||||
@@ -380,21 +394,25 @@ public class ArbitraryDataReader {
|
||||
}
|
||||
}
|
||||
|
||||
if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
|
||||
// We have all the chunks but not the complete file, so join them
|
||||
arbitraryDataFile.join();
|
||||
// Data hashes need some extra processing
|
||||
if (transactionData.getDataType() == DataType.DATA_HASH) {
|
||||
if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
|
||||
// We have all the chunks but not the complete file, so join them
|
||||
arbitraryDataFile.join();
|
||||
}
|
||||
|
||||
// If the complete file still doesn't exist then something went wrong
|
||||
if (!arbitraryDataFile.exists()) {
|
||||
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
|
||||
}
|
||||
// Ensure the complete hash matches the joined chunks
|
||||
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
|
||||
// Delete the invalid file
|
||||
arbitraryDataFile.delete();
|
||||
throw new DataException("Unable to validate complete file hash");
|
||||
}
|
||||
}
|
||||
|
||||
// If the complete file still doesn't exist then something went wrong
|
||||
if (!arbitraryDataFile.exists()) {
|
||||
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
|
||||
}
|
||||
// Ensure the complete hash matches the joined chunks
|
||||
if (!Arrays.equals(arbitraryDataFile.digest(), digest)) {
|
||||
// Delete the invalid file
|
||||
arbitraryDataFile.delete();
|
||||
throw new DataException("Unable to validate complete file hash");
|
||||
}
|
||||
// Ensure the file's size matches the size reported by the transaction (throws a DataException if not)
|
||||
arbitraryDataFile.validateFileSize(transactionData.getSize());
|
||||
|
||||
@@ -408,6 +426,7 @@ public class ArbitraryDataReader {
|
||||
this.decryptUsingAlgo("AES/CBC/PKCS5Padding");
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("Unable to decrypt using specific parameters: {}", e.getMessage());
|
||||
// Something went wrong, so fall back to default AES params (necessary for legacy resource support)
|
||||
this.decryptUsingAlgo("AES");
|
||||
|
||||
@@ -420,8 +439,9 @@ public class ArbitraryDataReader {
|
||||
byte[] secret = this.secret58 != null ? Base58.decode(this.secret58) : null;
|
||||
if (secret != null && secret.length == Transformer.AES256_LENGTH) {
|
||||
try {
|
||||
LOGGER.debug("Decrypting {} using algorithm {}...", this.arbitraryDataResource, algorithm);
|
||||
Path unencryptedPath = Paths.get(this.workingPath.toString(), "zipped.zip");
|
||||
SecretKey aesKey = new SecretKeySpec(secret, 0, secret.length, algorithm);
|
||||
SecretKey aesKey = new SecretKeySpec(secret, 0, secret.length, "AES");
|
||||
AES.decryptFile(algorithm, aesKey, this.filePath.toString(), unencryptedPath.toString());
|
||||
|
||||
// Replace filePath pointer with the encrypted file path
|
||||
@@ -430,7 +450,8 @@ public class ArbitraryDataReader {
|
||||
|
||||
} catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException | NoSuchPaddingException
|
||||
| BadPaddingException | IllegalBlockSizeException | IOException | InvalidKeyException e) {
|
||||
throw new DataException(String.format("Unable to decrypt file at path %s: %s", this.filePath, e.getMessage()));
|
||||
LOGGER.info(String.format("Exception when decrypting %s using algorithm %s", this.arbitraryDataResource, algorithm), e);
|
||||
throw new DataException(String.format("Unable to decrypt file at path %s using algorithm %s: %s", this.filePath, algorithm, e.getMessage()));
|
||||
}
|
||||
} else {
|
||||
// Assume it is unencrypted. This will be the case when we have built a custom path by combining
|
||||
@@ -477,7 +498,12 @@ public class ArbitraryDataReader {
|
||||
// Delete original compressed file
|
||||
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
|
||||
if (Files.exists(this.filePath)) {
|
||||
Files.delete(this.filePath);
|
||||
try {
|
||||
Files.delete(this.filePath);
|
||||
} catch (IOException e) {
|
||||
// Ignore failures as this isn't an essential step
|
||||
LOGGER.info("Unable to delete file at path {}", this.filePath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package org.qortal.arbitrary;
|
||||
|
||||
import com.google.common.io.Resources;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang3.ArrayUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.api.HTMLParser;
|
||||
@@ -34,27 +35,32 @@ public class ArbitraryDataRenderer {
|
||||
private final String resourceId;
|
||||
private final ResourceIdType resourceIdType;
|
||||
private final Service service;
|
||||
private final String identifier;
|
||||
private String theme = "light";
|
||||
private String inPath;
|
||||
private final String secret58;
|
||||
private final String prefix;
|
||||
private final boolean usePrefix;
|
||||
private final boolean async;
|
||||
private final String qdnContext;
|
||||
private final HttpServletRequest request;
|
||||
private final HttpServletResponse response;
|
||||
private final ServletContext context;
|
||||
|
||||
public ArbitraryDataRenderer(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
|
||||
String secret58, String prefix, boolean usePrefix, boolean async,
|
||||
public ArbitraryDataRenderer(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
|
||||
String inPath, String secret58, String prefix, boolean usePrefix, boolean async, String qdnContext,
|
||||
HttpServletRequest request, HttpServletResponse response, ServletContext context) {
|
||||
|
||||
this.resourceId = resourceId;
|
||||
this.resourceIdType = resourceIdType;
|
||||
this.service = service;
|
||||
this.identifier = identifier != null ? identifier : "default";
|
||||
this.inPath = inPath;
|
||||
this.secret58 = secret58;
|
||||
this.prefix = prefix;
|
||||
this.usePrefix = usePrefix;
|
||||
this.async = async;
|
||||
this.qdnContext = qdnContext;
|
||||
this.request = request;
|
||||
this.response = response;
|
||||
this.context = context;
|
||||
@@ -70,14 +76,14 @@ public class ArbitraryDataRenderer {
|
||||
return ArbitraryDataRenderer.getResponse(response, 500, "QDN is disabled in settings");
|
||||
}
|
||||
|
||||
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(resourceId, resourceIdType, service, null);
|
||||
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(resourceId, resourceIdType, service, identifier);
|
||||
arbitraryDataReader.setSecret58(secret58); // Optional, used for loading encrypted file hashes only
|
||||
try {
|
||||
if (!arbitraryDataReader.isCachedDataAvailable()) {
|
||||
// If async is requested, show a loading screen whilst build is in progress
|
||||
if (async) {
|
||||
arbitraryDataReader.loadAsynchronously(false, 10);
|
||||
return this.getLoadingResponse(service, resourceId);
|
||||
return this.getLoadingResponse(service, resourceId, identifier, theme);
|
||||
}
|
||||
|
||||
// Otherwise, loop until we have data
|
||||
@@ -110,25 +116,61 @@ public class ArbitraryDataRenderer {
|
||||
}
|
||||
String unzippedPath = path.toString();
|
||||
|
||||
// Set path automatically for single file resources (except for apps, which handle routing differently)
|
||||
String[] files = ArrayUtils.removeElement(new File(unzippedPath).list(), ".qortal");
|
||||
if (files.length == 1 && this.service != Service.APP) {
|
||||
// This is a single file resource
|
||||
inPath = files[0];
|
||||
}
|
||||
|
||||
try {
|
||||
String filename = this.getFilename(unzippedPath, inPath);
|
||||
String filePath = Paths.get(unzippedPath, filename).toString();
|
||||
Path filePath = Paths.get(unzippedPath, filename);
|
||||
boolean usingCustomRouting = false;
|
||||
|
||||
// If the file doesn't exist, we may need to route the request elsewhere, or cleanup
|
||||
if (!Files.exists(filePath)) {
|
||||
if (inPath.equals("/")) {
|
||||
// Delete the unzipped folder if no index file was found
|
||||
try {
|
||||
FileUtils.deleteDirectory(new File(unzippedPath));
|
||||
} catch (IOException e) {
|
||||
LOGGER.debug("Unable to delete directory: {}", unzippedPath, e);
|
||||
}
|
||||
}
|
||||
|
||||
// If this is an app, then forward all unhandled requests to the index, to give the app the option to route it
|
||||
if (this.service == Service.APP) {
|
||||
// Locate index file
|
||||
List<String> indexFiles = ArbitraryDataRenderer.indexFiles();
|
||||
for (String indexFile : indexFiles) {
|
||||
Path indexPath = Paths.get(unzippedPath, indexFile);
|
||||
if (Files.exists(indexPath)) {
|
||||
// Forward request to index file
|
||||
filePath = indexPath;
|
||||
filename = indexFile;
|
||||
usingCustomRouting = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (HTMLParser.isHtmlFile(filename)) {
|
||||
// HTML file - needs to be parsed
|
||||
byte[] data = Files.readAllBytes(Paths.get(filePath)); // TODO: limit file size that can be read into memory
|
||||
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, usePrefix, data);
|
||||
byte[] data = Files.readAllBytes(filePath); // TODO: limit file size that can be read into memory
|
||||
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, usePrefix, data, qdnContext, service, identifier, theme, usingCustomRouting);
|
||||
htmlParser.addAdditionalHeaderTags();
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline'; media-src 'self' blob:");
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:;");
|
||||
response.setContentType(context.getMimeType(filename));
|
||||
response.setContentLength(htmlParser.getData().length);
|
||||
response.getOutputStream().write(htmlParser.getData());
|
||||
}
|
||||
else {
|
||||
// Regular file - can be streamed directly
|
||||
File file = new File(filePath);
|
||||
File file = filePath.toFile();
|
||||
FileInputStream inputStream = new FileInputStream(file);
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline'; media-src 'self' blob:");
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self'");
|
||||
response.setContentType(context.getMimeType(filename));
|
||||
int bytesRead, length = 0;
|
||||
byte[] buffer = new byte[10240];
|
||||
@@ -142,14 +184,6 @@ public class ArbitraryDataRenderer {
|
||||
return response;
|
||||
} catch (FileNotFoundException | NoSuchFileException e) {
|
||||
LOGGER.info("Unable to serve file: {}", e.getMessage());
|
||||
if (inPath.equals("/")) {
|
||||
// Delete the unzipped folder if no index file was found
|
||||
try {
|
||||
FileUtils.deleteDirectory(new File(unzippedPath));
|
||||
} catch (IOException ioException) {
|
||||
LOGGER.debug("Unable to delete directory: {}", unzippedPath, e);
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to serve file at path {}: {}", inPath, e.getMessage());
|
||||
}
|
||||
@@ -171,7 +205,7 @@ public class ArbitraryDataRenderer {
|
||||
return userPath;
|
||||
}
|
||||
|
||||
private HttpServletResponse getLoadingResponse(Service service, String name) {
|
||||
private HttpServletResponse getLoadingResponse(Service service, String name, String identifier, String theme) {
|
||||
String responseString = "";
|
||||
URL url = Resources.getResource("loading/index.html");
|
||||
try {
|
||||
@@ -180,6 +214,8 @@ public class ArbitraryDataRenderer {
|
||||
// Replace vars
|
||||
responseString = responseString.replace("%%SERVICE%%", service.toString());
|
||||
responseString = responseString.replace("%%NAME%%", name);
|
||||
responseString = responseString.replace("%%IDENTIFIER%%", identifier);
|
||||
responseString = responseString.replace("%%THEME%%", theme);
|
||||
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to show loading screen: {}", e.getMessage());
|
||||
@@ -210,4 +246,8 @@ public class ArbitraryDataRenderer {
|
||||
return indexFiles;
|
||||
}
|
||||
|
||||
public void setTheme(String theme) {
|
||||
this.theme = theme;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -3,19 +3,21 @@ package org.qortal.arbitrary;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
|
||||
import org.qortal.arbitrary.exception.DataNotPublishedException;
|
||||
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
|
||||
import org.qortal.arbitrary.misc.Service;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataManager;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
|
||||
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.list.ResourceListManager;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.ArbitraryTransactionUtils;
|
||||
import org.qortal.utils.FilesystemUtils;
|
||||
import org.qortal.utils.ListUtils;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.io.IOException;
|
||||
@@ -37,9 +39,11 @@ public class ArbitraryDataResource {
|
||||
|
||||
private List<ArbitraryTransactionData> transactions;
|
||||
private ArbitraryTransactionData latestPutTransaction;
|
||||
private ArbitraryTransactionData latestTransaction;
|
||||
private int layerCount;
|
||||
private Integer localChunkCount = null;
|
||||
private Integer totalChunkCount = null;
|
||||
private boolean exists = false;
|
||||
|
||||
public ArbitraryDataResource(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
|
||||
this.resourceId = resourceId.toLowerCase();
|
||||
@@ -58,6 +62,10 @@ public class ArbitraryDataResource {
|
||||
// Avoid this for "quick" statuses, to speed things up
|
||||
if (!quick) {
|
||||
this.calculateChunkCounts();
|
||||
|
||||
if (!this.exists) {
|
||||
return new ArbitraryResourceStatus(Status.NOT_PUBLISHED, this.localChunkCount, this.totalChunkCount);
|
||||
}
|
||||
}
|
||||
|
||||
if (resourceIdType != ResourceIdType.NAME) {
|
||||
@@ -66,8 +74,7 @@ public class ArbitraryDataResource {
|
||||
}
|
||||
|
||||
// Check if the name is blocked
|
||||
if (ResourceListManager.getInstance()
|
||||
.listContains("blockedNames", this.resourceId, false)) {
|
||||
if (ListUtils.isNameBlocked(this.resourceId)) {
|
||||
return new ArbitraryResourceStatus(Status.BLOCKED, this.localChunkCount, this.totalChunkCount);
|
||||
}
|
||||
|
||||
@@ -105,21 +112,47 @@ public class ArbitraryDataResource {
|
||||
return new ArbitraryResourceStatus(Status.DOWNLOADED, this.localChunkCount, this.totalChunkCount);
|
||||
}
|
||||
|
||||
public boolean delete() {
|
||||
public ArbitraryDataTransactionMetadata getLatestTransactionMetadata() {
|
||||
this.fetchLatestTransaction();
|
||||
|
||||
if (latestTransaction != null) {
|
||||
byte[] signature = latestTransaction.getSignature();
|
||||
byte[] metadataHash = latestTransaction.getMetadataHash();
|
||||
if (metadataHash == null) {
|
||||
// This resource doesn't have metadata
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature);
|
||||
if (metadataFile.exists()) {
|
||||
ArbitraryDataTransactionMetadata transactionMetadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath());
|
||||
transactionMetadata.read();
|
||||
return transactionMetadata;
|
||||
}
|
||||
|
||||
} catch (DataException | IOException e) {
|
||||
// Do nothing
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
public boolean delete(boolean deleteMetadata) {
|
||||
try {
|
||||
this.fetchTransactions();
|
||||
if (this.transactions == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
|
||||
|
||||
for (ArbitraryTransactionData transactionData : transactionDataList) {
|
||||
byte[] hash = transactionData.getData();
|
||||
byte[] metadataHash = transactionData.getMetadataHash();
|
||||
byte[] signature = transactionData.getSignature();
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
|
||||
arbitraryDataFile.setMetadataHash(metadataHash);
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
|
||||
|
||||
// Delete any chunks or complete files from each transaction
|
||||
arbitraryDataFile.deleteAll();
|
||||
arbitraryDataFile.deleteAll(deleteMetadata);
|
||||
}
|
||||
|
||||
// Also delete cached data for the entire resource
|
||||
@@ -163,6 +196,9 @@ public class ArbitraryDataResource {
|
||||
|
||||
try {
|
||||
this.fetchTransactions();
|
||||
if (this.transactions == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
|
||||
|
||||
@@ -182,6 +218,14 @@ public class ArbitraryDataResource {
|
||||
private void calculateChunkCounts() {
|
||||
try {
|
||||
this.fetchTransactions();
|
||||
if (this.transactions == null) {
|
||||
this.exists = false;
|
||||
this.localChunkCount = 0;
|
||||
this.totalChunkCount = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
this.exists = true;
|
||||
|
||||
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
|
||||
int localChunkCount = 0;
|
||||
@@ -201,6 +245,9 @@ public class ArbitraryDataResource {
|
||||
private boolean isRateLimited() {
|
||||
try {
|
||||
this.fetchTransactions();
|
||||
if (this.transactions == null) {
|
||||
return true;
|
||||
}
|
||||
|
||||
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
|
||||
|
||||
@@ -224,6 +271,10 @@ public class ArbitraryDataResource {
|
||||
private boolean isDataPotentiallyAvailable() {
|
||||
try {
|
||||
this.fetchTransactions();
|
||||
if (this.transactions == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Long now = NTP.getTime();
|
||||
if (now == null) {
|
||||
return false;
|
||||
@@ -255,6 +306,10 @@ public class ArbitraryDataResource {
|
||||
private boolean isDownloading() {
|
||||
try {
|
||||
this.fetchTransactions();
|
||||
if (this.transactions == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Long now = NTP.getTime();
|
||||
if (now == null) {
|
||||
return false;
|
||||
@@ -296,7 +351,7 @@ public class ArbitraryDataResource {
|
||||
if (latestPut == null) {
|
||||
String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s",
|
||||
this.resourceId, this.service, this.identifierString());
|
||||
throw new DataException(message);
|
||||
throw new DataNotPublishedException(message);
|
||||
}
|
||||
this.latestPutTransaction = latestPut;
|
||||
|
||||
@@ -306,6 +361,35 @@ public class ArbitraryDataResource {
|
||||
|
||||
this.transactions = transactionDataList;
|
||||
this.layerCount = transactionDataList.size();
|
||||
|
||||
} catch (DataNotPublishedException e) {
|
||||
// Ignore without logging
|
||||
}
|
||||
catch (DataException e) {
|
||||
LOGGER.info(String.format("Repository error when fetching transactions for resource %s: %s", this, e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
private void fetchLatestTransaction() {
|
||||
if (this.latestTransaction != null) {
|
||||
// Already fetched
|
||||
return;
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Get the most recent transaction
|
||||
ArbitraryTransactionData latestTransaction = repository.getArbitraryRepository()
|
||||
.getLatestTransaction(this.resourceId, this.service, null, this.identifier);
|
||||
if (latestTransaction == null) {
|
||||
String message = String.format("Couldn't find transaction for name %s, service %s and identifier %s",
|
||||
this.resourceId, this.service, this.identifierString());
|
||||
throw new DataException(message);
|
||||
}
|
||||
this.latestTransaction = latestTransaction;
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.info(String.format("Repository error when fetching latest transaction for resource %s: %s", this, e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,8 +6,10 @@ import org.qortal.arbitrary.exception.MissingDataException;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
|
||||
import org.qortal.arbitrary.ArbitraryDataDiff.*;
|
||||
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
|
||||
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
|
||||
import org.qortal.arbitrary.misc.Category;
|
||||
import org.qortal.arbitrary.misc.Service;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.crypto.AES;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.PaymentData;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
@@ -27,6 +29,7 @@ import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Random;
|
||||
|
||||
public class ArbitraryDataTransactionBuilder {
|
||||
@@ -44,6 +47,7 @@ public class ArbitraryDataTransactionBuilder {
|
||||
private static final double MAX_FILE_DIFF = 0.5f;
|
||||
|
||||
private final String publicKey58;
|
||||
private final long fee;
|
||||
private final Path path;
|
||||
private final String name;
|
||||
private Method method;
|
||||
@@ -51,15 +55,23 @@ public class ArbitraryDataTransactionBuilder {
|
||||
private final String identifier;
|
||||
private final Repository repository;
|
||||
|
||||
// Metadata
|
||||
private final String title;
|
||||
private final String description;
|
||||
private final List<String> tags;
|
||||
private final Category category;
|
||||
|
||||
private int chunkSize = ArbitraryDataFile.CHUNK_SIZE;
|
||||
|
||||
private ArbitraryTransactionData arbitraryTransactionData;
|
||||
private ArbitraryDataFile arbitraryDataFile;
|
||||
|
||||
public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, Path path, String name,
|
||||
Method method, Service service, String identifier) {
|
||||
public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, long fee, Path path, String name,
|
||||
Method method, Service service, String identifier,
|
||||
String title, String description, List<String> tags, Category category) {
|
||||
this.repository = repository;
|
||||
this.publicKey58 = publicKey58;
|
||||
this.fee = fee;
|
||||
this.path = path;
|
||||
this.name = name;
|
||||
this.method = method;
|
||||
@@ -70,6 +82,12 @@ public class ArbitraryDataTransactionBuilder {
|
||||
identifier = null;
|
||||
}
|
||||
this.identifier = identifier;
|
||||
|
||||
// Metadata (optional)
|
||||
this.title = ArbitraryDataTransactionMetadata.limitTitle(title);
|
||||
this.description = ArbitraryDataTransactionMetadata.limitDescription(description);
|
||||
this.tags = ArbitraryDataTransactionMetadata.limitTags(tags);
|
||||
this.category = category;
|
||||
}
|
||||
|
||||
public void build() throws DataException {
|
||||
@@ -108,6 +126,10 @@ public class ArbitraryDataTransactionBuilder {
|
||||
return Method.PUT;
|
||||
}
|
||||
|
||||
// Get existing metadata and see if it matches the new metadata
|
||||
ArbitraryDataResource resource = new ArbitraryDataResource(this.name, ResourceIdType.NAME, this.service, this.identifier);
|
||||
ArbitraryDataTransactionMetadata existingMetadata = resource.getLatestTransactionMetadata();
|
||||
|
||||
try {
|
||||
// Check layer count
|
||||
int layerCount = reader.getLayerCount();
|
||||
@@ -118,7 +140,23 @@ public class ArbitraryDataTransactionBuilder {
|
||||
|
||||
// Check size of differences between this layer and previous layer
|
||||
ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(reader.getFilePath(), this.path, reader.getLatestSignature());
|
||||
patch.create();
|
||||
try {
|
||||
patch.create();
|
||||
}
|
||||
catch (DataException | IOException e) {
|
||||
// Handle matching states separately, as it's best to block transactions with duplicate states
|
||||
if (e.getMessage().equals("Current state matches previous state. Nothing to do.")) {
|
||||
// Only throw an exception if the metadata is also identical, as well as the data
|
||||
if (this.isMetadataEqual(existingMetadata)) {
|
||||
throw new DataException(e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
LOGGER.info("Caught exception when creating patch: {}", e.getMessage());
|
||||
LOGGER.info("Unable to load existing resource - using PUT to overwrite it.");
|
||||
return Method.PUT;
|
||||
}
|
||||
|
||||
long diffSize = FilesystemUtils.getDirectorySize(patch.getFinalPath());
|
||||
long existingStateSize = FilesystemUtils.getDirectorySize(reader.getFilePath());
|
||||
double difference = (double) diffSize / (double) existingStateSize;
|
||||
@@ -144,6 +182,7 @@ public class ArbitraryDataTransactionBuilder {
|
||||
for (ModifiedPath path : metadata.getModifiedPaths()) {
|
||||
if (path.getDiffType() != DiffType.COMPLETE_FILE) {
|
||||
atLeastOnePatch = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -152,14 +191,19 @@ public class ArbitraryDataTransactionBuilder {
|
||||
return Method.PUT;
|
||||
}
|
||||
|
||||
// We can't use PATCH for on-chain data because this requires the .qortal directory, which can't be put on chain
|
||||
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(this.path, false);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
if (shouldUseOnChainData) {
|
||||
LOGGER.info("Data size is small enough to go on chain - using PUT");
|
||||
return Method.PUT;
|
||||
}
|
||||
|
||||
// State is appropriate for a PATCH transaction
|
||||
return Method.PATCH;
|
||||
}
|
||||
catch (IOException | DataException e) {
|
||||
// Handle matching states separately, as it's best to block transactions with duplicate states
|
||||
if (e.getMessage().equals("Current state matches previous state. Nothing to do.")) {
|
||||
throw new DataException(e.getMessage());
|
||||
}
|
||||
catch (IOException e) {
|
||||
// IMPORTANT: Don't catch DataException here, as they must be passed to the caller
|
||||
LOGGER.info("Caught exception: {}", e.getMessage());
|
||||
LOGGER.info("Unable to load existing resource - using PUT to overwrite it.");
|
||||
return Method.PUT;
|
||||
@@ -195,12 +239,15 @@ public class ArbitraryDataTransactionBuilder {
|
||||
random.nextBytes(lastReference);
|
||||
}
|
||||
|
||||
Compression compression = Compression.ZIP;
|
||||
// Single file resources are handled differently, especially for very small data payloads, as these go on chain
|
||||
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(path, false);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
|
||||
// FUTURE? Use zip compression for directories, or no compression for single files
|
||||
// Compression compression = (path.toFile().isDirectory()) ? Compression.ZIP : Compression.NONE;
|
||||
// Use zip compression if data isn't going on chain
|
||||
Compression compression = shouldUseOnChainData ? Compression.NONE : Compression.ZIP;
|
||||
|
||||
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(path, name, service, identifier, method, compression);
|
||||
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(path, name, service, identifier, method,
|
||||
compression, title, description, tags, category);
|
||||
try {
|
||||
arbitraryDataWriter.setChunkSize(this.chunkSize);
|
||||
arbitraryDataWriter.save();
|
||||
@@ -215,44 +262,71 @@ public class ArbitraryDataTransactionBuilder {
|
||||
throw new DataException("Arbitrary data file is null");
|
||||
}
|
||||
|
||||
// Get chunks metadata file
|
||||
// Get metadata file
|
||||
ArbitraryDataFile metadataFile = arbitraryDataFile.getMetadataFile();
|
||||
if (metadataFile == null && arbitraryDataFile.chunkCount() > 1) {
|
||||
throw new DataException(String.format("Chunks metadata data file is null but there are %d chunks", arbitraryDataFile.chunkCount()));
|
||||
}
|
||||
|
||||
String digest58 = arbitraryDataFile.digest58();
|
||||
if (digest58 == null) {
|
||||
LOGGER.error("Unable to calculate file digest");
|
||||
throw new DataException("Unable to calculate file digest");
|
||||
// Default to using a data hash, with data held off-chain
|
||||
ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
|
||||
byte[] data = arbitraryDataFile.digest();
|
||||
|
||||
// For small, single-chunk resources, we can store the data directly on chain
|
||||
if (shouldUseOnChainData && arbitraryDataFile.getBytes().length <= ArbitraryTransaction.MAX_DATA_SIZE && arbitraryDataFile.chunkCount() == 0) {
|
||||
// Within allowed on-chain data size
|
||||
dataType = DataType.RAW_DATA;
|
||||
data = arbitraryDataFile.getBytes();
|
||||
}
|
||||
|
||||
final BaseTransactionData baseTransactionData = new BaseTransactionData(now, Group.NO_GROUP,
|
||||
lastReference, creatorPublicKey, 0L, null);
|
||||
lastReference, creatorPublicKey, fee, null);
|
||||
final int size = (int) arbitraryDataFile.size();
|
||||
final int version = 5;
|
||||
final int nonce = 0;
|
||||
byte[] secret = arbitraryDataFile.getSecret();
|
||||
final ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
|
||||
final byte[] digest = arbitraryDataFile.digest();
|
||||
|
||||
final byte[] metadataHash = (metadataFile != null) ? metadataFile.getHash() : null;
|
||||
final List<PaymentData> payments = new ArrayList<>();
|
||||
|
||||
ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
|
||||
version, service, nonce, size, name, identifier, method,
|
||||
secret, compression, digest, dataType, metadataHash, payments);
|
||||
version, service.value, nonce, size, name, identifier, method,
|
||||
secret, compression, data, dataType, metadataHash, payments);
|
||||
|
||||
this.arbitraryTransactionData = transactionData;
|
||||
|
||||
} catch (DataException e) {
|
||||
} catch (DataException | IOException e) {
|
||||
if (arbitraryDataFile != null) {
|
||||
arbitraryDataFile.deleteAll();
|
||||
arbitraryDataFile.deleteAll(true);
|
||||
}
|
||||
throw(e);
|
||||
throw new DataException(e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private boolean isMetadataEqual(ArbitraryDataTransactionMetadata existingMetadata) {
|
||||
if (existingMetadata == null) {
|
||||
return !this.hasMetadata();
|
||||
}
|
||||
if (!Objects.equals(existingMetadata.getTitle(), this.title)) {
|
||||
return false;
|
||||
}
|
||||
if (!Objects.equals(existingMetadata.getDescription(), this.description)) {
|
||||
return false;
|
||||
}
|
||||
if (!Objects.equals(existingMetadata.getCategory(), this.category)) {
|
||||
return false;
|
||||
}
|
||||
if (!Objects.equals(existingMetadata.getTags(), this.tags)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private boolean hasMetadata() {
|
||||
return (this.title != null || this.description != null || this.category != null || this.tags != null);
|
||||
}
|
||||
|
||||
public void computeNonce() throws DataException {
|
||||
if (this.arbitraryTransactionData == null) {
|
||||
throw new DataException("Arbitrary transaction data is required to compute nonce");
|
||||
@@ -264,7 +338,7 @@ public class ArbitraryDataTransactionBuilder {
|
||||
|
||||
Transaction.ValidationResult result = transaction.isValidUnconfirmed();
|
||||
if (result != Transaction.ValidationResult.OK) {
|
||||
arbitraryDataFile.deleteAll();
|
||||
arbitraryDataFile.deleteAll(true);
|
||||
throw new DataException(String.format("Arbitrary transaction invalid: %s", result));
|
||||
}
|
||||
LOGGER.info("Transaction is valid");
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
package org.qortal.arbitrary;
|
||||
|
||||
import com.j256.simplemagic.ContentInfo;
|
||||
import com.j256.simplemagic.ContentInfoUtil;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.arbitrary.exception.MissingDataException;
|
||||
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
|
||||
import org.qortal.arbitrary.misc.Category;
|
||||
import org.qortal.arbitrary.misc.Service;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData.*;
|
||||
@@ -22,12 +25,15 @@ import javax.crypto.NoSuchPaddingException;
|
||||
import javax.crypto.SecretKey;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.net.FileNameMap;
|
||||
import java.net.URLConnection;
|
||||
import java.nio.file.*;
|
||||
import java.security.InvalidAlgorithmParameterException;
|
||||
import java.security.InvalidKeyException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
public class ArbitraryDataWriter {
|
||||
|
||||
@@ -40,6 +46,14 @@ public class ArbitraryDataWriter {
|
||||
private final Method method;
|
||||
private final Compression compression;
|
||||
|
||||
// Metadata
|
||||
private final String title;
|
||||
private final String description;
|
||||
private final List<String> tags;
|
||||
private final Category category;
|
||||
private List<String> files;
|
||||
private String mimeType;
|
||||
|
||||
private int chunkSize = ArbitraryDataFile.CHUNK_SIZE;
|
||||
|
||||
private SecretKey aesKey;
|
||||
@@ -50,7 +64,8 @@ public class ArbitraryDataWriter {
|
||||
private Path compressedPath;
|
||||
private Path encryptedPath;
|
||||
|
||||
public ArbitraryDataWriter(Path filePath, String name, Service service, String identifier, Method method, Compression compression) {
|
||||
public ArbitraryDataWriter(Path filePath, String name, Service service, String identifier, Method method, Compression compression,
|
||||
String title, String description, List<String> tags, Category category) {
|
||||
this.filePath = filePath;
|
||||
this.name = name;
|
||||
this.service = service;
|
||||
@@ -62,12 +77,21 @@ public class ArbitraryDataWriter {
|
||||
identifier = null;
|
||||
}
|
||||
this.identifier = identifier;
|
||||
|
||||
// Metadata (optional)
|
||||
this.title = ArbitraryDataTransactionMetadata.limitTitle(title);
|
||||
this.description = ArbitraryDataTransactionMetadata.limitDescription(description);
|
||||
this.tags = ArbitraryDataTransactionMetadata.limitTags(tags);
|
||||
this.category = category;
|
||||
this.files = new ArrayList<>(); // Populated in buildFileList()
|
||||
this.mimeType = null; // Populated in buildFileList()
|
||||
}
|
||||
|
||||
public void save() throws IOException, DataException, InterruptedException, MissingDataException {
|
||||
try {
|
||||
this.preExecute();
|
||||
this.validateService();
|
||||
this.buildFileList();
|
||||
this.process();
|
||||
this.compress();
|
||||
this.encrypt();
|
||||
@@ -83,10 +107,9 @@ public class ArbitraryDataWriter {
|
||||
private void preExecute() throws DataException {
|
||||
this.checkEnabled();
|
||||
|
||||
// Enforce compression when uploading a directory
|
||||
File file = new File(this.filePath.toString());
|
||||
if (file.isDirectory() && compression == Compression.NONE) {
|
||||
throw new DataException("Unable to upload a directory without compression");
|
||||
// Enforce compression when uploading multiple files
|
||||
if (!FilesystemUtils.isSingleFileResource(this.filePath, false) && compression == Compression.NONE) {
|
||||
throw new DataException("Unable to publish multiple files without compression");
|
||||
}
|
||||
|
||||
// Create temporary working directory
|
||||
@@ -125,6 +148,48 @@ public class ArbitraryDataWriter {
|
||||
}
|
||||
}
|
||||
|
||||
private void buildFileList() throws IOException {
|
||||
// Check if the path already points to a single file
|
||||
boolean isSingleFile = this.filePath.toFile().isFile();
|
||||
Path singleFilePath = null;
|
||||
if (isSingleFile) {
|
||||
this.files.add(this.filePath.getFileName().toString());
|
||||
singleFilePath = this.filePath;
|
||||
}
|
||||
else {
|
||||
// Multi file resources (or a single file in a directory) require a walk through the directory tree
|
||||
try (Stream<Path> stream = Files.walk(this.filePath)) {
|
||||
this.files = stream
|
||||
.filter(Files::isRegularFile)
|
||||
.map(p -> this.filePath.relativize(p).toString())
|
||||
.filter(s -> !s.isEmpty())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
if (this.files.size() == 1) {
|
||||
singleFilePath = Paths.get(this.filePath.toString(), this.files.get(0));
|
||||
|
||||
// Update filePath to point to the single file (instead of the directory containing the file)
|
||||
this.filePath = singleFilePath;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (singleFilePath != null) {
|
||||
// Single file resource, so try and determine the MIME type
|
||||
ContentInfoUtil util = new ContentInfoUtil();
|
||||
ContentInfo info = util.findMatch(singleFilePath.toFile());
|
||||
if (info != null) {
|
||||
// Attempt to extract MIME type from file contents
|
||||
this.mimeType = info.getMimeType();
|
||||
}
|
||||
else {
|
||||
// Fall back to using the filename
|
||||
FileNameMap fileNameMap = URLConnection.getFileNameMap();
|
||||
this.mimeType = fileNameMap.getContentTypeFor(singleFilePath.toFile().getName());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void process() throws DataException, IOException, MissingDataException {
|
||||
switch (this.method) {
|
||||
|
||||
@@ -251,19 +316,22 @@ public class ArbitraryDataWriter {
|
||||
if (chunkCount > 0) {
|
||||
LOGGER.info(String.format("Successfully split into %d chunk%s", chunkCount, (chunkCount == 1 ? "" : "s")));
|
||||
}
|
||||
else {
|
||||
throw new DataException("Unable to split file into chunks");
|
||||
}
|
||||
}
|
||||
|
||||
private void createMetadataFile() throws IOException, DataException {
|
||||
// If we have at least one chunk, we need to create an index file containing their hashes
|
||||
if (this.arbitraryDataFile.chunkCount() > 1) {
|
||||
if (this.needsMetadataFile()) {
|
||||
// Create the JSON file
|
||||
Path chunkFilePath = Paths.get(this.workingPath.toString(), "metadata.json");
|
||||
ArbitraryDataTransactionMetadata chunkMetadata = new ArbitraryDataTransactionMetadata(chunkFilePath);
|
||||
chunkMetadata.setChunks(this.arbitraryDataFile.chunkHashList());
|
||||
chunkMetadata.write();
|
||||
ArbitraryDataTransactionMetadata metadata = new ArbitraryDataTransactionMetadata(chunkFilePath);
|
||||
metadata.setTitle(this.title);
|
||||
metadata.setDescription(this.description);
|
||||
metadata.setTags(this.tags);
|
||||
metadata.setCategory(this.category);
|
||||
metadata.setChunks(this.arbitraryDataFile.chunkHashList());
|
||||
metadata.setFiles(this.files);
|
||||
metadata.setMimeType(this.mimeType);
|
||||
metadata.write();
|
||||
|
||||
// Create an ArbitraryDataFile from the JSON file (we don't have a signature yet)
|
||||
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromPath(chunkFilePath, null);
|
||||
@@ -308,6 +376,20 @@ public class ArbitraryDataWriter {
|
||||
throw new DataException(String.format("Missing chunk %s in metadata file", Base58.encode(chunk)));
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the metadata is correct
|
||||
if (!Objects.equals(metadata.getTitle(), this.title)) {
|
||||
throw new DataException("Metadata mismatch: title");
|
||||
}
|
||||
if (!Objects.equals(metadata.getDescription(), this.description)) {
|
||||
throw new DataException("Metadata mismatch: description");
|
||||
}
|
||||
if (!Objects.equals(metadata.getTags(), this.tags)) {
|
||||
throw new DataException("Metadata mismatch: tags");
|
||||
}
|
||||
if (!Objects.equals(metadata.getCategory(), this.category)) {
|
||||
throw new DataException("Metadata mismatch: category");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -330,6 +412,16 @@ public class ArbitraryDataWriter {
|
||||
}
|
||||
}
|
||||
|
||||
private boolean needsMetadataFile() {
|
||||
if (this.arbitraryDataFile.chunkCount() > 1) {
|
||||
return true;
|
||||
}
|
||||
if (this.title != null || this.description != null || this.tags != null || this.category != null) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
public ArbitraryDataFile getArbitraryDataFile() {
|
||||
return this.arbitraryDataFile;
|
||||
|
||||
@@ -0,0 +1,22 @@
|
||||
package org.qortal.arbitrary.exception;
|
||||
|
||||
import org.qortal.repository.DataException;
|
||||
|
||||
public class DataNotPublishedException extends DataException {
|
||||
|
||||
public DataNotPublishedException() {
|
||||
}
|
||||
|
||||
public DataNotPublishedException(String message) {
|
||||
super(message);
|
||||
}
|
||||
|
||||
public DataNotPublishedException(String message, Throwable cause) {
|
||||
super(message, cause);
|
||||
}
|
||||
|
||||
public DataNotPublishedException(Throwable cause) {
|
||||
super(cause);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -2,12 +2,14 @@ package org.qortal.arbitrary.metadata;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.json.JSONException;
|
||||
import org.qortal.repository.DataException;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
||||
@@ -34,7 +36,7 @@ public class ArbitraryDataMetadata {
|
||||
this.filePath = filePath;
|
||||
}
|
||||
|
||||
protected void readJson() throws DataException {
|
||||
protected void readJson() throws DataException, JSONException {
|
||||
// To be overridden
|
||||
}
|
||||
|
||||
@@ -44,8 +46,13 @@ public class ArbitraryDataMetadata {
|
||||
|
||||
|
||||
public void read() throws IOException, DataException {
|
||||
this.loadJson();
|
||||
this.readJson();
|
||||
try {
|
||||
this.loadJson();
|
||||
this.readJson();
|
||||
|
||||
} catch (JSONException e) {
|
||||
throw new DataException(String.format("Unable to read JSON at path %s: %s", this.filePath, e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
public void write() throws IOException, DataException {
|
||||
@@ -58,6 +65,10 @@ public class ArbitraryDataMetadata {
|
||||
writer.close();
|
||||
}
|
||||
|
||||
public void delete() throws IOException {
|
||||
Files.delete(this.filePath);
|
||||
}
|
||||
|
||||
|
||||
protected void loadJson() throws IOException {
|
||||
File metadataFile = new File(this.filePath.toString());
|
||||
@@ -65,7 +76,7 @@ public class ArbitraryDataMetadata {
|
||||
throw new IOException(String.format("Metadata file doesn't exist: %s", this.filePath.toString()));
|
||||
}
|
||||
|
||||
this.jsonString = new String(Files.readAllBytes(this.filePath));
|
||||
this.jsonString = new String(Files.readAllBytes(this.filePath), StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
package org.qortal.arbitrary.metadata;
|
||||
|
||||
import org.json.JSONException;
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.utils.Base58;
|
||||
@@ -22,7 +23,7 @@ public class ArbitraryDataMetadataCache extends ArbitraryDataQortalMetadata {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readJson() throws DataException {
|
||||
protected void readJson() throws DataException, JSONException {
|
||||
if (this.jsonString == null) {
|
||||
throw new DataException("Patch JSON string is null");
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package org.qortal.arbitrary.metadata;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONException;
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.arbitrary.ArbitraryDataDiff.*;
|
||||
import org.qortal.repository.DataException;
|
||||
@@ -40,7 +41,7 @@ public class ArbitraryDataMetadataPatch extends ArbitraryDataQortalMetadata {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readJson() throws DataException {
|
||||
protected void readJson() throws DataException, JSONException {
|
||||
if (this.jsonString == null) {
|
||||
throw new DataException("Patch JSON string is null");
|
||||
}
|
||||
|
||||
@@ -2,12 +2,14 @@ package org.qortal.arbitrary.metadata;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.json.JSONException;
|
||||
import org.qortal.repository.DataException;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
@@ -46,20 +48,6 @@ public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {
|
||||
return null;
|
||||
}
|
||||
|
||||
protected void readJson() throws DataException {
|
||||
// To be overridden
|
||||
}
|
||||
|
||||
protected void buildJson() {
|
||||
// To be overridden
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void read() throws IOException, DataException {
|
||||
this.loadJson();
|
||||
this.readJson();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write() throws IOException, DataException {
|
||||
@@ -82,7 +70,7 @@ public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {
|
||||
throw new IOException(String.format("Patch file doesn't exist: %s", path.toString()));
|
||||
}
|
||||
|
||||
this.jsonString = new String(Files.readAllBytes(path));
|
||||
this.jsonString = new String(Files.readAllBytes(path), StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
|
||||
@@ -94,9 +82,4 @@ public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public String getJsonString() {
|
||||
return this.jsonString;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,18 +1,33 @@
|
||||
package org.qortal.arbitrary.metadata;
|
||||
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONException;
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.arbitrary.misc.Category;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
|
||||
|
||||
private List<byte[]> chunks;
|
||||
private String title;
|
||||
private String description;
|
||||
private List<String> tags;
|
||||
private Category category;
|
||||
private List<String> files;
|
||||
private String mimeType;
|
||||
|
||||
private static int MAX_TITLE_LENGTH = 80;
|
||||
private static int MAX_DESCRIPTION_LENGTH = 240;
|
||||
private static int MAX_TAG_LENGTH = 20;
|
||||
private static int MAX_TAGS_COUNT = 5;
|
||||
|
||||
public ArbitraryDataTransactionMetadata(Path filePath) {
|
||||
super(filePath);
|
||||
@@ -20,15 +35,42 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readJson() throws DataException {
|
||||
protected void readJson() throws DataException, JSONException {
|
||||
if (this.jsonString == null) {
|
||||
throw new DataException("Transaction metadata JSON string is null");
|
||||
}
|
||||
|
||||
JSONObject metadata = new JSONObject(this.jsonString);
|
||||
|
||||
if (metadata.has("title")) {
|
||||
this.title = metadata.getString("title");
|
||||
}
|
||||
|
||||
if (metadata.has("description")) {
|
||||
this.description = metadata.getString("description");
|
||||
}
|
||||
|
||||
List<String> tagsList = new ArrayList<>();
|
||||
if (metadata.has("tags")) {
|
||||
JSONArray tags = metadata.getJSONArray("tags");
|
||||
if (tags != null) {
|
||||
for (int i=0; i<tags.length(); i++) {
|
||||
String tag = tags.getString(i);
|
||||
if (tag != null) {
|
||||
tagsList.add(tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
this.tags = tagsList;
|
||||
}
|
||||
|
||||
if (metadata.has("category")) {
|
||||
this.category = Category.uncategorizedValueOf(metadata.getString("category"));
|
||||
}
|
||||
|
||||
List<byte[]> chunksList = new ArrayList<>();
|
||||
JSONObject cache = new JSONObject(this.jsonString);
|
||||
if (cache.has("chunks")) {
|
||||
JSONArray chunks = cache.getJSONArray("chunks");
|
||||
if (metadata.has("chunks")) {
|
||||
JSONArray chunks = metadata.getJSONArray("chunks");
|
||||
if (chunks != null) {
|
||||
for (int i=0; i<chunks.length(); i++) {
|
||||
String chunk = chunks.getString(i);
|
||||
@@ -39,12 +81,50 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
|
||||
}
|
||||
this.chunks = chunksList;
|
||||
}
|
||||
|
||||
List<String> filesList = new ArrayList<>();
|
||||
if (metadata.has("files")) {
|
||||
JSONArray files = metadata.getJSONArray("files");
|
||||
if (files != null) {
|
||||
for (int i=0; i<files.length(); i++) {
|
||||
String tag = files.getString(i);
|
||||
if (tag != null) {
|
||||
filesList.add(tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
this.files = filesList;
|
||||
}
|
||||
|
||||
if (metadata.has("mimeType")) {
|
||||
this.mimeType = metadata.getString("mimeType");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void buildJson() {
|
||||
JSONObject outer = new JSONObject();
|
||||
|
||||
if (this.title != null && !this.title.isEmpty()) {
|
||||
outer.put("title", this.title);
|
||||
}
|
||||
|
||||
if (this.description != null && !this.description.isEmpty()) {
|
||||
outer.put("description", this.description);
|
||||
}
|
||||
|
||||
JSONArray tags = new JSONArray();
|
||||
if (this.tags != null) {
|
||||
for (String tag : this.tags) {
|
||||
tags.put(tag);
|
||||
}
|
||||
outer.put("tags", tags);
|
||||
}
|
||||
|
||||
if (this.category != null) {
|
||||
outer.put("category", this.category.toString());
|
||||
}
|
||||
|
||||
JSONArray chunks = new JSONArray();
|
||||
if (this.chunks != null) {
|
||||
for (byte[] chunk : this.chunks) {
|
||||
@@ -53,6 +133,18 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
|
||||
}
|
||||
outer.put("chunks", chunks);
|
||||
|
||||
JSONArray files = new JSONArray();
|
||||
if (this.files != null) {
|
||||
for (String file : this.files) {
|
||||
files.put(file);
|
||||
}
|
||||
}
|
||||
outer.put("files", files);
|
||||
|
||||
if (this.mimeType != null && !this.mimeType.isEmpty()) {
|
||||
outer.put("mimeType", this.mimeType);
|
||||
}
|
||||
|
||||
this.jsonString = outer.toString(2);
|
||||
LOGGER.trace("Transaction metadata: {}", this.jsonString);
|
||||
}
|
||||
@@ -66,6 +158,54 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
|
||||
return this.chunks;
|
||||
}
|
||||
|
||||
public void setTitle(String title) {
|
||||
this.title = title;
|
||||
}
|
||||
|
||||
public String getTitle() {
|
||||
return this.title;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return this.description;
|
||||
}
|
||||
|
||||
public void setTags(List<String> tags) {
|
||||
this.tags = tags;
|
||||
}
|
||||
|
||||
public List<String> getTags() {
|
||||
return this.tags;
|
||||
}
|
||||
|
||||
public void setCategory(Category category) {
|
||||
this.category = category;
|
||||
}
|
||||
|
||||
public Category getCategory() {
|
||||
return this.category;
|
||||
}
|
||||
|
||||
public void setFiles(List<String> files) {
|
||||
this.files = files;
|
||||
}
|
||||
|
||||
public List<String> getFiles() {
|
||||
return this.files;
|
||||
}
|
||||
|
||||
public void setMimeType(String mimeType) {
|
||||
this.mimeType = mimeType;
|
||||
}
|
||||
|
||||
public String getMimeType() {
|
||||
return this.mimeType;
|
||||
}
|
||||
|
||||
public boolean containsChunk(byte[] chunk) {
|
||||
for (byte[] c : this.chunks) {
|
||||
if (Arrays.equals(c, chunk)) {
|
||||
@@ -75,4 +215,80 @@ public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
// Static helper methods
|
||||
|
||||
public static String trimUTF8String(String string, int maxLength) {
|
||||
byte[] inputBytes = string.getBytes(StandardCharsets.UTF_8);
|
||||
int length = Math.min(inputBytes.length, maxLength);
|
||||
byte[] outputBytes = new byte[length];
|
||||
|
||||
System.arraycopy(inputBytes, 0, outputBytes, 0, length);
|
||||
String result = new String(outputBytes, StandardCharsets.UTF_8);
|
||||
|
||||
// check if last character is truncated
|
||||
int lastIndex = result.length() - 1;
|
||||
|
||||
if (lastIndex > 0 && result.charAt(lastIndex) != string.charAt(lastIndex)) {
|
||||
// last character is truncated so remove the last character
|
||||
return result.substring(0, lastIndex);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
public static String limitTitle(String title) {
|
||||
if (title == null) {
|
||||
return null;
|
||||
}
|
||||
if (title.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return trimUTF8String(title, MAX_TITLE_LENGTH);
|
||||
}
|
||||
|
||||
public static String limitDescription(String description) {
|
||||
if (description == null) {
|
||||
return null;
|
||||
}
|
||||
if (description.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return trimUTF8String(description, MAX_DESCRIPTION_LENGTH);
|
||||
}
|
||||
|
||||
public static List<String> limitTags(List<String> tags) {
|
||||
if (tags == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Ensure tags list is mutable
|
||||
List<String> mutableTags = new ArrayList<>(tags);
|
||||
|
||||
int tagCount = mutableTags.size();
|
||||
if (tagCount == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Remove tags over the limit
|
||||
// This is cleaner than truncating, which results in malformed tags
|
||||
// Also remove tags that are empty
|
||||
Iterator iterator = mutableTags.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
String tag = (String) iterator.next();
|
||||
if (tag == null || tag.length() > MAX_TAG_LENGTH || tag.isEmpty()) {
|
||||
iterator.remove();
|
||||
}
|
||||
}
|
||||
|
||||
// Limit the total number of tags
|
||||
if (tagCount > MAX_TAGS_COUNT) {
|
||||
mutableTags = mutableTags.subList(0, MAX_TAGS_COUNT);
|
||||
}
|
||||
|
||||
return mutableTags;
|
||||
}
|
||||
|
||||
}
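// Illustrative usage sketch of the helpers above (inputs are hypothetical, chosen for demonstration):
// trimUTF8String() truncates on a UTF-8 byte boundary, then drops the final character if the cut
// split a multi-byte sequence, so no mangled character survives.
String t1 = ArbitraryDataTransactionMetadata.trimUTF8String("héllo", 5); // "héll" - 'é' takes 2 bytes, so 5 bytes end cleanly after the second 'l'
String t2 = ArbitraryDataTransactionMetadata.trimUTF8String("héllo", 2); // "h" - the 2-byte cut splits 'é', so the damaged last character is dropped
List<String> tags = ArbitraryDataTransactionMetadata.limitTags(
        Arrays.asList("qortal", "", "a-tag-assumed-to-exceed-MAX_TAG_LENGTH")); // empty and over-length tags removed, list capped at MAX_TAGS_COUNT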
|
||||
|
||||
src/main/java/org/qortal/arbitrary/misc/Category.java (new file, 81 lines)
@@ -0,0 +1,81 @@
package org.qortal.arbitrary.misc;

public enum Category {
ART("Art and Design"),
AUTOMOTIVE("Automotive"),
BEAUTY("Beauty"),
BOOKS("Books and Reference"),
BUSINESS("Business"),
COMMUNICATIONS("Communications"),
CRYPTOCURRENCY("Cryptocurrency and Blockchain"),
CULTURE("Culture"),
DATING("Dating"),
DESIGN("Design"),
ENTERTAINMENT("Entertainment"),
EVENTS("Events"),
FAITH("Faith and Religion"),
FASHION("Fashion"),
FINANCE("Finance"),
FOOD("Food and Drink"),
GAMING("Gaming"),
GEOGRAPHY("Geography"),
HEALTH("Health"),
HISTORY("History"),
HOME("Home"),
KNOWLEDGE("Knowledge Share"),
LANGUAGE("Language"),
LIFESTYLE("Lifestyle"),
MANUFACTURING("Manufacturing"),
MAPS("Maps and Navigation"),
MUSIC("Music"),
NEWS("News"),
OTHER("Other"),
PETS("Pets"),
PHILOSOPHY("Philosophy"),
PHOTOGRAPHY("Photography"),
POLITICS("Politics"),
PRODUCE("Products and Services"),
PRODUCTIVITY("Productivity"),
PSYCHOLOGY("Psychology"),
QORTAL("Qortal"),
SCIENCE("Science"),
SELF_CARE("Self Care"),
SELF_SUFFICIENCY("Self-Sufficiency and Homesteading"),
SHOPPING("Shopping"),
SOCIAL("Social"),
SOFTWARE("Software"),
SPIRITUALITY("Spirituality"),
SPORTS("Sports"),
STORYTELLING("Storytelling"),
TECHNOLOGY("Technology"),
TOOLS("Tools"),
TRAVEL("Travel"),
UNCATEGORIZED("Uncategorized"),
VIDEO("Video"),
WEATHER("Weather");

private final String name;

Category(String name) {
this.name = name;
}

public String getName() {
return this.name;
}

/**
* Same as valueOf() but with fallback to UNCATEGORIZED if there's no match
* @param name
* @return a Category (using UNCATEGORIZED if no match found)
*/
public static Category uncategorizedValueOf(String name) {
try {
return Category.valueOf(name);
}
catch (IllegalArgumentException e) {
return Category.UNCATEGORIZED;
}
}

}
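// Illustrative usage sketch of the fallback lookup above (input strings are hypothetical):
Category exact = Category.valueOf("TECHNOLOGY");            // throws IllegalArgumentException for unknown names
Category fallback = Category.uncategorizedValueOf("BOGUS"); // falls back to Category.UNCATEGORIZED instead of throwing
String display = fallback.getName();                        // "Uncategorized"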
@@ -1,26 +1,64 @@
|
||||
package org.qortal.arbitrary.misc;
|
||||
|
||||
import org.apache.commons.io.FilenameUtils;
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.arbitrary.ArbitraryDataRenderer;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.utils.FilesystemUtils;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.*;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
import static java.util.Arrays.stream;
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
|
||||
public enum Service {
|
||||
AUTO_UPDATE(1, false, null, null),
|
||||
ARBITRARY_DATA(100, false, null, null),
|
||||
WEBSITE(200, true, null, null) {
|
||||
AUTO_UPDATE(1, false, null, false, null),
|
||||
ARBITRARY_DATA(100, false, null, false, null),
|
||||
QCHAT_ATTACHMENT(120, true, 1024*1024L, true, null) {
|
||||
@Override
|
||||
public ValidationResult validate(Path path) {
|
||||
public ValidationResult validate(Path path) throws IOException {
|
||||
ValidationResult superclassResult = super.validate(path);
|
||||
if (superclassResult != ValidationResult.OK) {
|
||||
return superclassResult;
|
||||
}
|
||||
|
||||
File[] files = path.toFile().listFiles();
|
||||
// If already a single file, replace the list with one that contains that file only
|
||||
if (files == null && path.toFile().isFile()) {
|
||||
files = new File[] { path.toFile() };
|
||||
}
|
||||
// Now validate the file's extension
|
||||
if (files != null && files[0] != null) {
|
||||
final String extension = FilenameUtils.getExtension(files[0].getName()).toLowerCase();
|
||||
// We must allow blank file extensions because these are used by data published from a plaintext or base64-encoded string
|
||||
final List<String> allowedExtensions = Arrays.asList("zip", "pdf", "txt", "odt", "ods", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "");
|
||||
if (extension == null || !allowedExtensions.contains(extension)) {
|
||||
return ValidationResult.INVALID_FILE_EXTENSION;
|
||||
}
|
||||
}
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
},
|
||||
ATTACHMENT(130, false, null, true, null),
|
||||
FILE(140, false, null, true, null),
|
||||
FILES(150, false, null, false, null),
|
||||
CHAIN_DATA(160, true, 239L, true, null),
|
||||
WEBSITE(200, true, null, false, null) {
|
||||
@Override
|
||||
public ValidationResult validate(Path path) throws IOException {
|
||||
ValidationResult superclassResult = super.validate(path);
|
||||
if (superclassResult != ValidationResult.OK) {
|
||||
return superclassResult;
|
||||
}
|
||||
|
||||
// Custom validation function to require an index HTML file in the root directory
|
||||
List<String> fileNames = ArbitraryDataRenderer.indexFiles();
|
||||
String[] files = path.toFile().list();
|
||||
@@ -35,33 +73,113 @@ public enum Service {
|
||||
return ValidationResult.MISSING_INDEX_FILE;
|
||||
}
|
||||
},
|
||||
GIT_REPOSITORY(300, false, null, null),
|
||||
IMAGE(400, true, 10*1024*1024L, null),
|
||||
THUMBNAIL(410, true, 500*1024L, null),
|
||||
VIDEO(500, false, null, null),
|
||||
AUDIO(600, false, null, null),
|
||||
BLOG(700, false, null, null),
|
||||
BLOG_POST(777, false, null, null),
|
||||
BLOG_COMMENT(778, false, null, null),
|
||||
DOCUMENT(800, false, null, null),
|
||||
LIST(900, true, null, null),
|
||||
PLAYLIST(910, true, null, null),
|
||||
APP(1000, false, null, null),
|
||||
METADATA(1100, false, null, null),
|
||||
QORTAL_METADATA(1111, true, 10*1024L, Arrays.asList("title", "description", "tags"));
|
||||
GIT_REPOSITORY(300, false, null, false, null),
|
||||
IMAGE(400, true, 10*1024*1024L, true, null),
|
||||
THUMBNAIL(410, true, 500*1024L, true, null),
|
||||
QCHAT_IMAGE(420, true, 500*1024L, true, null),
|
||||
VIDEO(500, false, null, true, null),
|
||||
AUDIO(600, false, null, true, null),
|
||||
QCHAT_AUDIO(610, true, 10*1024*1024L, true, null),
|
||||
QCHAT_VOICE(620, true, 10*1024*1024L, true, null),
|
||||
VOICE(630, true, 10*1024*1024L, true, null),
|
||||
PODCAST(640, false, null, true, null),
|
||||
BLOG(700, false, null, false, null),
|
||||
BLOG_POST(777, false, null, true, null),
|
||||
BLOG_COMMENT(778, true, 500*1024L, true, null),
|
||||
DOCUMENT(800, false, null, true, null),
|
||||
LIST(900, true, null, true, null),
|
||||
PLAYLIST(910, true, null, true, null),
|
||||
APP(1000, true, 50*1024*1024L, false, null),
|
||||
METADATA(1100, false, null, true, null),
|
||||
JSON(1110, true, 25*1024L, true, null) {
|
||||
@Override
|
||||
public ValidationResult validate(Path path) throws IOException {
|
||||
ValidationResult superclassResult = super.validate(path);
|
||||
if (superclassResult != ValidationResult.OK) {
|
||||
return superclassResult;
|
||||
}
|
||||
|
||||
// Require valid JSON
|
||||
byte[] data = FilesystemUtils.getSingleFileContents(path);
|
||||
String json = new String(data, StandardCharsets.UTF_8);
|
||||
try {
|
||||
objectMapper.readTree(json);
|
||||
return ValidationResult.OK;
|
||||
} catch (IOException e) {
|
||||
return ValidationResult.INVALID_CONTENT;
|
||||
}
|
||||
}
|
||||
},
|
||||
GIF_REPOSITORY(1200, true, 25*1024*1024L, false, null) {
|
||||
@Override
|
||||
public ValidationResult validate(Path path) throws IOException {
|
||||
ValidationResult superclassResult = super.validate(path);
|
||||
if (superclassResult != ValidationResult.OK) {
|
||||
return superclassResult;
|
||||
}
|
||||
|
||||
// Custom validation function to require .gif files only, and at least 1
|
||||
int gifCount = 0;
|
||||
File[] files = path.toFile().listFiles();
|
||||
// If already a single file, replace the list with one that contains that file only
|
||||
if (files == null && path.toFile().isFile()) {
|
||||
files = new File[] { path.toFile() };
|
||||
}
|
||||
if (files != null) {
|
||||
for (File file : files) {
|
||||
if (file.getName().equals(".qortal")) {
|
||||
continue;
|
||||
}
|
||||
if (file.isDirectory()) {
|
||||
return ValidationResult.DIRECTORIES_NOT_ALLOWED;
|
||||
}
|
||||
String extension = FilenameUtils.getExtension(file.getName()).toLowerCase();
|
||||
if (!Objects.equals(extension, "gif")) {
|
||||
return ValidationResult.INVALID_FILE_EXTENSION;
|
||||
}
|
||||
gifCount++;
|
||||
}
|
||||
}
|
||||
if (gifCount == 0) {
|
||||
return ValidationResult.MISSING_DATA;
|
||||
}
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
},
|
||||
STORE(1300, false, null, true, null),
|
||||
PRODUCT(1310, false, null, true, null),
|
||||
OFFER(1330, false, null, true, null),
|
||||
COUPON(1340, false, null, true, null),
|
||||
CODE(1400, false, null, true, null),
|
||||
PLUGIN(1410, false, null, true, null),
|
||||
EXTENSION(1420, false, null, true, null),
|
||||
GAME(1500, false, null, false, null),
|
||||
ITEM(1510, false, null, true, null),
|
||||
NFT(1600, false, null, true, null),
|
||||
DATABASE(1700, false, null, false, null),
|
||||
SNAPSHOT(1710, false, null, false, null),
|
||||
COMMENT(1800, true, 500*1024L, true, null),
|
||||
CHAIN_COMMENT(1810, true, 239L, true, null),
|
||||
MAIL(1900, true, 1024*1024L, true, null),
|
||||
MESSAGE(1910, true, 1024*1024L, true, null);
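// How to read the constructor arguments above - (value, requiresValidation, maxSize, single, requiredKeys) -
// using one of the new entries as an example:
//   QCHAT_ATTACHMENT(120, true, 1024*1024L, true, null)
//     -> service id 120, validated on publish, capped at 1 MiB, must be a single file, no required JSON keys.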
|
||||
|
||||
public final int value;
|
||||
private final boolean requiresValidation;
|
||||
private final Long maxSize;
|
||||
private final boolean single;
|
||||
private final List<String> requiredKeys;
|
||||
|
||||
private static final Map<Integer, Service> map = stream(Service.values())
|
||||
.collect(toMap(service -> service.value, service -> service));
|
||||
|
||||
Service(int value, boolean requiresValidation, Long maxSize, List<String> requiredKeys) {
|
||||
// For JSON validation
|
||||
private static final ObjectMapper objectMapper = new ObjectMapper();
|
||||
|
||||
Service(int value, boolean requiresValidation, Long maxSize, boolean single, List<String> requiredKeys) {
|
||||
this.value = value;
|
||||
this.requiresValidation = requiresValidation;
|
||||
this.maxSize = maxSize;
|
||||
this.single = single;
|
||||
this.requiredKeys = requiredKeys;
|
||||
}
|
||||
|
||||
@@ -80,6 +198,11 @@ public enum Service {
|
||||
}
|
||||
}
|
||||
|
||||
// Validate file count if needed
|
||||
if (this.single && data == null) {
|
||||
return ValidationResult.INVALID_FILE_COUNT;
|
||||
}
|
||||
|
||||
// Validate required keys if needed
|
||||
if (this.requiredKeys != null) {
|
||||
if (data == null) {
|
||||
@@ -106,7 +229,7 @@ public enum Service {
|
||||
}
|
||||
|
||||
public static JSONObject toJsonObject(byte[] data) {
|
||||
String dataString = new String(data);
|
||||
String dataString = new String(data, StandardCharsets.UTF_8);
|
||||
return new JSONObject(dataString);
|
||||
}
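// Rationale for the change above: new String(data) decodes with the JVM's platform-default
// charset, which varies between systems and can mangle UTF-8 payloads; pinning
// StandardCharsets.UTF_8 keeps JSON parsing deterministic across nodes.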
|
||||
|
||||
@@ -114,7 +237,12 @@ public enum Service {
|
||||
OK(1),
|
||||
MISSING_KEYS(2),
|
||||
EXCEEDS_SIZE_LIMIT(3),
|
||||
MISSING_INDEX_FILE(4);
|
||||
MISSING_INDEX_FILE(4),
|
||||
DIRECTORIES_NOT_ALLOWED(5),
|
||||
INVALID_FILE_EXTENSION(6),
|
||||
MISSING_DATA(7),
|
||||
INVALID_FILE_COUNT(8),
|
||||
INVALID_CONTENT(9);
|
||||
|
||||
public final int value;
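// Illustrative caller-side sketch of the per-service validation overrides shown above
// (the path is hypothetical; callers are assumed to handle the declared IOException):
java.nio.file.Path gifDir = java.nio.file.Paths.get("/tmp/my-gifs");
Service.ValidationResult result = Service.GIF_REPOSITORY.validate(gifDir);
if (result != Service.ValidationResult.OK)
    throw new IllegalStateException("Data rejected: " + result.name());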
|
||||
|
||||
|
||||
@@ -551,7 +551,7 @@ public class QortalATAPI extends API {
* <p>
* Otherwise, assume B is a public key.
*/
private Account getAccountFromB(MachineState state) {
/*package*/ Account getAccountFromB(MachineState state) {
byte[] bBytes = this.getB(state);

if ((bBytes[0] == Crypto.ADDRESS_VERSION || bBytes[0] == Crypto.AT_ADDRESS_VERSION)
@@ -10,9 +10,11 @@ import org.ciyam.at.ExecutionException;
|
||||
import org.ciyam.at.FunctionData;
|
||||
import org.ciyam.at.IllegalFunctionCodeException;
|
||||
import org.ciyam.at.MachineState;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.crosschain.Bitcoin;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.settings.Settings;
|
||||
|
||||
/**
|
||||
@@ -160,6 +162,68 @@ public enum QortalFunctionCode {
|
||||
protected void postCheckExecute(FunctionData functionData, MachineState state, short rawFunctionCode) throws ExecutionException {
|
||||
convertAddressInB(Crypto.ADDRESS_VERSION, state);
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Returns account level of account in B.<br>
|
||||
* <tt>0x0520</tt><br>
|
||||
* B should contain either Qortal address or public key,<br>
|
||||
* e.g. as a result of calling function {@link org.ciyam.at.FunctionCode#PUT_ADDRESS_FROM_TX_IN_A_INTO_B}.
|
||||
* <p></p>
|
||||
* Returns account level, or -1 if account unknown.
|
||||
* <p></p>
|
||||
* @see QortalATAPI#getAccountFromB(MachineState)
|
||||
*/
|
||||
GET_ACCOUNT_LEVEL_FROM_ACCOUNT_IN_B(0x0520, 0, true) {
|
||||
@Override
|
||||
protected void postCheckExecute(FunctionData functionData, MachineState state, short rawFunctionCode) throws ExecutionException {
|
||||
QortalATAPI api = (QortalATAPI) state.getAPI();
|
||||
Account account = api.getAccountFromB(state);
|
||||
|
||||
Integer accountLevel = null;
|
||||
|
||||
if (account != null) {
|
||||
try {
|
||||
accountLevel = account.getLevel();
|
||||
} catch (DataException e) {
|
||||
throw new RuntimeException("AT API unable to fetch account level?", e);
|
||||
}
|
||||
}
|
||||
|
||||
functionData.returnValue = accountLevel != null
|
||||
? accountLevel.longValue()
|
||||
: -1;
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Returns account's minted block count of account in B.<br>
|
||||
* <tt>0x0521</tt><br>
|
||||
* B should contain either Qortal address or public key,<br>
|
||||
* e.g. as a result of calling function {@link org.ciyam.at.FunctionCode#PUT_ADDRESS_FROM_TX_IN_A_INTO_B}.
* <p></p>
* Returns account's minted block count, or -1 if account unknown.
|
||||
* <p></p>
|
||||
* @see QortalATAPI#getAccountFromB(MachineState)
|
||||
*/
|
||||
GET_BLOCKS_MINTED_FROM_ACCOUNT_IN_B(0x0521, 0, true) {
|
||||
@Override
|
||||
protected void postCheckExecute(FunctionData functionData, MachineState state, short rawFunctionCode) throws ExecutionException {
|
||||
QortalATAPI api = (QortalATAPI) state.getAPI();
|
||||
Account account = api.getAccountFromB(state);
|
||||
|
||||
Integer blocksMinted = null;
|
||||
|
||||
if (account != null) {
|
||||
try {
|
||||
blocksMinted = account.getBlocksMinted();
|
||||
} catch (DataException e) {
|
||||
throw new RuntimeException("AT API unable to fetch account's minted block count?", e);
|
||||
}
|
||||
}
|
||||
|
||||
functionData.returnValue = blocksMinted != null
|
||||
? blocksMinted.longValue()
|
||||
: -1;
|
||||
}
|
||||
};
|
||||
|
||||
public final short value;
|
||||
|
||||
@@ -3,18 +3,15 @@ package org.qortal.block;
|
||||
import static java.util.Arrays.stream;
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.math.BigDecimal;
|
||||
import java.math.BigInteger;
|
||||
import java.math.RoundingMode;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.text.DecimalFormat;
|
||||
import java.text.NumberFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.logging.log4j.Level;
|
||||
@@ -28,8 +25,9 @@ import org.qortal.asset.Asset;
|
||||
import org.qortal.at.AT;
|
||||
import org.qortal.block.BlockChain.BlockTimingByHeight;
|
||||
import org.qortal.block.BlockChain.AccountLevelShareBin;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.OnlineAccountsManager;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.crypto.Qortal25519Extras;
|
||||
import org.qortal.data.account.AccountBalanceData;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.account.EligibleQoraHolderData;
|
||||
@@ -91,7 +89,8 @@ public class Block {
|
||||
ONLINE_ACCOUNT_UNKNOWN(71),
|
||||
ONLINE_ACCOUNT_SIGNATURES_MISSING(72),
|
||||
ONLINE_ACCOUNT_SIGNATURES_MALFORMED(73),
|
||||
ONLINE_ACCOUNT_SIGNATURE_INCORRECT(74);
|
||||
ONLINE_ACCOUNT_SIGNATURE_INCORRECT(74),
|
||||
ONLINE_ACCOUNT_NONCE_INCORRECT(75);
|
||||
|
||||
public final int value;
|
||||
|
||||
@@ -124,6 +123,8 @@ public class Block {
|
||||
|
||||
/** Remote/imported/loaded AT states */
|
||||
protected List<ATStateData> atStates;
|
||||
/** Remote hash of AT states - in lieu of full AT state data in {@code atStates} */
|
||||
protected byte[] atStatesHash;
|
||||
/** Locally-generated AT states */
|
||||
protected List<ATStateData> ourAtStates;
|
||||
/** Locally-generated AT fees */
|
||||
@@ -135,7 +136,7 @@ public class Block {
|
||||
}
|
||||
|
||||
/** Lazy-instantiated expanded info on block's online accounts. */
|
||||
private static class ExpandedAccount {
|
||||
public static class ExpandedAccount {
|
||||
private final RewardShareData rewardShareData;
|
||||
private final int sharePercent;
|
||||
private final boolean isRecipientAlsoMinter;
|
||||
@@ -168,6 +169,13 @@ public class Block {
|
||||
}
|
||||
}
|
||||
|
||||
public Account getMintingAccount() {
|
||||
return this.mintingAccount;
|
||||
}
|
||||
public Account getRecipientAccount() {
|
||||
return this.recipientAccount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns share bin for expanded account.
|
||||
* <p>
|
||||
@@ -184,8 +192,11 @@ public class Block {
|
||||
if (accountLevel <= 0)
|
||||
return null; // level 0 isn't included in any share bins
|
||||
|
||||
// Select the correct set of share bins based on block height
|
||||
final BlockChain blockChain = BlockChain.getInstance();
|
||||
final AccountLevelShareBin[] shareBinsByLevel = blockChain.getShareBinsByAccountLevel();
|
||||
final AccountLevelShareBin[] shareBinsByLevel = (blockHeight >= blockChain.getSharesByLevelV2Height()) ?
|
||||
blockChain.getShareBinsByAccountLevelV2() : blockChain.getShareBinsByAccountLevelV1();
|
||||
|
||||
if (accountLevel > shareBinsByLevel.length)
|
||||
return null;
|
||||
|
||||
@@ -198,6 +209,11 @@ public class Block {
|
||||
|
||||
}
|
||||
|
||||
public boolean hasShareBin(AccountLevelShareBin shareBin, int blockHeight) {
|
||||
AccountLevelShareBin ourShareBin = this.getShareBin(blockHeight);
|
||||
return ourShareBin != null && shareBin.id == ourShareBin.id;
|
||||
}
|
||||
|
||||
public long distribute(long accountAmount, Map<String, Long> balanceChanges) {
|
||||
if (this.isRecipientAlsoMinter) {
|
||||
// minter & recipient the same - simpler case
|
||||
@@ -222,11 +238,10 @@ public class Block {
|
||||
return accountAmount;
|
||||
}
|
||||
}
|
||||
|
||||
/** Always use getExpandedAccounts() to access this, as it's lazy-instantiated. */
|
||||
private List<ExpandedAccount> cachedExpandedAccounts = null;
|
||||
|
||||
/** Opportunistic cache of this block's valid online accounts. Only created by call to isValid(). */
|
||||
private List<OnlineAccountData> cachedValidOnlineAccounts = null;
|
||||
/** Opportunistic cache of this block's valid online reward-shares. Only created by call to isValid(). */
|
||||
private List<RewardShareData> cachedOnlineRewardShares = null;
|
||||
|
||||
@@ -261,7 +276,7 @@ public class Block {
|
||||
* Constructs new Block using passed transaction and AT states.
|
||||
* <p>
|
||||
* This constructor typically used when receiving a serialized block over the network.
|
||||
*
|
||||
*
|
||||
* @param repository
|
||||
* @param blockData
|
||||
* @param transactions
|
||||
@@ -287,6 +302,35 @@ public class Block {
|
||||
this.blockData.setTotalFees(totalFees);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs new Block using passed transaction and minimal AT state info.
|
||||
* <p>
|
||||
* This constructor typically used when receiving a serialized block over the network.
|
||||
*
|
||||
* @param repository
|
||||
* @param blockData
|
||||
* @param transactions
|
||||
* @param atStatesHash
|
||||
*/
|
||||
public Block(Repository repository, BlockData blockData, List<TransactionData> transactions, byte[] atStatesHash) {
|
||||
this(repository, blockData);
|
||||
|
||||
this.transactions = new ArrayList<>();
|
||||
|
||||
long totalFees = 0;
|
||||
|
||||
// We have to sum fees too
|
||||
for (TransactionData transactionData : transactions) {
|
||||
this.transactions.add(Transaction.fromData(repository, transactionData));
|
||||
totalFees += transactionData.getFee();
|
||||
}
|
||||
|
||||
this.atStatesHash = atStatesHash;
|
||||
totalFees += this.blockData.getATFees();
|
||||
|
||||
this.blockData.setTotalFees(totalFees);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs new Block with empty transaction list, using passed minter account.
|
||||
*
|
||||
@@ -319,28 +363,47 @@ public class Block {
|
||||
int version = parentBlock.getNextBlockVersion();
|
||||
byte[] reference = parentBlockData.getSignature();
|
||||
|
||||
// Fetch our list of online accounts
|
||||
List<OnlineAccountData> onlineAccounts = Controller.getInstance().getOnlineAccounts();
|
||||
if (onlineAccounts.isEmpty()) {
|
||||
LOGGER.error("No online accounts - not even our own?");
|
||||
// Qortal: minter is always a reward-share, so find actual minter and get their effective minting level
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey());
|
||||
if (minterLevel == 0) {
|
||||
LOGGER.error("Minter effective level returned zero?");
|
||||
return null;
|
||||
}
|
||||
|
||||
// Find newest online accounts timestamp
|
||||
long onlineAccountsTimestamp = 0;
|
||||
for (OnlineAccountData onlineAccountData : onlineAccounts) {
|
||||
if (onlineAccountData.getTimestamp() > onlineAccountsTimestamp)
|
||||
onlineAccountsTimestamp = onlineAccountData.getTimestamp();
|
||||
int height = parentBlockData.getHeight() + 1;
|
||||
long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel);
|
||||
long onlineAccountsTimestamp = OnlineAccountsManager.getCurrentOnlineAccountTimestamp();
|
||||
|
||||
// Fetch our list of online accounts, removing any that are missing a nonce
|
||||
List<OnlineAccountData> onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(onlineAccountsTimestamp);
|
||||
onlineAccounts.removeIf(a -> a.getNonce() == null || a.getNonce() < 0);
|
||||
|
||||
// After feature trigger, remove any online accounts that are level 0
|
||||
if (height >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
|
||||
onlineAccounts.removeIf(a -> {
|
||||
try {
|
||||
return Account.getRewardShareEffectiveMintingLevel(repository, a.getPublicKey()) == 0;
|
||||
} catch (DataException e) {
|
||||
// Something went wrong, so remove the account
|
||||
return true;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (onlineAccounts.isEmpty()) {
|
||||
LOGGER.debug("No online accounts - not even our own?");
|
||||
return null;
|
||||
}
|
||||
|
||||
// Load sorted list of reward share public keys into memory, so that the indexes can be obtained.
|
||||
// This is up to 100x faster than querying each index separately. For 4150 reward share keys, it
|
||||
// was taking around 5000ms to query individually, vs 50ms using this approach.
|
||||
List<byte[]> allRewardSharePublicKeys = repository.getAccountRepository().getRewardSharePublicKeys();
|
||||
|
||||
// Map using index into sorted list of reward-shares as key
|
||||
Map<Integer, OnlineAccountData> indexedOnlineAccounts = new HashMap<>();
|
||||
for (OnlineAccountData onlineAccountData : onlineAccounts) {
|
||||
// Disregard online accounts with different timestamps
|
||||
if (onlineAccountData.getTimestamp() != onlineAccountsTimestamp)
|
||||
continue;
|
||||
|
||||
Integer accountIndex = repository.getAccountRepository().getRewardShareIndex(onlineAccountData.getPublicKey());
|
||||
Integer accountIndex = getRewardShareIndex(onlineAccountData.getPublicKey(), allRewardSharePublicKeys);
|
||||
if (accountIndex == null)
|
||||
// Online account (reward-share) with current timestamp but reward-share cancelled
|
||||
continue;
|
||||
@@ -356,29 +419,43 @@ public class Block {
|
||||
byte[] encodedOnlineAccounts = BlockTransformer.encodeOnlineAccounts(onlineAccountsSet);
|
||||
int onlineAccountsCount = onlineAccountsSet.size();
|
||||
|
||||
// Concatenate online account timestamp signatures (in correct order)
|
||||
byte[] onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH];
|
||||
for (int i = 0; i < onlineAccountsCount; ++i) {
|
||||
Integer accountIndex = accountIndexes.get(i);
|
||||
OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex);
|
||||
System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH);
|
||||
// Collate all signatures
|
||||
Collection<byte[]> signaturesToAggregate = indexedOnlineAccounts.values()
|
||||
.stream()
|
||||
.map(OnlineAccountData::getSignature)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// Aggregated, single signature
|
||||
byte[] onlineAccountsSignatures = Qortal25519Extras.aggregateSignatures(signaturesToAggregate);
|
||||
|
||||
// Add nonces to the end of the online accounts signatures
|
||||
try {
|
||||
// Create ordered list of nonce values
|
||||
List<Integer> nonces = new ArrayList<>();
|
||||
for (int i = 0; i < onlineAccountsCount; ++i) {
|
||||
Integer accountIndex = accountIndexes.get(i);
|
||||
OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex);
|
||||
nonces.add(onlineAccountData.getNonce());
|
||||
}
|
||||
|
||||
// Encode the nonces to a byte array
|
||||
byte[] encodedNonces = BlockTransformer.encodeOnlineAccountNonces(nonces);
|
||||
|
||||
// Append the encoded nonces to the encoded online account signatures
|
||||
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
|
||||
outputStream.write(onlineAccountsSignatures);
|
||||
outputStream.write(encodedNonces);
|
||||
onlineAccountsSignatures = outputStream.toByteArray();
|
||||
}
|
||||
catch (TransformationException | IOException e) {
|
||||
return null;
|
||||
}
|
||||
|
||||
byte[] minterSignature = minter.sign(BlockTransformer.getBytesForMinterSignature(parentBlockData,
|
||||
minter.getPublicKey(), encodedOnlineAccounts));
|
||||
|
||||
// Qortal: minter is always a reward-share, so find actual minter and get their effective minting level
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey());
|
||||
if (minterLevel == 0) {
|
||||
LOGGER.error("Minter effective level returned zero?");
|
||||
return null;
|
||||
}
|
||||
|
||||
long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel);
|
||||
|
||||
int transactionCount = 0;
|
||||
byte[] transactionsSignature = null;
|
||||
int height = parentBlockData.getHeight() + 1;
|
||||
|
||||
int atCount = 0;
|
||||
long atFees = 0;
|
||||
@@ -580,6 +657,10 @@ public class Block {
|
||||
return this.atStates;
|
||||
}
|
||||
|
||||
public byte[] getAtStatesHash() {
|
||||
return this.atStatesHash;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return expanded info on block's online accounts.
|
||||
* <p>
|
||||
@@ -972,6 +1053,15 @@ public class Block {
|
||||
if (onlineRewardShares == null)
|
||||
return ValidationResult.ONLINE_ACCOUNT_UNKNOWN;
|
||||
|
||||
// After feature trigger, require all online account minters to be greater than level 0
|
||||
if (this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
|
||||
List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
|
||||
for (ExpandedAccount account : expandedAccounts) {
|
||||
if (account.getMintingAccount().getEffectiveMintingLevel() == 0)
|
||||
return ValidationResult.ONLINE_ACCOUNTS_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
// If block is past a certain age then we simply assume the signatures were correct
|
||||
long signatureRequirementThreshold = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMinLifetime();
|
||||
if (this.blockData.getTimestamp() < signatureRequirementThreshold)
|
||||
@@ -980,49 +1070,64 @@ public class Block {
|
||||
if (this.blockData.getOnlineAccountsSignatures() == null || this.blockData.getOnlineAccountsSignatures().length == 0)
|
||||
return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MISSING;
|
||||
|
||||
if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH)
|
||||
final int signaturesLength = Transformer.SIGNATURE_LENGTH;
|
||||
final int noncesLength = onlineRewardShares.size() * Transformer.INT_LENGTH;
|
||||
|
||||
// We expect nonces to be appended to the online accounts signatures
|
||||
if (this.blockData.getOnlineAccountsSignatures().length != signaturesLength + noncesLength)
|
||||
return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED;
|
||||
|
||||
// Check signatures
|
||||
long onlineTimestamp = this.blockData.getOnlineAccountsTimestamp();
|
||||
byte[] onlineTimestampBytes = Longs.toByteArray(onlineTimestamp);
|
||||
|
||||
// If this block is much older than current online timestamp, then there's no point checking current online accounts
|
||||
List<OnlineAccountData> currentOnlineAccounts = onlineTimestamp < NTP.getTime() - Controller.ONLINE_TIMESTAMP_MODULUS
|
||||
? null
|
||||
: Controller.getInstance().getOnlineAccounts();
|
||||
List<OnlineAccountData> latestBlocksOnlineAccounts = Controller.getInstance().getLatestBlocksOnlineAccounts();
|
||||
byte[] encodedOnlineAccountSignatures = this.blockData.getOnlineAccountsSignatures();
|
||||
|
||||
// Extract online accounts' timestamp signatures from block data
|
||||
List<byte[]> onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures());
|
||||
// Split online account signatures into signature(s) + nonces, then validate the nonces
|
||||
byte[] extractedSignatures = BlockTransformer.extract(encodedOnlineAccountSignatures, 0, signaturesLength);
|
||||
byte[] extractedNonces = BlockTransformer.extract(encodedOnlineAccountSignatures, signaturesLength, onlineRewardShares.size() * Transformer.INT_LENGTH);
|
||||
encodedOnlineAccountSignatures = extractedSignatures;
|
||||
|
||||
// We'll build up a list of online accounts to hand over to Controller if block is added to chain
|
||||
// and this will become latestBlocksOnlineAccounts (above) to reduce CPU load when we process next block...
|
||||
List<OnlineAccountData> ourOnlineAccounts = new ArrayList<>();
|
||||
List<Integer> nonces = BlockTransformer.decodeOnlineAccountNonces(extractedNonces);
|
||||
|
||||
for (int i = 0; i < onlineAccountsSignatures.size(); ++i) {
|
||||
byte[] signature = onlineAccountsSignatures.get(i);
|
||||
// Build block's view of online accounts (without signatures, as we don't need them here)
|
||||
Set<OnlineAccountData> onlineAccounts = new HashSet<>();
|
||||
for (int i = 0; i < onlineRewardShares.size(); ++i) {
|
||||
Integer nonce = nonces.get(i);
|
||||
byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey();
|
||||
|
||||
OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey);
|
||||
ourOnlineAccounts.add(onlineAccountData);
|
||||
|
||||
// If signature is still current then no need to perform Ed25519 verify
|
||||
if (currentOnlineAccounts != null && currentOnlineAccounts.remove(onlineAccountData))
|
||||
// remove() returned true, so online account still current
|
||||
// and one less entry in currentOnlineAccounts to check next time
|
||||
continue;
|
||||
|
||||
// If signature was okay in latest block then no need to perform Ed25519 verify
|
||||
if (latestBlocksOnlineAccounts != null && latestBlocksOnlineAccounts.contains(onlineAccountData))
|
||||
continue;
|
||||
|
||||
if (!Crypto.verify(publicKey, signature, onlineTimestampBytes))
|
||||
return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT;
|
||||
OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, null, publicKey, nonce);
|
||||
onlineAccounts.add(onlineAccountData);
|
||||
}
|
||||
|
||||
// Remove those already validated & cached by online accounts manager - no need to re-validate them
|
||||
OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp);
|
||||
|
||||
// Validate the rest
|
||||
for (OnlineAccountData onlineAccount : onlineAccounts)
|
||||
if (!OnlineAccountsManager.getInstance().verifyMemoryPoW(onlineAccount, null))
|
||||
return ValidationResult.ONLINE_ACCOUNT_NONCE_INCORRECT;
|
||||
|
||||
// Cache the valid online accounts as they will likely be needed for the next block
|
||||
OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp);
|
||||
|
||||
// Extract online accounts' timestamp signatures from block data. Only one signature if aggregated.
|
||||
List<byte[]> onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(encodedOnlineAccountSignatures);
|
||||
|
||||
// Aggregate all public keys
|
||||
Collection<byte[]> publicKeys = onlineRewardShares.stream()
|
||||
.map(RewardShareData::getRewardSharePublicKey)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
byte[] aggregatePublicKey = Qortal25519Extras.aggregatePublicKeys(publicKeys);
|
||||
|
||||
byte[] aggregateSignature = onlineAccountsSignatures.get(0);
|
||||
|
||||
// One-step verification of aggregate signature using aggregate public key
|
||||
if (!Qortal25519Extras.verifyAggregated(aggregatePublicKey, aggregateSignature, onlineTimestampBytes))
|
||||
return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT;
|
||||
|
||||
// All online accounts valid, so save our list of online accounts for potential later use
|
||||
this.cachedValidOnlineAccounts = ourOnlineAccounts;
|
||||
this.cachedOnlineRewardShares = onlineRewardShares;
|
||||
|
||||
return ValidationResult.OK;
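// Recap of the encoding validated above (sizes come from Transformer constants; a standard
// Ed25519 signature is 64 bytes and a Java int is 4 bytes):
//   onlineAccountsSignatures = [ aggregated signature ][ nonce #1 ][ nonce #2 ] ... [ nonce #N ]
// where N == onlineRewardShares.size(), so the field length must equal
// Transformer.SIGNATURE_LENGTH + N * Transformer.INT_LENGTH or the block fails with
// ONLINE_ACCOUNT_SIGNATURES_MALFORMED before any signature verification runs.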
|
||||
@@ -1169,6 +1274,7 @@ public class Block {
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("DataException during transaction validation", e);
|
||||
return ValidationResult.TRANSACTION_INVALID;
|
||||
} finally {
|
||||
// Rollback repository changes made by test-processing transactions above
|
||||
@@ -1195,7 +1301,7 @@ public class Block {
|
||||
*/
|
||||
private ValidationResult areAtsValid() throws DataException {
|
||||
// Locally generated AT states should be valid so no need to re-execute them
|
||||
if (this.ourAtStates == this.getATStates()) // Note object reference compare
|
||||
if (this.ourAtStates != null && this.ourAtStates == this.atStates) // Note object reference compare
|
||||
return ValidationResult.OK;
|
||||
|
||||
// Generate local AT states for comparison
|
||||
@@ -1209,8 +1315,33 @@ public class Block {
|
||||
if (this.ourAtFees != this.blockData.getATFees())
|
||||
return ValidationResult.AT_STATES_MISMATCH;
|
||||
|
||||
// Note: this.atStates fully loaded thanks to this.getATStates() call above
|
||||
for (int s = 0; s < this.atStates.size(); ++s) {
|
||||
// If we have a single AT states hash then compare that in preference
|
||||
if (this.atStatesHash != null) {
|
||||
int atBytesLength = blockData.getATCount() * BlockTransformer.AT_ENTRY_LENGTH;
|
||||
ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength);
|
||||
|
||||
try {
|
||||
for (ATStateData atStateData : this.ourAtStates) {
|
||||
atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
|
||||
atHashBytes.write(atStateData.getStateHash());
|
||||
atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new DataException("Couldn't validate AT states hash due to serialization issue?", e);
|
||||
}
|
||||
|
||||
byte[] ourAtStatesHash = Crypto.digest(atHashBytes.toByteArray());
|
||||
if (!Arrays.equals(ourAtStatesHash, this.atStatesHash))
|
||||
return ValidationResult.AT_STATES_MISMATCH;
|
||||
|
||||
// Use our AT state data from now on
|
||||
this.atStates = this.ourAtStates;
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
|
||||
// Note: this.atStates fully loaded thanks to this.getATStates() call:
|
||||
this.getATStates();
|
||||
for (int s = 0; s < this.ourAtStates.size(); ++s) {
|
||||
ATStateData ourAtState = this.ourAtStates.get(s);
|
||||
ATStateData theirAtState = this.atStates.get(s);
|
||||
|
||||
@@ -1336,6 +1467,9 @@ public class Block {
|
||||
if (this.blockData.getHeight() == 212937)
|
||||
// Apply fix for block 212937
|
||||
Block212937.processFix(this);
|
||||
|
||||
else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height())
|
||||
SelfSponsorshipAlgoV1Block.processAccountPenalties(this);
|
||||
}
|
||||
|
||||
// We're about to (test-)process a batch of transactions,
|
||||
@@ -1368,9 +1502,6 @@ public class Block {
|
||||
|
||||
postBlockTidy();
|
||||
|
||||
// Give Controller our cached, valid online accounts data (if any) to help reduce CPU load for next block
|
||||
Controller.getInstance().pushLatestBlocksOnlineAccounts(this.cachedValidOnlineAccounts);
|
||||
|
||||
// Log some debugging info relating to the block weight calculation
|
||||
this.logDebugInfo();
|
||||
}
|
||||
@@ -1395,19 +1526,23 @@ public class Block {
|
||||
// Batch update in repository
|
||||
repository.getAccountRepository().modifyMintedBlockCounts(allUniqueExpandedAccounts.stream().map(AccountData::getAddress).collect(Collectors.toList()), +1);
|
||||
|
||||
// Keep track of level bumps in case we need to apply to other entries
|
||||
Map<String, Integer> bumpedAccounts = new HashMap<>();
|
||||
|
||||
// Local changes and also checks for level bump
|
||||
for (AccountData accountData : allUniqueExpandedAccounts) {
|
||||
// Adjust count locally (in Java)
|
||||
accountData.setBlocksMinted(accountData.getBlocksMinted() + 1);
|
||||
LOGGER.trace(() -> String.format("Block minter %s up to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment();
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
|
||||
|
||||
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
|
||||
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
|
||||
if (newLevel > accountData.getLevel()) {
|
||||
// Account has increased in level!
|
||||
accountData.setLevel(newLevel);
|
||||
bumpedAccounts.put(accountData.getAddress(), newLevel);
|
||||
repository.getAccountRepository().setLevel(accountData);
|
||||
LOGGER.trace(() -> String.format("Block minter %s bumped to level %d", accountData.getAddress(), accountData.getLevel()));
|
||||
}
|
||||
@@ -1415,6 +1550,25 @@ public class Block {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Also bump other entries if need be
|
||||
if (!bumpedAccounts.isEmpty()) {
|
||||
for (ExpandedAccount expandedAccount : expandedAccounts) {
|
||||
Integer newLevel = bumpedAccounts.get(expandedAccount.mintingAccountData.getAddress());
|
||||
if (newLevel != null && expandedAccount.mintingAccountData.getLevel() != newLevel) {
|
||||
expandedAccount.mintingAccountData.setLevel(newLevel);
|
||||
LOGGER.trace("Also bumped {} to level {}", expandedAccount.mintingAccountData.getAddress(), newLevel);
|
||||
}
|
||||
|
||||
if (!expandedAccount.isRecipientAlsoMinter) {
|
||||
newLevel = bumpedAccounts.get(expandedAccount.recipientAccountData.getAddress());
|
||||
if (newLevel != null && expandedAccount.recipientAccountData.getLevel() != newLevel) {
|
||||
expandedAccount.recipientAccountData.setLevel(newLevel);
|
||||
LOGGER.trace("Also bumped {} to level {}", expandedAccount.recipientAccountData.getAddress(), newLevel);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void processBlockRewards() throws DataException {
|
||||
@@ -1574,6 +1728,9 @@ public class Block {
|
||||
// Revert fix for block 212937
|
||||
Block212937.orphanFix(this);
|
||||
|
||||
else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height())
|
||||
SelfSponsorshipAlgoV1Block.orphanAccountPenalties(this);
|
||||
|
||||
// Block rewards, including transaction fees, removed after transactions undone
|
||||
orphanBlockRewards();
|
||||
|
||||
@@ -1586,9 +1743,6 @@ public class Block {
|
||||
this.blockData.setHeight(null);
|
||||
|
||||
postBlockTidy();
|
||||
|
||||
// Remove any cached, valid online accounts data from Controller
|
||||
Controller.getInstance().popLatestBlocksOnlineAccounts();
|
||||
}
|
||||
|
||||
protected void orphanTransactionsFromBlock() throws DataException {
|
||||
@@ -1705,7 +1859,7 @@ public class Block {
|
||||
accountData.setBlocksMinted(accountData.getBlocksMinted() - 1);
|
||||
LOGGER.trace(() -> String.format("Block minter %s down to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment();
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
|
||||
|
||||
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
|
||||
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
|
||||
@@ -1825,13 +1979,72 @@ public class Block {
|
||||
final List<ExpandedAccount> onlineFounderAccounts = expandedAccounts.stream().filter(expandedAccount -> expandedAccount.isMinterFounder).collect(Collectors.toList());
|
||||
final boolean haveFounders = !onlineFounderAccounts.isEmpty();
|
||||
|
||||
// Select the correct set of share bins based on block height
|
||||
List<AccountLevelShareBin> accountLevelShareBinsForBlock = (this.blockData.getHeight() >= BlockChain.getInstance().getSharesByLevelV2Height()) ?
|
||||
BlockChain.getInstance().getAccountLevelShareBinsV2() : BlockChain.getInstance().getAccountLevelShareBinsV1();
|
||||
|
||||
// Determine reward candidates based on account level
|
||||
List<AccountLevelShareBin> accountLevelShareBins = BlockChain.getInstance().getAccountLevelShareBins();
|
||||
for (int binIndex = 0; binIndex < accountLevelShareBins.size(); ++binIndex) {
|
||||
// Find all accounts in share bin. getShareBin() returns null for minter accounts that are also founders, so they are effectively filtered out.
|
||||
// This needs a deep copy, so the shares can be modified when tiers aren't activated yet
|
||||
List<AccountLevelShareBin> accountLevelShareBins = new ArrayList<>();
|
||||
for (AccountLevelShareBin accountLevelShareBin : accountLevelShareBinsForBlock) {
|
||||
accountLevelShareBins.add((AccountLevelShareBin) accountLevelShareBin.clone());
|
||||
}
|
||||
|
||||
Map<Integer, List<ExpandedAccount>> accountsForShareBin = new HashMap<>();
|
||||
|
||||
// We might need to combine some share bins if they haven't reached the minimum number of minters yet
|
||||
for (int binIndex = accountLevelShareBins.size()-1; binIndex >= 0; --binIndex) {
|
||||
AccountLevelShareBin accountLevelShareBin = accountLevelShareBins.get(binIndex);
|
||||
// Object reference compare is OK as all references are read-only from blockchain config.
|
||||
List<ExpandedAccount> binnedAccounts = expandedAccounts.stream().filter(accountInfo -> accountInfo.getShareBin(this.blockData.getHeight()) == accountLevelShareBin).collect(Collectors.toList());
|
||||
|
||||
// Find all accounts in share bin. getShareBin() returns null for minter accounts that are also founders, so they are effectively filtered out.
|
||||
List<ExpandedAccount> binnedAccounts = expandedAccounts.stream().filter(accountInfo -> accountInfo.hasShareBin(accountLevelShareBin, this.blockData.getHeight())).collect(Collectors.toList());
|
||||
// Add any accounts that have been moved down from a higher tier
|
||||
List<ExpandedAccount> existingBinnedAccounts = accountsForShareBin.get(binIndex);
|
||||
if (existingBinnedAccounts != null)
|
||||
binnedAccounts.addAll(existingBinnedAccounts);
|
||||
|
||||
// Logic below may only apply to higher levels, and only for share bins with a specific range of online accounts
|
||||
if (accountLevelShareBin.levels.get(0) < BlockChain.getInstance().getShareBinActivationMinLevel() ||
|
||||
binnedAccounts.isEmpty() || binnedAccounts.size() >= BlockChain.getInstance().getMinAccountsToActivateShareBin()) {
|
||||
// Add all accounts for this share bin to the accountsForShareBin list
|
||||
accountsForShareBin.put(binIndex, binnedAccounts);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Share bin contains more than one, but less than the minimum number of minters. We treat this share bin
|
||||
// as not activated yet. In these cases, the rewards and minters are combined and paid out to the previous
|
||||
// share bin, to prevent a single or handful of accounts receiving the entire rewards for a share bin.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// - Share bin for levels 5 and 6 has 100 minters
|
||||
// - Share bin for levels 7 and 8 has 10 minters
|
||||
//
|
||||
// This is below the minimum of 30, so share bins are reconstructed as follows:
|
||||
//
|
||||
// - Share bin for levels 5 and 6 now contains 110 minters
|
||||
// - Share bin for levels 7 and 8 now contains 0 minters
|
||||
// - Share bin for levels 5 and 6 now pays out rewards for levels 5, 6, 7, and 8
|
||||
// - Share bin for levels 7 and 8 pays zero rewards
|
||||
//
|
||||
// This process is iterative, so will combine several tiers if needed.
|
||||
|
||||
// Designate this share bin as empty
|
||||
accountsForShareBin.put(binIndex, new ArrayList<>());
|
||||
|
||||
// Move the accounts originally intended for this share bin to the previous one
|
||||
accountsForShareBin.put(binIndex - 1, binnedAccounts);
|
||||
|
||||
// Move the block reward from this share bin to the previous one
|
||||
AccountLevelShareBin previousShareBin = accountLevelShareBins.get(binIndex - 1);
|
||||
previousShareBin.share += accountLevelShareBin.share;
|
||||
accountLevelShareBin.share = 0L;
|
||||
}
|
||||
|
||||
// Now loop through (potentially modified) share bins and determine the reward candidates
|
||||
for (int binIndex = 0; binIndex < accountLevelShareBins.size(); ++binIndex) {
|
||||
AccountLevelShareBin accountLevelShareBin = accountLevelShareBins.get(binIndex);
|
||||
List<ExpandedAccount> binnedAccounts = accountsForShareBin.get(binIndex);
|
||||
|
||||
// No online accounts in this bin? Skip to next one
|
||||
if (binnedAccounts.isEmpty())
|
||||
@@ -1849,7 +2062,7 @@ public class Block {
|
||||
// Fetch list of legacy QORA holders who haven't reached their cap of QORT reward.
|
||||
List<EligibleQoraHolderData> qoraHolders = this.repository.getAccountRepository().getEligibleLegacyQoraHolders(isProcessingNotOrphaning ? null : this.blockData.getHeight());
|
||||
final boolean haveQoraHolders = !qoraHolders.isEmpty();
|
||||
final long qoraHoldersShare = BlockChain.getInstance().getQoraHoldersShare();
|
||||
final long qoraHoldersShare = BlockChain.getInstance().getQoraHoldersShareAtHeight(this.blockData.getHeight());
|
||||
|
||||
// Perform account-level-based reward scaling if appropriate
|
||||
if (!haveFounders) {
|
||||
@@ -2029,6 +2242,26 @@ public class Block {
|
||||
this.repository.getAccountRepository().tidy();
|
||||
}
|
||||
|
||||
// Utils
|
||||
|
||||
/**
|
||||
* Find index of rewardSharePublicKey in list of rewardSharePublicKeys
|
||||
*
|
||||
* @param rewardSharePublicKey - the key to query
|
||||
* @param rewardSharePublicKeys - a sorted list of keys
|
||||
* @return - the index of the key, or null if not found
|
||||
*/
|
||||
private static Integer getRewardShareIndex(byte[] rewardSharePublicKey, List<byte[]> rewardSharePublicKeys) {
|
||||
int index = 0;
|
||||
for (byte[] publicKey : rewardSharePublicKeys) {
|
||||
if (Arrays.equals(rewardSharePublicKey, publicKey)) {
|
||||
return index;
|
||||
}
|
||||
index++;
|
||||
}
|
||||
return null;
|
||||
}
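// Optional refinement (sketch only, not part of the diff): since getRewardSharePublicKeys()
// returns a sorted list, the linear scan above could be replaced with a binary search.
// The unsigned lexicographic comparator is an assumption about the list's sort order.
private static Integer getRewardShareIndexViaBinarySearch(byte[] rewardSharePublicKey, List<byte[]> sortedKeys) {
    int low = 0, high = sortedKeys.size() - 1;
    while (low <= high) {
        int mid = (low + high) >>> 1;
        int cmp = Arrays.compareUnsigned(sortedKeys.get(mid), rewardSharePublicKey);
        if (cmp == 0)
            return mid;
        if (cmp < 0)
            low = mid + 1;
        else
            high = mid - 1;
    }
    return null;
}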
|
||||
|
||||
private void logDebugInfo() {
|
||||
try {
|
||||
// Avoid calculations if possible. We have to check against INFO here, since Level.isMoreSpecificThan() confusingly uses <= rather than just <
|
||||
|
||||
@@ -68,14 +68,28 @@ public class BlockChain {
|
||||
atFindNextTransactionFix,
|
||||
newBlockSigHeight,
|
||||
shareBinFix,
|
||||
sharesByLevelV2Height,
|
||||
rewardShareLimitTimestamp,
|
||||
calcChainWeightTimestamp,
|
||||
transactionV5Timestamp;
|
||||
transactionV5Timestamp,
|
||||
transactionV6Timestamp,
|
||||
disableReferenceTimestamp,
|
||||
increaseOnlineAccountsDifficultyTimestamp,
|
||||
onlineAccountMinterLevelValidationHeight,
|
||||
selfSponsorshipAlgoV1Height,
|
||||
feeValidationFixTimestamp,
|
||||
chatReferenceTimestamp,
|
||||
arbitraryOptionalFeeTimestamp;
|
||||
}
|
||||
|
||||
// Custom transaction fees
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
private long nameRegistrationUnitFee;
|
||||
private long nameRegistrationUnitFeeTimestamp;
|
||||
/** Unit fees by transaction timestamp */
|
||||
public static class UnitFeesByTimestamp {
|
||||
public long timestamp;
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
public long fee;
|
||||
}
|
||||
private List<UnitFeesByTimestamp> nameRegistrationUnitFees;
|
||||
|
||||
/** Map of which blockchain features are enabled when (height/timestamp) */
|
||||
@XmlJavaTypeAdapter(StringLongMapXmlAdapter.class)
|
||||
@@ -87,6 +101,13 @@ public class BlockChain {
|
||||
/** Whether only one registered name is allowed per account. */
|
||||
private boolean oneNamePerAccount = false;
|
||||
|
||||
/** Checkpoints */
|
||||
public static class Checkpoint {
|
||||
public int height;
|
||||
public String signature;
|
||||
}
|
||||
private List<Checkpoint> checkpoints;
|
||||
|
||||
/** Block rewards by block height */
|
||||
public static class RewardByHeight {
|
||||
public int height;
|
||||
@@ -96,23 +117,48 @@ public class BlockChain {
|
||||
private List<RewardByHeight> rewardsByHeight;
|
||||
|
||||
/** Share of block reward/fees by account level */
|
||||
public static class AccountLevelShareBin {
|
||||
public static class AccountLevelShareBin implements Cloneable {
|
||||
public int id;
|
||||
public List<Integer> levels;
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
public long share;
|
||||
}
|
||||
private List<AccountLevelShareBin> sharesByLevel;
|
||||
/** Generated lookup of share-bin by account level */
|
||||
private AccountLevelShareBin[] shareBinsByLevel;
|
||||
|
||||
/** Share of block reward/fees to legacy QORA coin holders */
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
private Long qoraHoldersShare;
|
||||
public Object clone() {
|
||||
AccountLevelShareBin shareBinCopy = new AccountLevelShareBin();
|
||||
List<Integer> levelsCopy = new ArrayList<>();
|
||||
for (Integer level : this.levels) {
|
||||
levelsCopy.add(level);
|
||||
}
|
||||
shareBinCopy.id = this.id;
|
||||
shareBinCopy.levels = levelsCopy;
|
||||
shareBinCopy.share = this.share;
|
||||
return shareBinCopy;
|
||||
}
|
||||
}
|
||||
private List<AccountLevelShareBin> sharesByLevelV1;
|
||||
private List<AccountLevelShareBin> sharesByLevelV2;
|
||||
/** Generated lookup of share-bin by account level */
|
||||
private AccountLevelShareBin[] shareBinsByLevelV1;
|
||||
private AccountLevelShareBin[] shareBinsByLevelV2;
|
||||
|
||||
/** Share of block reward/fees to legacy QORA coin holders, by block height */
|
||||
public static class ShareByHeight {
|
||||
public int height;
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
public long share;
|
||||
}
|
||||
private List<ShareByHeight> qoraHoldersShareByHeight;
|
||||
|
||||
/** How many legacy QORA per 1 QORT of block reward. */
|
||||
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
|
||||
private Long qoraPerQortReward;
|
||||
|
||||
/** Minimum number of accounts before a share bin is considered activated */
|
||||
private int minAccountsToActivateShareBin;
|
||||
|
||||
/** Min level at which share bin activation takes place; lower levels allow fewer than minAccountsToActivateShareBin */
|
||||
private int shareBinActivationMinLevel;
|
||||
|
||||
/**
|
||||
* Number of minted blocks required to reach next level from previous.
|
||||
* <p>
|
||||
@@ -150,7 +196,7 @@ public class BlockChain {
|
||||
private int minAccountLevelToMint;
|
||||
private int minAccountLevelForBlockSubmissions;
|
||||
private int minAccountLevelToRewardShare;
|
||||
private int maxRewardSharesPerMintingAccount;
|
||||
private int maxRewardSharesPerFounderMintingAccount;
|
||||
private int founderEffectiveMintingLevel;
|
||||
|
||||
/** Minimum time to retain online account signatures (ms) for block validity checks. */
|
||||
@@ -158,6 +204,20 @@ public class BlockChain {
|
||||
/** Maximum time to retain online account signatures (ms) for block validity checks, to allow for clock variance. */
|
||||
private long onlineAccountSignaturesMaxLifetime;
|
||||
|
||||
/** Feature trigger timestamp for ONLINE_ACCOUNTS_MODULUS time interval increase. Can't use
|
||||
* featureTriggers because unit tests need to set this value via Reflection. */
|
||||
private long onlineAccountsModulusV2Timestamp;
|
||||
|
||||
/** Snapshot timestamp for self sponsorship algo V1 */
|
||||
private long selfSponsorshipAlgoV1SnapshotTimestamp;
|
||||
|
||||
/** Max reward shares by block height */
|
||||
public static class MaxRewardSharesByTimestamp {
|
||||
public long timestamp;
|
||||
public int maxShares;
|
||||
}
|
||||
private List<MaxRewardSharesByTimestamp> maxRewardSharesByTimestamp;
|
||||
|
||||
/** Settings relating to CIYAM AT feature. */
|
||||
public static class CiyamAtSettings {
|
||||
/** Fee per step/op-code executed. */
|
||||
@@ -306,14 +366,14 @@ public class BlockChain {
|
||||
return this.maxBlockSize;
|
||||
}
|
||||
|
||||
// Custom transaction fees
|
||||
public long getNameRegistrationUnitFee() {
|
||||
return this.nameRegistrationUnitFee;
|
||||
// Online accounts
|
||||
public long getOnlineAccountsModulusV2Timestamp() {
|
||||
return this.onlineAccountsModulusV2Timestamp;
|
||||
}
|
||||
|
||||
public long getNameRegistrationUnitFeeTimestamp() {
|
||||
// FUTURE: we could use a separate structure to indicate fee adjustments for different transaction types
|
||||
return this.nameRegistrationUnitFeeTimestamp;
|
||||
// Self sponsorship algo
|
||||
public long getSelfSponsorshipAlgoV1SnapshotTimestamp() {
|
||||
return this.selfSponsorshipAlgoV1SnapshotTimestamp;
|
||||
}
|
||||
|
||||
/** Returns true if approval-needing transaction types require a txGroupId other than NO_GROUP. */
|
||||
@@ -329,16 +389,28 @@ public class BlockChain {
|
||||
return this.oneNamePerAccount;
|
||||
}
|
||||
|
||||
public List<Checkpoint> getCheckpoints() {
|
||||
return this.checkpoints;
|
||||
}
|
||||
|
||||
public List<RewardByHeight> getBlockRewardsByHeight() {
|
||||
return this.rewardsByHeight;
|
||||
}
|
||||
|
||||
public List<AccountLevelShareBin> getAccountLevelShareBins() {
|
||||
return this.sharesByLevel;
|
||||
public List<AccountLevelShareBin> getAccountLevelShareBinsV1() {
|
||||
return this.sharesByLevelV1;
|
||||
}
|
||||
|
||||
public AccountLevelShareBin[] getShareBinsByAccountLevel() {
|
||||
return this.shareBinsByLevel;
|
||||
public List<AccountLevelShareBin> getAccountLevelShareBinsV2() {
|
||||
return this.sharesByLevelV2;
|
||||
}
|
||||
|
||||
public AccountLevelShareBin[] getShareBinsByAccountLevelV1() {
|
||||
return this.shareBinsByLevelV1;
|
||||
}
|
||||
|
||||
public AccountLevelShareBin[] getShareBinsByAccountLevelV2() {
|
||||
return this.shareBinsByLevelV2;
|
||||
}
|
||||
|
||||
public List<Integer> getBlocksNeededByLevel() {
|
||||
@@ -349,14 +421,18 @@ public class BlockChain {
|
||||
return this.cumulativeBlocksByLevel;
|
||||
}
|
||||
|
||||
public long getQoraHoldersShare() {
|
||||
return this.qoraHoldersShare;
|
||||
}
|
||||
|
||||
public long getQoraPerQortReward() {
|
||||
return this.qoraPerQortReward;
|
||||
}
|
||||
|
||||
public int getMinAccountsToActivateShareBin() {
|
||||
return this.minAccountsToActivateShareBin;
|
||||
}
|
||||
|
||||
public int getShareBinActivationMinLevel() {
|
||||
return this.shareBinActivationMinLevel;
|
||||
}
|
||||
|
||||
public int getMinAccountLevelToMint() {
|
||||
return this.minAccountLevelToMint;
|
||||
}
|
||||
@@ -369,8 +445,8 @@ public class BlockChain {
|
||||
return this.minAccountLevelToRewardShare;
|
||||
}
|
||||
|
||||
public int getMaxRewardSharesPerMintingAccount() {
|
||||
return this.maxRewardSharesPerMintingAccount;
|
||||
public int getMaxRewardSharesPerFounderMintingAccount() {
|
||||
return this.maxRewardSharesPerFounderMintingAccount;
|
||||
}
|
||||
|
||||
public int getFounderEffectiveMintingLevel() {
|
||||
@@ -403,6 +479,14 @@ public class BlockChain {
|
||||
return this.featureTriggers.get(FeatureTrigger.shareBinFix.name()).intValue();
|
||||
}
|
||||
|
||||
public int getSharesByLevelV2Height() {
|
||||
return this.featureTriggers.get(FeatureTrigger.sharesByLevelV2Height.name()).intValue();
|
||||
}
|
||||
|
||||
public long getRewardShareLimitTimestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.rewardShareLimitTimestamp.name()).longValue();
|
||||
}
|
||||
|
||||
public long getCalcChainWeightTimestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.calcChainWeightTimestamp.name()).longValue();
|
||||
}
|
||||
@@ -411,6 +495,39 @@ public class BlockChain {
|
||||
return this.featureTriggers.get(FeatureTrigger.transactionV5Timestamp.name()).longValue();
|
||||
}
|
||||
|
||||
public long getTransactionV6Timestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.transactionV6Timestamp.name()).longValue();
|
||||
}
|
||||
|
||||
public long getDisableReferenceTimestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.disableReferenceTimestamp.name()).longValue();
|
||||
}
|
||||
|
||||
public long getIncreaseOnlineAccountsDifficultyTimestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.increaseOnlineAccountsDifficultyTimestamp.name()).longValue();
|
||||
}
|
||||
|
||||
public int getSelfSponsorshipAlgoV1Height() {
|
||||
return this.featureTriggers.get(FeatureTrigger.selfSponsorshipAlgoV1Height.name()).intValue();
|
||||
}
|
||||
|
||||
public long getOnlineAccountMinterLevelValidationHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.onlineAccountMinterLevelValidationHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public long getFeeValidationFixTimestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.feeValidationFixTimestamp.name()).longValue();
|
||||
}
|
||||
|
||||
public long getChatReferenceTimestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.chatReferenceTimestamp.name()).longValue();
|
||||
}
|
||||
|
||||
public long getArbitraryOptionalFeeTimestamp() {
|
||||
return this.featureTriggers.get(FeatureTrigger.arbitraryOptionalFeeTimestamp.name()).longValue();
|
||||
}
|
||||
|
||||
|
||||
    // More complex getters for aspects that change by height or timestamp

    public long getRewardAtHeight(int ourHeight) {
@@ -430,6 +547,32 @@ public class BlockChain {
        throw new IllegalStateException(String.format("No block timing info available for height %d", ourHeight));
    }

    public long getNameRegistrationUnitFeeAtTimestamp(long ourTimestamp) {
        for (int i = nameRegistrationUnitFees.size() - 1; i >= 0; --i)
            if (nameRegistrationUnitFees.get(i).timestamp <= ourTimestamp)
                return nameRegistrationUnitFees.get(i).fee;

        // Default to system-wide unit fee
        return this.getUnitFee();
    }

    public int getMaxRewardSharesAtTimestamp(long ourTimestamp) {
        for (int i = maxRewardSharesByTimestamp.size() - 1; i >= 0; --i)
            if (maxRewardSharesByTimestamp.get(i).timestamp <= ourTimestamp)
                return maxRewardSharesByTimestamp.get(i).maxShares;

        return 0;
    }

    public long getQoraHoldersShareAtHeight(int ourHeight) {
        // Scan through for QORA share at our height
        for (int i = qoraHoldersShareByHeight.size() - 1; i >= 0; --i)
            if (qoraHoldersShareByHeight.get(i).height <= ourHeight)
                return qoraHoldersShareByHeight.get(i).share;

        return 0;
    }
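The three getters above share one pattern: the config lists are ordered by ascending height or timestamp, and the getter scans backwards to return the value of the latest entry at or below the query point, falling back to a default. A minimal standalone sketch of that pattern (the `Entry` class and sample values here are illustrative, not taken from the real blockchain config):

```java
import java.util.List;

public class ThresholdLookupSketch {
    // Illustrative stand-in for entries such as ShareByHeight or MaxRewardSharesByTimestamp
    static class Entry {
        final long threshold; // height or timestamp from which the value applies
        final long value;
        Entry(long threshold, long value) { this.threshold = threshold; this.value = value; }
    }

    /** Returns the value of the latest entry whose threshold is <= the query, or a default if none applies. */
    static long valueAt(List<Entry> entries, long query, long defaultValue) {
        // Scan backwards so the first match is the most recent applicable entry
        for (int i = entries.size() - 1; i >= 0; --i)
            if (entries.get(i).threshold <= query)
                return entries.get(i).value;

        return defaultValue;
    }

    public static void main(String[] args) {
        // Hypothetical values, purely for demonstration
        List<Entry> shareByHeight = List.of(new Entry(1, 20_000_000L), new Entry(1_000_000, 10_000_000L));

        System.out.println(valueAt(shareByHeight, 5, 0));         // 20000000 - first entry applies
        System.out.println(valueAt(shareByHeight, 2_000_000, 0)); // 10000000 - second entry applies
    }
}
```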
/** Validate blockchain config read from JSON */
|
||||
private void validateConfig() {
|
||||
if (this.genesisInfo == null)
|
||||
@@ -438,11 +581,14 @@ public class BlockChain {
|
||||
if (this.rewardsByHeight == null)
|
||||
Settings.throwValidationError("No \"rewardsByHeight\" entry found in blockchain config");
|
||||
|
||||
if (this.sharesByLevel == null)
|
||||
Settings.throwValidationError("No \"sharesByLevel\" entry found in blockchain config");
|
||||
if (this.sharesByLevelV1 == null)
|
||||
Settings.throwValidationError("No \"sharesByLevelV1\" entry found in blockchain config");
|
||||
|
||||
if (this.qoraHoldersShare == null)
|
||||
Settings.throwValidationError("No \"qoraHoldersShare\" entry found in blockchain config");
|
||||
if (this.sharesByLevelV2 == null)
|
||||
Settings.throwValidationError("No \"sharesByLevelV2\" entry found in blockchain config");
|
||||
|
||||
if (this.qoraHoldersShareByHeight == null)
|
||||
Settings.throwValidationError("No \"qoraHoldersShareByHeight\" entry found in blockchain config");
|
||||
|
||||
if (this.qoraPerQortReward == null)
|
||||
Settings.throwValidationError("No \"qoraPerQortReward\" entry found in blockchain config");
|
||||
@@ -479,13 +625,22 @@ public class BlockChain {
        if (!this.featureTriggers.containsKey(featureTrigger.name()))
            Settings.throwValidationError(String.format("Missing feature trigger \"%s\" in blockchain config", featureTrigger.name()));

        // Check block reward share bounds
        long totalShare = this.qoraHoldersShare;
        // Check block reward share bounds (V1)
        long totalShareV1 = this.qoraHoldersShareByHeight.get(0).share;
        // Add share percents for account-level-based rewards
        for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevel)
            totalShare += accountLevelShareBin.share;
        for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV1)
            totalShareV1 += accountLevelShareBin.share;

        if (totalShare < 0 || totalShare > 1_00000000L)
        if (totalShareV1 < 0 || totalShareV1 > 1_00000000L)
            Settings.throwValidationError("Total non-founder share out of bounds (0<x<1e8)");

        // Check block reward share bounds (V2)
        long totalShareV2 = this.qoraHoldersShareByHeight.get(1).share;
        // Add share percents for account-level-based rewards
        for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV2)
            totalShareV2 += accountLevelShareBin.share;

        if (totalShareV2 < 0 || totalShareV2 > 1_00000000L)
            Settings.throwValidationError("Total non-founder share out of bounds (0<x<1e8)");
    }
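Shares are held as fixed-point longs with eight decimal places, so the bound `1_00000000L` corresponds to 100% (hence the "0<x<1e8" error text). The V1 and V2 checks therefore sum the QORA holders' share with every account-level bin share and reject the config if the total leaves that range. A small illustrative check under those assumptions (the values are made up):

```java
import java.util.List;

public class ShareBoundsSketch {
    private static final long ONE_HUNDRED_PERCENT = 1_00000000L; // fixed-point, 8 decimal places

    /** Returns true if the QORA holders' share plus all bin shares stays within 0..100%. */
    static boolean sharesWithinBounds(long qoraHoldersShare, List<Long> binShares) {
        long total = qoraHoldersShare;
        for (long share : binShares)
            total += share;

        return total >= 0 && total <= ONE_HUNDRED_PERCENT;
    }

    public static void main(String[] args) {
        // Hypothetical: 20% to QORA holders, bins of 5%, 1% and 1%
        System.out.println(sharesWithinBounds(20_000000L, List.of(5_000000L, 1_000000L, 1_000000L))); // true
        // Hypothetical: totals over 100% must be rejected
        System.out.println(sharesWithinBounds(90_000000L, List.of(20_000000L))); // false
    }
}
```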
@@ -501,23 +656,34 @@ public class BlockChain {
            cumulativeBlocks += this.blocksNeededByLevel.get(level);
        }

        // Generate lookup-array for account-level share bins
        AccountLevelShareBin lastAccountLevelShareBin = this.sharesByLevel.get(this.sharesByLevel.size() - 1);
        final int lastLevel = lastAccountLevelShareBin.levels.get(lastAccountLevelShareBin.levels.size() - 1);
        this.shareBinsByLevel = new AccountLevelShareBin[lastLevel];

        for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevel)
        // Generate lookup-array for account-level share bins (V1)
        AccountLevelShareBin lastAccountLevelShareBinV1 = this.sharesByLevelV1.get(this.sharesByLevelV1.size() - 1);
        final int lastLevelV1 = lastAccountLevelShareBinV1.levels.get(lastAccountLevelShareBinV1.levels.size() - 1);
        this.shareBinsByLevelV1 = new AccountLevelShareBin[lastLevelV1];
        for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV1)
            for (int level : accountLevelShareBin.levels)
                // level 1 stored at index 0, level 2 stored at index 1, etc.
                // level 0 not allowed
                this.shareBinsByLevel[level - 1] = accountLevelShareBin;
                this.shareBinsByLevelV1[level - 1] = accountLevelShareBin;

        // Generate lookup-array for account-level share bins (V2)
        AccountLevelShareBin lastAccountLevelShareBinV2 = this.sharesByLevelV2.get(this.sharesByLevelV2.size() - 1);
        final int lastLevelV2 = lastAccountLevelShareBinV2.levels.get(lastAccountLevelShareBinV2.levels.size() - 1);
        this.shareBinsByLevelV2 = new AccountLevelShareBin[lastLevelV2];
        for (AccountLevelShareBin accountLevelShareBin : this.sharesByLevelV2)
            for (int level : accountLevelShareBin.levels)
                // level 1 stored at index 0, level 2 stored at index 1, etc.
                // level 0 not allowed
                this.shareBinsByLevelV2[level - 1] = accountLevelShareBin;

        // Convert collections to unmodifiable form
        this.rewardsByHeight = Collections.unmodifiableList(this.rewardsByHeight);
        this.sharesByLevel = Collections.unmodifiableList(this.sharesByLevel);
        this.sharesByLevelV1 = Collections.unmodifiableList(this.sharesByLevelV1);
        this.sharesByLevelV2 = Collections.unmodifiableList(this.sharesByLevelV2);
        this.blocksNeededByLevel = Collections.unmodifiableList(this.blocksNeededByLevel);
        this.cumulativeBlocksByLevel = Collections.unmodifiableList(this.cumulativeBlocksByLevel);
        this.blockTimingsByHeight = Collections.unmodifiableList(this.blockTimingsByHeight);
        this.qoraHoldersShareByHeight = Collections.unmodifiableList(this.qoraHoldersShareByHeight);
    }
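The generation code above flattens each share bin's `levels` list into an array indexed by `level - 1`, so a minter's bin can later be found with a single array access instead of scanning the list. A self-contained sketch of that indexing (the simplified `Bin` class and level ranges are illustrative only):

```java
import java.util.List;

public class ShareBinIndexSketch {
    static class Bin {
        final String name;
        final List<Integer> levels;
        Bin(String name, List<Integer> levels) { this.name = name; this.levels = levels; }
    }

    /** Builds a lookup array where index (level - 1) holds the bin covering that level; level 0 is not allowed. */
    static Bin[] buildLookup(List<Bin> bins) {
        Bin lastBin = bins.get(bins.size() - 1);
        int lastLevel = lastBin.levels.get(lastBin.levels.size() - 1);

        Bin[] byLevel = new Bin[lastLevel];
        for (Bin bin : bins)
            for (int level : bin.levels)
                byLevel[level - 1] = bin; // level 1 stored at index 0, level 2 at index 1, etc.

        return byLevel;
    }

    public static void main(String[] args) {
        // Hypothetical bins covering levels 1-2, 3-4 and 5-6
        List<Bin> bins = List.of(new Bin("bin1", List.of(1, 2)), new Bin("bin2", List.of(3, 4)), new Bin("bin3", List.of(5, 6)));
        Bin[] byLevel = buildLookup(bins);

        System.out.println(byLevel[3 - 1].name); // bin2 - level 3 resolves in O(1)
    }
}
```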
/**
|
||||
@@ -529,6 +695,7 @@ public class BlockChain {
|
||||
|
||||
boolean isTopOnly = Settings.getInstance().isTopOnly();
|
||||
boolean archiveEnabled = Settings.getInstance().isArchiveEnabled();
|
||||
boolean isLite = Settings.getInstance().isLite();
|
||||
boolean canBootstrap = Settings.getInstance().getBootstrap();
|
||||
boolean needsArchiveRebuild = false;
|
||||
BlockData chainTip;
|
||||
@@ -549,22 +716,44 @@ public class BlockChain {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
        // Validate checkpoints
        // Limited to topOnly nodes for now, in order to reduce risk, and to solve a real-world problem with divergent topOnly nodes
        // TODO: remove the isTopOnly conditional below once this feature has had more testing time
        if (isTopOnly && !isLite) {
            List<Checkpoint> checkpoints = BlockChain.getInstance().getCheckpoints();
            for (Checkpoint checkpoint : checkpoints) {
                BlockData blockData = repository.getBlockRepository().fromHeight(checkpoint.height);
                if (blockData == null) {
                    // Try the archive
                    blockData = repository.getBlockArchiveRepository().fromHeight(checkpoint.height);
                }
                if (blockData == null) {
                    LOGGER.trace("Couldn't find block for height {}", checkpoint.height);
                    // This is likely due to the block being pruned, so is safe to ignore.
                    // Continue, as there might be other blocks we can check more definitively.
                    continue;
                }

                byte[] signature = Base58.decode(checkpoint.signature);
                if (!Arrays.equals(signature, blockData.getSignature())) {
                    LOGGER.info("Error: block at height {} with signature {} doesn't match checkpoint sig: {}. Bootstrapping...", checkpoint.height, Base58.encode(blockData.getSignature()), checkpoint.signature);
                    needsArchiveRebuild = true;
                    break;
                }
                LOGGER.info("Block at height {} matches checkpoint signature", blockData.getHeight());
            }
        }
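The checkpoint pass above boils down to: look each configured height up in the local block store (falling back to the archive), skip it if the block was pruned, and schedule a rebuild if the stored signature differs from the checkpoint's signature. A hedged, repository-free sketch of that comparison (the `Map`-based stores and raw byte-array signatures are stand-ins to avoid pulling in the repository and Base58 layers):

```java
import java.util.Arrays;
import java.util.Map;

public class CheckpointCheckSketch {
    /**
     * Returns true if a rebuild is needed, i.e. some checkpointed height is present locally
     * but its stored signature does not match the expected one. Missing (pruned) heights are ignored.
     */
    static boolean needsRebuild(Map<Integer, byte[]> storedSignaturesByHeight, Map<Integer, byte[]> checkpointSignaturesByHeight) {
        for (Map.Entry<Integer, byte[]> checkpoint : checkpointSignaturesByHeight.entrySet()) {
            byte[] stored = storedSignaturesByHeight.get(checkpoint.getKey());
            if (stored == null)
                continue; // block pruned locally - nothing to verify at this height

            if (!Arrays.equals(stored, checkpoint.getValue()))
                return true; // mismatch - local chain diverges from the checkpoint
        }
        return false;
    }

    public static void main(String[] args) {
        Map<Integer, byte[]> stored = Map.of(1000, new byte[] { 1, 2, 3 });
        Map<Integer, byte[]> checkpoints = Map.of(1000, new byte[] { 9, 9, 9 });
        System.out.println(needsRebuild(stored, checkpoints)); // true - would trigger bootstrap/rebuild
    }
}
```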
}
|
||||
|
||||
boolean hasBlocks = (chainTip != null && chainTip.getHeight() > 1);
|
||||
// Check first block is Genesis Block
|
||||
if (!isGenesisBlockValid() || needsArchiveRebuild) {
|
||||
try {
|
||||
rebuildBlockchain();
|
||||
|
||||
if (isTopOnly && hasBlocks) {
|
||||
// Top-only mode is enabled and we have blocks, so it's possible that the genesis block has been pruned
|
||||
// It's best not to validate it, and there's no real need to
|
||||
} else {
|
||||
// Check first block is Genesis Block
|
||||
if (!isGenesisBlockValid() || needsArchiveRebuild) {
|
||||
try {
|
||||
rebuildBlockchain();
|
||||
|
||||
} catch (InterruptedException e) {
|
||||
throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage()));
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -573,9 +762,7 @@ public class BlockChain {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
repository.checkConsistency();
|
||||
|
||||
// Set the number of blocks to validate based on the pruned state of the chain
|
||||
// If pruned, subtract an extra 10 to allow room for error
|
||||
int blocksToValidate = (isTopOnly || archiveEnabled) ? Settings.getInstance().getPruneBlockLimit() - 10 : 1440;
|
||||
int blocksToValidate = Math.min(Settings.getInstance().getPruneBlockLimit() - 10, 1440);
|
||||
|
||||
int startHeight = Math.max(repository.getBlockRepository().getBlockchainHeight() - blocksToValidate, 1);
|
||||
BlockData detachedBlockData = repository.getBlockRepository().getDetachedBlockSignature(startHeight);
|
||||
|
||||
src/main/java/org/qortal/block/SelfSponsorshipAlgoV1Block.java (new file, 133 lines)
@@ -0,0 +1,133 @@
package org.qortal.block;

import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.account.SelfSponsorshipAlgoV1;
import org.qortal.api.model.AccountPenaltyStats;
import org.qortal.crypto.Crypto;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.AccountPenaltyData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.utils.Base58;

import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.stream.Collectors;

/**
 * Self Sponsorship AlgoV1 Block
 * <p>
 * Selected block for the initial run on the "self sponsorship detection algorithm"
 */
public final class SelfSponsorshipAlgoV1Block {

    private static final Logger LOGGER = LogManager.getLogger(SelfSponsorshipAlgoV1Block.class);


    private SelfSponsorshipAlgoV1Block() {
        /* Do not instantiate */
    }

    public static void processAccountPenalties(Block block) throws DataException {
        LOGGER.info("Running algo for block processing - this will take a while...");
        logPenaltyStats(block.repository);
        long startTime = System.currentTimeMillis();
        Set<AccountPenaltyData> penalties = getAccountPenalties(block.repository, -5000000);
        block.repository.getAccountRepository().updateBlocksMintedPenalties(penalties);
        long totalTime = System.currentTimeMillis() - startTime;
        String hash = getHash(penalties.stream().map(p -> p.getAddress()).collect(Collectors.toList()));
        LOGGER.info("{} penalty addresses processed (hash: {}). Total time taken: {} seconds", penalties.size(), hash, (int)(totalTime / 1000.0f));
        logPenaltyStats(block.repository);

        int updatedCount = updateAccountLevels(block.repository, penalties);
        LOGGER.info("Account levels updated for {} penalty addresses", updatedCount);
    }

    public static void orphanAccountPenalties(Block block) throws DataException {
        LOGGER.info("Running algo for block orphaning - this will take a while...");
        logPenaltyStats(block.repository);
        long startTime = System.currentTimeMillis();
        Set<AccountPenaltyData> penalties = getAccountPenalties(block.repository, 5000000);
        block.repository.getAccountRepository().updateBlocksMintedPenalties(penalties);
        long totalTime = System.currentTimeMillis() - startTime;
        String hash = getHash(penalties.stream().map(p -> p.getAddress()).collect(Collectors.toList()));
        LOGGER.info("{} penalty addresses orphaned (hash: {}). Total time taken: {} seconds", penalties.size(), hash, (int)(totalTime / 1000.0f));
        logPenaltyStats(block.repository);

        int updatedCount = updateAccountLevels(block.repository, penalties);
        LOGGER.info("Account levels updated for {} penalty addresses", updatedCount);
    }

    public static Set<AccountPenaltyData> getAccountPenalties(Repository repository, int penalty) throws DataException {
        final long snapshotTimestamp = BlockChain.getInstance().getSelfSponsorshipAlgoV1SnapshotTimestamp();
        Set<AccountPenaltyData> penalties = new LinkedHashSet<>();
        List<String> addresses = repository.getTransactionRepository().getConfirmedRewardShareCreatorsExcludingSelfShares();
        for (String address : addresses) {
            //System.out.println(String.format("address: %s", address));
            SelfSponsorshipAlgoV1 selfSponsorshipAlgoV1 = new SelfSponsorshipAlgoV1(repository, address, snapshotTimestamp, false);
            selfSponsorshipAlgoV1.run();
            //System.out.println(String.format("Penalty addresses: %d", selfSponsorshipAlgoV1.getPenaltyAddresses().size()));

            for (String penaltyAddress : selfSponsorshipAlgoV1.getPenaltyAddresses()) {
                penalties.add(new AccountPenaltyData(penaltyAddress, penalty));
            }
        }
        return penalties;
    }

    private static int updateAccountLevels(Repository repository, Set<AccountPenaltyData> accountPenalties) throws DataException {
        final List<Integer> cumulativeBlocksByLevel = BlockChain.getInstance().getCumulativeBlocksByLevel();
        final int maximumLevel = cumulativeBlocksByLevel.size() - 1;

        int updatedCount = 0;

        for (AccountPenaltyData penaltyData : accountPenalties) {
            AccountData accountData = repository.getAccountRepository().getAccount(penaltyData.getAddress());
            final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();

            // Shortcut for penalties
            if (effectiveBlocksMinted < 0) {
                accountData.setLevel(0);
                repository.getAccountRepository().setLevel(accountData);
                updatedCount++;
                LOGGER.trace(() -> String.format("Block minter %s dropped to level %d", accountData.getAddress(), accountData.getLevel()));
                continue;
            }

            for (int newLevel = maximumLevel; newLevel >= 0; --newLevel) {
                if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
                    accountData.setLevel(newLevel);
                    repository.getAccountRepository().setLevel(accountData);
                    updatedCount++;
                    LOGGER.trace(() -> String.format("Block minter %s increased to level %d", accountData.getAddress(), accountData.getLevel()));
                    break;
                }
            }
        }

        return updatedCount;
    }

    private static void logPenaltyStats(Repository repository) {
        try {
            LOGGER.info(getPenaltyStats(repository));

        } catch (DataException e) {}
    }

    private static AccountPenaltyStats getPenaltyStats(Repository repository) throws DataException {
        List<AccountData> accounts = repository.getAccountRepository().getPenaltyAccounts();
        return AccountPenaltyStats.fromAccounts(accounts);
    }

    public static String getHash(List<String> penaltyAddresses) {
        if (penaltyAddresses == null || penaltyAddresses.isEmpty()) {
            return null;
        }
        Collections.sort(penaltyAddresses);
        return Base58.encode(Crypto.digest(StringUtils.join(penaltyAddresses).getBytes(StandardCharsets.UTF_8)));
    }

}
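In `updateAccountLevels` above, an account's level is derived from its effective blocks minted (minted + adjustment + penalty): a negative total short-circuits to level 0, otherwise the highest level whose cumulative-blocks threshold has been reached wins. A standalone sketch of just that calculation (the threshold values are hypothetical, not the real `cumulativeBlocksByLevel` settings):

```java
import java.util.List;

public class AccountLevelSketch {
    /** Returns the level implied by effectiveBlocksMinted against ascending cumulative thresholds (index = level). */
    static int levelFor(int effectiveBlocksMinted, List<Integer> cumulativeBlocksByLevel) {
        // Penalised accounts with a negative effective total drop straight to level 0
        if (effectiveBlocksMinted < 0)
            return 0;

        // Walk from the highest level down; the first satisfied threshold is the account's level
        for (int level = cumulativeBlocksByLevel.size() - 1; level >= 0; --level)
            if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(level))
                return level;

        return 0;
    }

    public static void main(String[] args) {
        // Hypothetical thresholds: level 0 from 0 blocks, level 1 from 10, level 2 from 30, level 3 from 60
        List<Integer> thresholds = List.of(0, 10, 30, 60);

        System.out.println(levelFor(45, thresholds));         // 2
        System.out.println(levelFor(5_000_000, thresholds));  // 3 - capped at the maximum level
        System.out.println(levelFor(-4_999_955, thresholds)); // 0 - penalty outweighs blocks minted
    }
}
```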
@@ -15,6 +15,7 @@ import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
@@ -40,6 +41,7 @@ public class AutoUpdate extends Thread {
|
||||
|
||||
public static final String JAR_FILENAME = "qortal.jar";
|
||||
public static final String NEW_JAR_FILENAME = "new-" + JAR_FILENAME;
|
||||
public static final String AGENTLIB_JVM_HOLDER_ARG = "-DQORTAL_agentlib=";
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(AutoUpdate.class);
|
||||
private static final long CHECK_INTERVAL = 20 * 60 * 1000L; // ms
|
||||
@@ -243,6 +245,11 @@ public class AutoUpdate extends Thread {
|
||||
// JVM arguments
|
||||
javaCmd.addAll(ManagementFactory.getRuntimeMXBean().getInputArguments());
|
||||
|
||||
// Disable, but retain, any -agentlib JVM arg as sub-process might fail if it tries to reuse same port
|
||||
javaCmd = javaCmd.stream()
|
||||
.map(arg -> arg.replace("-agentlib", AGENTLIB_JVM_HOLDER_ARG))
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// Remove JNI options as they won't be supported by command-line 'java'
|
||||
// These are typically added by the AdvancedInstaller Java launcher EXE
|
||||
javaCmd.removeAll(Arrays.asList("abort", "exit", "vfprintf"));
|
||||
@@ -261,10 +268,19 @@ public class AutoUpdate extends Thread {
|
||||
Translator.INSTANCE.translate("SysTray", "APPLYING_UPDATE_AND_RESTARTING"),
|
||||
MessageType.INFO);
|
||||
|
||||
new ProcessBuilder(javaCmd).start();
|
||||
ProcessBuilder processBuilder = new ProcessBuilder(javaCmd);
|
||||
|
||||
// New process will inherit our stdout and stderr
|
||||
processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
|
||||
processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
|
||||
|
||||
Process process = processBuilder.start();
|
||||
|
||||
// Nothing to pipe to new process, so close output stream (process's stdin)
|
||||
process.getOutputStream().close();
|
||||
|
||||
return true; // applying update OK
|
||||
} catch (IOException e) {
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(String.format("Failed to apply update: %s", e.getMessage()));
|
||||
|
||||
try {
|
||||
@@ -277,4 +293,77 @@ public class AutoUpdate extends Thread {
|
||||
}
|
||||
}
|
||||
|
||||
public static boolean attemptRestart() {
|
||||
LOGGER.info(String.format("Restarting node..."));
|
||||
|
||||
// Give repository a chance to backup in case things go badly wrong (if enabled)
|
||||
if (Settings.getInstance().getRepositoryBackupInterval() > 0) {
|
||||
try {
|
||||
// Timeout if the database isn't ready for backing up after 60 seconds
|
||||
long timeout = 60 * 1000L;
|
||||
RepositoryManager.backup(true, "backup", timeout);
|
||||
|
||||
} catch (TimeoutException e) {
|
||||
LOGGER.info("Attempt to backup repository failed due to timeout: {}", e.getMessage());
|
||||
// Continue with the node restart anyway...
|
||||
}
|
||||
}
|
||||
|
||||
// Call ApplyUpdate to end this process (unlocking current JAR so it can be replaced)
|
||||
String javaHome = System.getProperty("java.home");
|
||||
LOGGER.debug(String.format("Java home: %s", javaHome));
|
||||
|
||||
Path javaBinary = Paths.get(javaHome, "bin", "java");
|
||||
LOGGER.debug(String.format("Java binary: %s", javaBinary));
|
||||
|
||||
try {
|
||||
List<String> javaCmd = new ArrayList<>();
|
||||
// Java runtime binary itself
|
||||
javaCmd.add(javaBinary.toString());
|
||||
|
||||
// JVM arguments
|
||||
javaCmd.addAll(ManagementFactory.getRuntimeMXBean().getInputArguments());
|
||||
|
||||
// Disable, but retain, any -agentlib JVM arg as sub-process might fail if it tries to reuse same port
|
||||
javaCmd = javaCmd.stream()
|
||||
.map(arg -> arg.replace("-agentlib", AGENTLIB_JVM_HOLDER_ARG))
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// Remove JNI options as they won't be supported by command-line 'java'
|
||||
// These are typically added by the AdvancedInstaller Java launcher EXE
|
||||
javaCmd.removeAll(Arrays.asList("abort", "exit", "vfprintf"));
|
||||
|
||||
// Call ApplyUpdate using JAR
|
||||
javaCmd.addAll(Arrays.asList("-cp", JAR_FILENAME, ApplyUpdate.class.getCanonicalName()));
|
||||
|
||||
// Add command-line args saved from start-up
|
||||
String[] savedArgs = Controller.getInstance().getSavedArgs();
|
||||
if (savedArgs != null)
|
||||
javaCmd.addAll(Arrays.asList(savedArgs));
|
||||
|
||||
LOGGER.info(String.format("Restarting node with: %s", String.join(" ", javaCmd)));
|
||||
|
||||
SysTray.getInstance().showMessage(Translator.INSTANCE.translate("SysTray", "AUTO_UPDATE"), //TODO
|
||||
Translator.INSTANCE.translate("SysTray", "APPLYING_UPDATE_AND_RESTARTING"), //TODO
|
||||
MessageType.INFO);
|
||||
|
||||
ProcessBuilder processBuilder = new ProcessBuilder(javaCmd);
|
||||
|
||||
// New process will inherit our stdout and stderr
|
||||
processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
|
||||
processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
|
||||
|
||||
Process process = processBuilder.start();
|
||||
|
||||
// Nothing to pipe to new process, so close output stream (process's stdin)
|
||||
process.getOutputStream().close();
|
||||
|
||||
return true; // restarting node OK
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(String.format("Failed to restart node: %s", e.getMessage()));
|
||||
|
||||
return true; // repo was okay, even if applying update failed
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
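Both the update path and `attemptRestart()` above move from a bare `new ProcessBuilder(javaCmd).start()` to a builder that inherits stdout/stderr and immediately closes the child's stdin, so the relaunched JVM's output stays visible and nothing blocks on an unread pipe. A minimal sketch of that launch pattern (the `-version` command is a placeholder, not the real relaunch command line):

```java
import java.io.IOException;
import java.util.List;

public class RelaunchSketch {
    /** Starts a child process that shares our stdout/stderr and has its stdin closed immediately. */
    static Process launchDetached(List<String> command) throws IOException {
        ProcessBuilder processBuilder = new ProcessBuilder(command);

        // Child writes straight to our console/log instead of an unread pipe
        processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
        processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);

        Process process = processBuilder.start();

        // Nothing to pipe to the child, so close its stdin to avoid it waiting on us
        process.getOutputStream().close();

        return process;
    }

    public static void main(String[] args) throws IOException {
        // Placeholder command purely for demonstration
        launchDetached(List.of(System.getProperty("java.home") + "/bin/java", "-version"));
    }
}
```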
|
||||
@@ -35,6 +35,8 @@ import org.qortal.transaction.Transaction;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
|
||||
// Minting new blocks
|
||||
|
||||
public class BlockMinter extends Thread {
|
||||
@@ -61,8 +63,12 @@ public class BlockMinter extends Thread {
|
||||
public void run() {
|
||||
Thread.currentThread().setName("BlockMinter");
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
if (Settings.getInstance().getWipeUnconfirmedOnStart()) {
|
||||
if (Settings.getInstance().isTopOnly() || Settings.getInstance().isLite()) {
|
||||
// Top only and lite nodes do not sign blocks
|
||||
return;
|
||||
}
|
||||
if (Settings.getInstance().getWipeUnconfirmedOnStart()) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Wipe existing unconfirmed transactions
|
||||
List<TransactionData> unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions();
|
||||
|
||||
@@ -72,355 +78,381 @@ public class BlockMinter extends Thread {
|
||||
}
|
||||
|
||||
repository.saveChanges();
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn("Repository issue trying to wipe unconfirmed transactions on start-up: {}", e.getMessage());
|
||||
// Fall-through to normal behaviour in case we can recover
|
||||
}
|
||||
}
|
||||
|
||||
BlockData previousBlockData = null;
|
||||
|
||||
// Vars to keep track of blocks that were skipped due to chain weight
|
||||
byte[] parentSignatureForLastLowWeightBlock = null;
|
||||
Long timeOfLastLowWeightBlock = null;
|
||||
|
||||
List<Block> newBlocks = new ArrayList<>();
|
||||
|
||||
final boolean isSingleNodeTestnet = Settings.getInstance().isSingleNodeTestnet();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Going to need this a lot...
|
||||
BlockRepository blockRepository = repository.getBlockRepository();
|
||||
BlockData previousBlockData = null;
|
||||
|
||||
// Vars to keep track of blocks that were skipped due to chain weight
|
||||
byte[] parentSignatureForLastLowWeightBlock = null;
|
||||
Long timeOfLastLowWeightBlock = null;
|
||||
|
||||
List<Block> newBlocks = new ArrayList<>();
|
||||
|
||||
// Flags for tracking change in whether minting is possible,
|
||||
// so we can notify Controller, and further update SysTray, etc.
|
||||
boolean isMintingPossible = false;
|
||||
boolean wasMintingPossible = isMintingPossible;
|
||||
while (running) {
|
||||
repository.discardChanges(); // Free repository locks, if any
|
||||
|
||||
if (isMintingPossible != wasMintingPossible)
|
||||
Controller.getInstance().onMintingPossibleChange(isMintingPossible);
|
||||
|
||||
wasMintingPossible = isMintingPossible;
|
||||
|
||||
// Sleep for a while
|
||||
Thread.sleep(1000);
|
||||
|
||||
isMintingPossible = false;
|
||||
|
||||
final Long now = NTP.getTime();
|
||||
if (now == null)
|
||||
continue;
|
||||
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null)
|
||||
continue;
|
||||
|
||||
// No online accounts? (e.g. during startup)
|
||||
if (Controller.getInstance().getOnlineAccounts().isEmpty())
|
||||
continue;
|
||||
|
||||
List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
|
||||
// No minting accounts?
|
||||
if (mintingAccountsData.isEmpty())
|
||||
continue;
|
||||
|
||||
// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
|
||||
// Note that minting accounts are actually reward-shares in Qortal
|
||||
Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
|
||||
while (madi.hasNext()) {
|
||||
MintingAccountData mintingAccountData = madi.next();
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
|
||||
if (rewardShareData == null) {
|
||||
// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
Account mintingAccount = new Account(repository, rewardShareData.getMinter());
|
||||
if (!mintingAccount.canMint()) {
|
||||
// Minting-account component of reward-share can no longer mint - disregard
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Optional (non-validated) prevention of block submissions below a defined level.
|
||||
// This is an unvalidated version of Blockchain.minAccountLevelToMint
|
||||
// and exists only to reduce block candidates by default.
|
||||
int level = mintingAccount.getEffectiveMintingLevel();
|
||||
if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
List<Peer> peers = Network.getInstance().getHandshakedPeers();
|
||||
BlockData lastBlockData = blockRepository.getLastBlock();
|
||||
|
||||
// Disregard peers that have "misbehaved" recently
|
||||
peers.removeIf(Controller.hasMisbehaved);
|
||||
|
||||
// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
|
||||
// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
|
||||
if (Synchronizer.getInstance().getRecoveryMode() == false)
|
||||
peers.removeIf(Controller.hasNoRecentBlock);
|
||||
|
||||
// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
|
||||
if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
|
||||
continue;
|
||||
|
||||
// If we are stuck on an invalid block, we should allow an alternative to be minted
|
||||
boolean recoverInvalidBlock = false;
|
||||
if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
|
||||
// We've had at least one invalid block
|
||||
long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
|
||||
long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
|
||||
if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
// Last valid block was more than 10 mins ago, but we've had an invalid block since then
|
||||
// Assume that the chain has stalled because there is no alternative valid candidate
|
||||
// Enter recovery mode to allow alternative, valid candidates to be minted
|
||||
recoverInvalidBlock = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
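The recovery check above reduces to two clocks: if the last valid block is older than the recovery timeout but an invalid block arrived within that window, the chain is assumed to be stalled on an invalid candidate and alternative minting is allowed. A compact sketch of that decision (the 10-minute timeout is assumed from the "more than 10 mins ago" comment, not read from the real constant):

```java
public class InvalidBlockRecoverySketch {
    // Assumed from the surrounding comments: "more than 10 mins ago"
    private static final long INVALID_BLOCK_RECOVERY_TIMEOUT = 10 * 60 * 1000L; // ms

    /** Returns true when we should mint an alternative block because the chain appears stuck on an invalid one. */
    static boolean shouldRecoverInvalidBlock(long now, long timeValidBlockLastReceived, Long timeInvalidBlockLastReceived) {
        if (timeInvalidBlockLastReceived == null)
            return false; // never seen an invalid block, nothing to recover from

        long sinceLastValid = now - timeValidBlockLastReceived;
        long sinceLastInvalid = now - timeInvalidBlockLastReceived;

        // Stalled: no valid block for longer than the timeout, yet an invalid one arrived within it
        return sinceLastValid > INVALID_BLOCK_RECOVERY_TIMEOUT && sinceLastInvalid < INVALID_BLOCK_RECOVERY_TIMEOUT;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(shouldRecoverInvalidBlock(now, now - 15 * 60 * 1000L, now - 2 * 60 * 1000L)); // true
        System.out.println(shouldRecoverInvalidBlock(now, now - 1 * 60 * 1000L, now - 2 * 60 * 1000L));  // false
    }
}
```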
|
||||
// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
|
||||
if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
|
||||
if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false)
|
||||
continue;
|
||||
|
||||
// There are enough peers with a recent block and our latest block is recent
|
||||
// so go ahead and mint a block if possible.
|
||||
isMintingPossible = true;
|
||||
|
||||
// Check blockchain hasn't changed
|
||||
if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
|
||||
previousBlockData = lastBlockData;
|
||||
newBlocks.clear();
|
||||
|
||||
// Reduce log timeout
|
||||
logTimeout = 10 * 1000L;
|
||||
|
||||
// Last low weight block is no longer valid
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
}
|
||||
|
||||
// Discard accounts we have already built blocks with
|
||||
mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
|
||||
|
||||
// Do we need to build any potential new blocks?
|
||||
List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
|
||||
|
||||
// We might need to sit the next block out, if one of our minting accounts signed the previous one
|
||||
final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
|
||||
final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
|
||||
if (mintedLastBlock) {
|
||||
LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (parentSignatureForLastLowWeightBlock != null) {
|
||||
// The last iteration found a higher weight block in the network, so sleep for a while
|
||||
// to allow us to sync the higher weight chain. We are sleeping here rather than when
|
||||
// detected as we don't want to hold the blockchain lock open.
|
||||
LOGGER.info("Sleeping for 10 seconds...");
|
||||
Thread.sleep(10 * 1000L);
|
||||
}
|
||||
|
||||
for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
|
||||
// First block does the AT heavy-lifting
|
||||
if (newBlocks.isEmpty()) {
|
||||
Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block"));
|
||||
continue;
|
||||
}
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
} else {
|
||||
// The blocks for other minters require less effort...
|
||||
Block newBlock = newBlocks.get(0).remint(mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
|
||||
continue;
|
||||
}
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
}
|
||||
}
|
||||
|
||||
// No potential block candidates?
|
||||
if (newBlocks.isEmpty())
|
||||
continue;
|
||||
|
||||
// Make sure we're the only thread modifying the blockchain
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
|
||||
LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
|
||||
continue;
|
||||
}
|
||||
|
||||
boolean newBlockMinted = false;
|
||||
Block newBlock = null;
|
||||
|
||||
try {
|
||||
// Clear repository session state so we have latest view of data
|
||||
// Free up any repository locks
|
||||
repository.discardChanges();
|
||||
|
||||
// Now that we have blockchain lock, do final check that chain hasn't changed
|
||||
BlockData latestBlockData = blockRepository.getLastBlock();
|
||||
if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
|
||||
// Sleep for a while.
|
||||
// It's faster on single node testnets, to allow lots of blocks to be minted quickly.
|
||||
Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
|
||||
|
||||
isMintingPossible = false;
|
||||
|
||||
final Long now = NTP.getTime();
|
||||
if (now == null)
|
||||
continue;
|
||||
|
||||
List<Block> goodBlocks = new ArrayList<>();
|
||||
for (Block testBlock : newBlocks) {
|
||||
// Is new block's timestamp valid yet?
|
||||
// We do a separate check as some timestamp checks are skipped for testchains
|
||||
if (testBlock.isTimestampValid() != ValidationResult.OK)
|
||||
continue;
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null)
|
||||
continue;
|
||||
|
||||
testBlock.preProcess();
|
||||
// No online accounts for current timestamp? (e.g. during startup)
|
||||
if (!OnlineAccountsManager.getInstance().hasOnlineAccounts())
|
||||
continue;
|
||||
|
||||
// Is new block valid yet? (Before adding unconfirmed transactions)
|
||||
ValidationResult result = testBlock.isValid();
|
||||
if (result != ValidationResult.OK) {
|
||||
moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
|
||||
List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
|
||||
// No minting accounts?
|
||||
if (mintingAccountsData.isEmpty())
|
||||
continue;
|
||||
|
||||
// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
|
||||
// Note that minting accounts are actually reward-shares in Qortal
|
||||
Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
|
||||
while (madi.hasNext()) {
|
||||
MintingAccountData mintingAccountData = madi.next();
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
|
||||
if (rewardShareData == null) {
|
||||
// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
goodBlocks.add(testBlock);
|
||||
}
|
||||
Account mintingAccount = new Account(repository, rewardShareData.getMinter());
|
||||
if (!mintingAccount.canMint()) {
|
||||
// Minting-account component of reward-share can no longer mint - disregard
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (goodBlocks.isEmpty())
|
||||
continue;
|
||||
|
||||
// Pick best block
|
||||
final int parentHeight = previousBlockData.getHeight();
|
||||
final byte[] parentBlockSignature = previousBlockData.getSignature();
|
||||
|
||||
BigInteger bestWeight = null;
|
||||
|
||||
for (int bi = 0; bi < goodBlocks.size(); ++bi) {
|
||||
BlockData blockData = goodBlocks.get(bi).getBlockData();
|
||||
|
||||
BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
|
||||
blockSummaryData.setMinterLevel(minterLevel);
|
||||
|
||||
BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
|
||||
|
||||
if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
|
||||
newBlock = goodBlocks.get(bi);
|
||||
bestWeight = blockWeight;
|
||||
// Optional (non-validated) prevention of block submissions below a defined level.
|
||||
// This is an unvalidated version of Blockchain.minAccountLevelToMint
|
||||
// and exists only to reduce block candidates by default.
|
||||
int level = mintingAccount.getEffectiveMintingLevel();
|
||||
if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
|
||||
madi.remove();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
if (this.higherWeightChainExists(repository, bestWeight)) {
|
||||
// Needs a mutable copy of the unmodifiableList
|
||||
List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
|
||||
BlockData lastBlockData = blockRepository.getLastBlock();
|
||||
|
||||
// Check if the base block has updated since the last time we were here
|
||||
if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
|
||||
!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
|
||||
// We've switched to a different chain, so reset the timer
|
||||
timeOfLastLowWeightBlock = NTP.getTime();
|
||||
// Disregard peers that have "misbehaved" recently
|
||||
peers.removeIf(Controller.hasMisbehaved);
|
||||
|
||||
// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
|
||||
// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
|
||||
if (Synchronizer.getInstance().getRecoveryMode() == false)
|
||||
peers.removeIf(Controller.hasNoRecentBlock);
|
||||
|
||||
// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
|
||||
if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
|
||||
continue;
|
||||
|
||||
// If we are stuck on an invalid block, we should allow an alternative to be minted
|
||||
boolean recoverInvalidBlock = false;
|
||||
if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
|
||||
// We've had at least one invalid block
|
||||
long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
|
||||
long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
|
||||
if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
// Last valid block was more than 10 mins ago, but we've had an invalid block since then
|
||||
// Assume that the chain has stalled because there is no alternative valid candidate
|
||||
// Enter recovery mode to allow alternative, valid candidates to be minted
|
||||
recoverInvalidBlock = true;
|
||||
}
|
||||
parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
|
||||
}
|
||||
}
|
||||
|
||||
// If less than 30 seconds has passed since first detecting the higher weight chain,
|
||||
// we should skip our block submission to give us the opportunity to sync to the better chain
|
||||
if (NTP.getTime() - timeOfLastLowWeightBlock < 30*1000L) {
|
||||
LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
|
||||
LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
|
||||
// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
|
||||
if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
|
||||
if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false)
|
||||
continue;
|
||||
|
||||
// There are enough peers with a recent block and our latest block is recent
|
||||
// so go ahead and mint a block if possible.
|
||||
isMintingPossible = true;
|
||||
|
||||
// Check blockchain hasn't changed
|
||||
if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
|
||||
previousBlockData = lastBlockData;
|
||||
newBlocks.clear();
|
||||
|
||||
// Reduce log timeout
|
||||
logTimeout = 10 * 1000L;
|
||||
|
||||
// Last low weight block is no longer valid
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
}
|
||||
|
||||
// Discard accounts we have already built blocks with
|
||||
mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
|
||||
|
||||
// Do we need to build any potential new blocks?
|
||||
List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
|
||||
|
||||
// We might need to sit the next block out, if one of our minting accounts signed the previous one
|
||||
// Skip this check for single node testnets, since they definitely need to mint every block
|
||||
byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
|
||||
boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
|
||||
if (mintedLastBlock && !isSingleNodeTestnet) {
|
||||
LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (parentSignatureForLastLowWeightBlock != null) {
|
||||
// The last iteration found a higher weight block in the network, so sleep for a while
|
||||
// to allow is to sync the higher weight chain. We are sleeping here rather than when
|
||||
// detected as we don't want to hold the blockchain lock open.
|
||||
LOGGER.info("Sleeping for 10 seconds...");
|
||||
Thread.sleep(10 * 1000L);
|
||||
}
|
||||
|
||||
for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
|
||||
// First block does the AT heavy-lifting
|
||||
if (newBlocks.isEmpty()) {
|
||||
Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
// More than 30 seconds have passed, so we should submit our block candidate anyway.
|
||||
LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
} else {
|
||||
// The blocks for other minters require less effort...
|
||||
Block newBlock = newBlocks.get(0).remint(mintingAccount);
|
||||
if (newBlock == null) {
|
||||
// For some reason we can't mint right now
|
||||
moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
|
||||
continue;
|
||||
}
|
||||
|
||||
newBlocks.add(newBlock);
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("No higher weight chain found in peers");
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
|
||||
}
|
||||
|
||||
// Discard any uncommitted changes as a result of the higher weight chain detection
|
||||
repository.discardChanges();
|
||||
// No potential block candidates?
|
||||
if (newBlocks.isEmpty())
|
||||
continue;
|
||||
|
||||
// Clear variables that track low weight blocks
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
timeOfLastLowWeightBlock = null;
|
||||
|
||||
|
||||
// Add unconfirmed transactions
|
||||
addUnconfirmedTransactions(repository, newBlock);
|
||||
|
||||
// Sign to create block's signature
|
||||
newBlock.sign();
|
||||
|
||||
// Is newBlock still valid?
|
||||
ValidationResult validationResult = newBlock.isValid();
|
||||
if (validationResult != ValidationResult.OK) {
|
||||
// No longer valid? Report and discard
|
||||
LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
|
||||
|
||||
// Rebuild block candidates, just to be sure
|
||||
newBlocks.clear();
|
||||
// Make sure we're the only thread modifying the blockchain
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
|
||||
LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
|
||||
continue;
|
||||
}
|
||||
|
||||
// Add to blockchain - something else will notice and broadcast new block to network
|
||||
boolean newBlockMinted = false;
|
||||
Block newBlock = null;
|
||||
|
||||
try {
|
||||
newBlock.process();
|
||||
// Clear repository session state so we have latest view of data
|
||||
repository.discardChanges();
|
||||
|
||||
repository.saveChanges();
|
||||
// Now that we have blockchain lock, do final check that chain hasn't changed
|
||||
BlockData latestBlockData = blockRepository.getLastBlock();
|
||||
if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
|
||||
continue;
|
||||
|
||||
LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
|
||||
List<Block> goodBlocks = new ArrayList<>();
|
||||
boolean wasInvalidBlockDiscarded = false;
|
||||
Iterator<Block> newBlocksIterator = newBlocks.iterator();
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
|
||||
while (newBlocksIterator.hasNext()) {
|
||||
Block testBlock = newBlocksIterator.next();
|
||||
|
||||
if (rewardShareData != null) {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
rewardShareData.getMinter(),
|
||||
rewardShareData.getRecipient()));
|
||||
} else {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
newBlock.getMinter().getAddress()));
|
||||
// Is new block's timestamp valid yet?
|
||||
// We do a separate check as some timestamp checks are skipped for testchains
|
||||
if (testBlock.isTimestampValid() != ValidationResult.OK)
|
||||
continue;
|
||||
|
||||
testBlock.preProcess();
|
||||
|
||||
// Is new block valid yet? (Before adding unconfirmed transactions)
|
||||
ValidationResult result = testBlock.isValid();
|
||||
if (result != ValidationResult.OK) {
|
||||
moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
|
||||
|
||||
newBlocksIterator.remove();
|
||||
wasInvalidBlockDiscarded = true;
|
||||
/*
|
||||
* Bail out fast so that we loop around from the top again.
|
||||
* This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
|
||||
* via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
|
||||
* Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
goodBlocks.add(testBlock);
|
||||
}
|
||||
|
||||
// Notify network after we've released the blockchain lock
|
||||
newBlockMinted = true;
|
||||
if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
|
||||
continue;
|
||||
|
||||
// Notify Controller
|
||||
repository.discardChanges(); // clear transaction status to prevent deadlocks
|
||||
Controller.getInstance().onNewBlock(newBlock.getBlockData());
|
||||
} catch (DataException e) {
|
||||
// Unable to process block - report and discard
|
||||
LOGGER.error("Unable to process newly minted block?", e);
|
||||
newBlocks.clear();
|
||||
// Pick best block
|
||||
final int parentHeight = previousBlockData.getHeight();
|
||||
final byte[] parentBlockSignature = previousBlockData.getSignature();
|
||||
|
||||
BigInteger bestWeight = null;
|
||||
|
||||
for (int bi = 0; bi < goodBlocks.size(); ++bi) {
|
||||
BlockData blockData = goodBlocks.get(bi).getBlockData();
|
||||
|
||||
BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
|
||||
blockSummaryData.setMinterLevel(minterLevel);
|
||||
|
||||
BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
|
||||
|
||||
if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
|
||||
newBlock = goodBlocks.get(bi);
|
||||
bestWeight = blockWeight;
|
||||
}
|
||||
}
|
||||
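Candidate selection above computes a `BigInteger` weight per block via `Block.calcBlockWeight` and keeps the candidate whose weight satisfies `compareTo(bestWeight) < 0`, i.e. the minimum under that comparison. A generic sketch of that selection loop (the weight function here is a dummy stand-in, not the real weight calculation):

```java
import java.math.BigInteger;
import java.util.List;
import java.util.function.Function;

public class BestCandidateSketch {
    /** Keeps the candidate whose weight compares lowest, mirroring the compareTo(bestWeight) < 0 test in the excerpt. */
    static <T> T pickBest(List<T> candidates, Function<T, BigInteger> weightOf) {
        T best = null;
        BigInteger bestWeight = null;

        for (T candidate : candidates) {
            BigInteger weight = weightOf.apply(candidate);
            if (bestWeight == null || weight.compareTo(bestWeight) < 0) {
                best = candidate;
                bestWeight = weight;
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Dummy stand-in for Block.calcBlockWeight: here the "weight" is just the value itself
        List<BigInteger> candidates = List.of(BigInteger.valueOf(42), BigInteger.valueOf(7), BigInteger.valueOf(99));
        System.out.println(pickBest(candidates, w -> w)); // 7
    }
}
```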
|
||||
try {
|
||||
if (this.higherWeightChainExists(repository, bestWeight)) {
|
||||
|
||||
// Check if the base block has updated since the last time we were here
|
||||
if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
|
||||
!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
|
||||
// We've switched to a different chain, so reset the timer
|
||||
timeOfLastLowWeightBlock = NTP.getTime();
|
||||
}
|
||||
parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
|
||||
|
||||
// If less than 30 seconds has passed since first detecting the higher weight chain,
|
||||
// we should skip our block submission to give us the opportunity to sync to the better chain
|
||||
if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
|
||||
LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
|
||||
LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
|
||||
continue;
|
||||
} else {
|
||||
// More than 30 seconds have passed, so we should submit our block candidate anyway.
|
||||
LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
|
||||
}
|
||||
} else {
|
||||
LOGGER.debug("No higher weight chain found in peers");
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
|
||||
}
|
||||
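The surrounding logic gives the node a 30-second grace period after first spotting a higher-weight chain: within that window it skips its own submission so synchronization gets a chance, after it the candidate is submitted anyway. A minimal sketch of that timed backoff (the 30-second constant is taken from the excerpt; the timestamps are illustrative):

```java
public class HigherWeightBackoffSketch {
    private static final long BACKOFF_MS = 30 * 1000L; // from the excerpt: "less than 30 seconds"

    /** Returns true if we should hold off submitting our block because a better chain was seen recently. */
    static boolean shouldSkipSubmission(long now, Long timeHigherWeightChainFirstSeen) {
        if (timeHigherWeightChainFirstSeen == null)
            return false; // no competing chain detected - submit as normal

        return now - timeHigherWeightChainFirstSeen < BACKOFF_MS;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(shouldSkipSubmission(now, now - 10_000L)); // true - give sync a chance
        System.out.println(shouldSkipSubmission(now, now - 45_000L)); // false - submit anyway
        System.out.println(shouldSkipSubmission(now, null));          // false
    }
}
```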
|
||||
// Discard any uncommitted changes as a result of the higher weight chain detection
|
||||
repository.discardChanges();
|
||||
|
||||
// Clear variables that track low weight blocks
|
||||
parentSignatureForLastLowWeightBlock = null;
|
||||
timeOfLastLowWeightBlock = null;
|
||||
|
||||
// Add unconfirmed transactions
|
||||
addUnconfirmedTransactions(repository, newBlock);
|
||||
|
||||
// Sign to create block's signature
|
||||
newBlock.sign();
|
||||
|
||||
// Is newBlock still valid?
|
||||
ValidationResult validationResult = newBlock.isValid();
|
||||
if (validationResult != ValidationResult.OK) {
|
||||
// No longer valid? Report and discard
|
||||
LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
|
||||
|
||||
// Rebuild block candidates, just to be sure
|
||||
newBlocks.clear();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Add to blockchain - something else will notice and broadcast new block to network
|
||||
try {
|
||||
newBlock.process();
|
||||
|
||||
repository.saveChanges();
|
||||
|
||||
LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
|
||||
|
||||
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
|
||||
|
||||
if (rewardShareData != null) {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
rewardShareData.getMinter(),
|
||||
rewardShareData.getRecipient()));
|
||||
} else {
|
||||
LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
|
||||
newBlock.getBlockData().getHeight(),
|
||||
Base58.encode(newBlock.getBlockData().getSignature()),
|
||||
Base58.encode(newBlock.getParent().getSignature()),
|
||||
newBlock.getMinter().getAddress()));
|
||||
}
|
||||
|
||||
// Notify network after we're released blockchain lock
|
||||
newBlockMinted = true;
|
||||
|
||||
// Notify Controller
|
||||
repository.discardChanges(); // clear transaction status to prevent deadlocks
|
||||
Controller.getInstance().onNewBlock(newBlock.getBlockData());
|
||||
} catch (DataException e) {
|
||||
// Unable to process block - report and discard
|
||||
LOGGER.error("Unable to process newly minted block?", e);
|
||||
newBlocks.clear();
|
||||
} catch (ArithmeticException e) {
|
||||
// Unable to process block - report and discard
|
||||
LOGGER.error("Unable to process newly minted block?", e);
|
||||
newBlocks.clear();
|
||||
}
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
|
||||
if (newBlockMinted) {
|
||||
// Broadcast our new chain to network
|
||||
BlockData newBlockData = newBlock.getBlockData();
|
||||
if (newBlockMinted) {
|
||||
// Broadcast our new chain to network
|
||||
Network.getInstance().broadcastOurChain();
|
||||
}
|
||||
|
||||
Network network = Network.getInstance();
|
||||
network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData));
|
||||
} catch (InterruptedException e) {
|
||||
// We've been interrupted - time to exit
|
||||
return;
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn("Repository issue while running block minter", e);
|
||||
} catch (InterruptedException e) {
|
||||
// We've been interrupted - time to exit
|
||||
return;
|
||||
LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
|
||||
}
|
||||
}
|
||||
|
||||
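// Summary of the skip logic above: block signing is only deferred while a higher-weight chain has
// been visible for less than 30 seconds (30 * 1000L ms), and the timer is keyed to the current
// parent signature - when parentSignatureForLastLowWeightBlock changes, timeOfLastLowWeightBlock is
// reset via NTP.getTime(), so the wait is measured per parent block rather than globally.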
@@ -478,7 +510,22 @@ public class BlockMinter extends Thread {
throw new DataException("Ignoring attempt to mint testing block for non-test chain!");

// Ensure mintingAccount is 'online' so blocks can be minted
Controller.getInstance().ensureTestingAccountsOnline(mintingAndOnlineAccounts);
OnlineAccountsManager.getInstance().ensureTestingAccountsOnline(mintingAndOnlineAccounts);

PrivateKeyAccount mintingAccount = mintingAndOnlineAccounts[0];

Block block = mintTestingBlockRetainingTimestamps(repository, mintingAccount);
assertNotNull("Minted block must not be null", block);

return block;
}

public static Block mintTestingBlockUnvalidated(Repository repository, PrivateKeyAccount... mintingAndOnlineAccounts) throws DataException {
if (!BlockChain.getInstance().isTestChain())
throw new DataException("Ignoring attempt to mint testing block for non-test chain!");

// Ensure mintingAccount is 'online' so blocks can be minted
OnlineAccountsManager.getInstance().ensureTestingAccountsOnline(mintingAndOnlineAccounts);

PrivateKeyAccount mintingAccount = mintingAndOnlineAccounts[0];

@@ -489,6 +536,8 @@ public class BlockMinter extends Thread {
BlockData previousBlockData = repository.getBlockRepository().getLastBlock();

Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
if (newBlock == null)
return null;

// Make sure we're the only thread modifying the blockchain
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
@@ -544,25 +593,30 @@ public class BlockMinter extends Thread {
}
NumberFormat formatter = new DecimalFormat("0.###E0");

List<Peer> peers = Network.getInstance().getHandshakedPeers();
List<Peer> peers = Network.getInstance().getImmutableHandshakedPeers();
// Loop through handshaked peers and check for any new block candidates
for (Peer peer : peers) {
if (peer.getCommonBlockData() != null && peer.getCommonBlockData().getCommonBlockSummary() != null) {
// This peer has common block data
CommonBlockData commonBlockData = peer.getCommonBlockData();
BlockSummaryData commonBlockSummaryData = commonBlockData.getCommonBlockSummary();
if (commonBlockData.getChainWeight() != null) {
if (commonBlockData.getChainWeight() != null && peer.getCommonBlockData().getBlockSummariesAfterCommonBlock() != null) {
// The synchronizer has calculated this peer's chain weight
BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock());
BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight);
BigInteger peerChainWeight = commonBlockData.getChainWeight();
if (peerChainWeight.compareTo(ourChainWeight) >= 0) {
// This peer has a higher weight chain than ours
LOGGER.debug("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
return true;
if (!Synchronizer.getInstance().containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) {
// .. and it doesn't hold any invalid blocks
BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock());
BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight);
BigInteger peerChainWeight = commonBlockData.getChainWeight();
if (peerChainWeight.compareTo(ourChainWeight) >= 0) {
// This peer has a higher weight chain than ours
LOGGER.info("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
return true;

} else {
LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
}
} else {
LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
LOGGER.debug("Peer {} has an invalid block", peer);
}
} else {
LOGGER.debug("Peer {} has no chain weight", peer);

File diff suppressed because it is too large (Load Diff)
src/main/java/org/qortal/controller/LiteNode.java (new file, 189 lines)
@@ -0,0 +1,189 @@
package org.qortal.controller;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.*;

import java.security.SecureRandom;
import java.util.*;

import static org.qortal.network.message.MessageType.*;

public class LiteNode {

    private static final Logger LOGGER = LogManager.getLogger(LiteNode.class);

    private static LiteNode instance;

    public Map<Integer, Long> pendingRequests = Collections.synchronizedMap(new HashMap<>());

    public int MAX_TRANSACTIONS_PER_MESSAGE = 100;

    public LiteNode() {
    }

    public static synchronized LiteNode getInstance() {
        if (instance == null) {
            instance = new LiteNode();
        }

        return instance;
    }

    /**
     * Fetch account data from peers for given QORT address
     * @param address - the QORT address to query
     * @return accountData - the account data for this address, or null if not retrieved
     */
    public AccountData fetchAccountData(String address) {
        GetAccountMessage getAccountMessage = new GetAccountMessage(address);
        AccountMessage accountMessage = (AccountMessage) this.sendMessage(getAccountMessage, ACCOUNT);
        if (accountMessage == null) {
            return null;
        }
        return accountMessage.getAccountData();
    }

    /**
     * Fetch account balance data from peers for given QORT address and asset ID
     * @param address - the QORT address to query
     * @param assetId - the asset ID to query
     * @return balance - the balance for this address and assetId, or null if not retrieved
     */
    public AccountBalanceData fetchAccountBalance(String address, long assetId) {
        GetAccountBalanceMessage getAccountMessage = new GetAccountBalanceMessage(address, assetId);
        AccountBalanceMessage accountMessage = (AccountBalanceMessage) this.sendMessage(getAccountMessage, ACCOUNT_BALANCE);
        if (accountMessage == null) {
            return null;
        }
        return accountMessage.getAccountBalanceData();
    }

    /**
     * Fetch list of transactions for given QORT address
     * @param address - the QORT address to query
     * @param limit - the maximum number of results to return
     * @param offset - the starting index
     * @return a list of TransactionData objects, or null if not retrieved
     */
    public List<TransactionData> fetchAccountTransactions(String address, int limit, int offset) {
        List<TransactionData> allTransactions = new ArrayList<>();
        if (limit == 0) {
            limit = Integer.MAX_VALUE;
        }
        int batchSize = Math.min(limit, MAX_TRANSACTIONS_PER_MESSAGE);

        while (allTransactions.size() < limit) {
            GetAccountTransactionsMessage getAccountTransactionsMessage = new GetAccountTransactionsMessage(address, batchSize, offset);
            TransactionsMessage transactionsMessage = (TransactionsMessage) this.sendMessage(getAccountTransactionsMessage, TRANSACTIONS);
            if (transactionsMessage == null) {
                // An error occurred, so give up instead of returning partial results
                return null;
            }
            allTransactions.addAll(transactionsMessage.getTransactions());
            if (transactionsMessage.getTransactions().size() < batchSize) {
                // No more transactions to fetch
                break;
            }
            offset += batchSize;
        }
        return allTransactions;
    }

    /**
     * Fetch list of names for given QORT address
     * @param address - the QORT address to query
     * @return a list of NameData objects, or null if not retrieved
     */
    public List<NameData> fetchAccountNames(String address) {
        GetAccountNamesMessage getAccountNamesMessage = new GetAccountNamesMessage(address);
        NamesMessage namesMessage = (NamesMessage) this.sendMessage(getAccountNamesMessage, NAMES);
        if (namesMessage == null) {
            return null;
        }
        return namesMessage.getNameDataList();
    }

    /**
     * Fetch info about a registered name
     * @param name - the name to query
     * @return a NameData object, or null if not retrieved
     */
    public NameData fetchNameData(String name) {
        GetNameMessage getNameMessage = new GetNameMessage(name);
        NamesMessage namesMessage = (NamesMessage) this.sendMessage(getNameMessage, NAMES);
        if (namesMessage == null) {
            return null;
        }
        List<NameData> nameDataList = namesMessage.getNameDataList();
        if (nameDataList == null || nameDataList.size() != 1) {
            return null;
        }
        // We are only expecting a single item in the list
        return nameDataList.get(0);
    }

    private Message sendMessage(Message message, MessageType expectedResponseMessageType) {
        // This asks a random peer for the data
        // TODO: ask multiple peers, and disregard everything if there are any significant differences in the responses

        // Needs a mutable copy of the unmodifiableList
        List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());

        // Disregard peers that have "misbehaved" recently
        peers.removeIf(Controller.hasMisbehaved);

        // Disregard peers that only have genesis block
        // TODO: peers.removeIf(Controller.hasOnlyGenesisBlock);

        // Disregard peers that are on an old version
        peers.removeIf(Controller.hasOldVersion);

        // Disregard peers that are on a known inferior chain tip
        // TODO: peers.removeIf(Controller.hasInferiorChainTip);

        if (peers.isEmpty()) {
            LOGGER.info("No peers available to send {} message to", message.getType());
            return null;
        }

        // Pick random peer
        int index = new SecureRandom().nextInt(peers.size());
        Peer peer = peers.get(index);

        LOGGER.info("Sending {} message to peer {}...", message.getType(), peer);

        Message responseMessage;

        try {
            responseMessage = peer.getResponse(message);

        } catch (InterruptedException e) {
            return null;
        }

        if (responseMessage == null) {
            LOGGER.info("Peer {} didn't respond to {} message", peer, message.getType());
            return null;
        }
        else if (responseMessage.getType() != expectedResponseMessageType) {
            LOGGER.info("Peer {} responded with unexpected message type {} (should be {})", peer, responseMessage.getType(), expectedResponseMessageType);
            return null;
        }

        LOGGER.info("Peer {} responded with {} message", peer, responseMessage.getType());

        return responseMessage;
    }

}
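// Usage sketch for the class above (hypothetical caller; 'address' is an illustrative variable):
//     List<TransactionData> txs = LiteNode.getInstance().fetchAccountTransactions(address, 250, 0);
// Internally this pages through random handshaked peers in batches of at most
// MAX_TRANSACTIONS_PER_MESSAGE (100), advancing the offset by the batch size, stopping early when a
// batch comes back smaller than requested, and returning null if any single request fails.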
src/main/java/org/qortal/controller/OnlineAccountsManager.java (new file, 858 lines)
@@ -0,0 +1,858 @@
package org.qortal.controller;

import com.google.common.hash.HashCode;
import com.google.common.primitives.Longs;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.account.Account;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.crypto.Crypto;
import org.qortal.crypto.MemoryPoW;
import org.qortal.crypto.Qortal25519Extras;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.network.OnlineAccountData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import org.qortal.utils.NamedThreadFactory;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;

public class OnlineAccountsManager {

    private static final Logger LOGGER = LogManager.getLogger(OnlineAccountsManager.class);

    // 'Current' as in 'now'

    /**
     * How long online accounts signatures last before they expire.
     */
    private static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L;
    private static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L;

    /**
     * How many 'current' timestamp-sets of online accounts we cache.
     */
    private static final int MAX_CACHED_TIMESTAMP_SETS = 2;

    /**
     * How many timestamp-sets of online accounts we cache for 'latest blocks'.
     */
    private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 3;

    private static final long ONLINE_ACCOUNTS_QUEUE_INTERVAL = 100L; // ms
    private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms
    private static final long ONLINE_ACCOUNTS_COMPUTE_INTERVAL = 5 * 1000L; // ms
    private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 60 * 1000L; // ms
    // After switching to a new online timestamp, we "burst" the online accounts requests
    // at an increased frequency (shorter interval) for a specified amount of time
    private static final long ONLINE_ACCOUNTS_BROADCAST_BURST_INTERVAL = 5 * 1000L; // ms
    private static final long ONLINE_ACCOUNTS_BROADCAST_BURST_LENGTH = 5 * 60 * 1000L; // ms

    private static final long ONLINE_ACCOUNTS_COMPUTE_INITIAL_SLEEP_INTERVAL = 30 * 1000L; // ms

    // MemoryPoW - mainnet
    public static final int POW_BUFFER_SIZE = 1 * 1024 * 1024; // bytes
    public static final int POW_DIFFICULTY_V1 = 18; // leading zero bits
    public static final int POW_DIFFICULTY_V2 = 19; // leading zero bits

    // MemoryPoW - testnet
    public static final int POW_BUFFER_SIZE_TESTNET = 1 * 1024 * 1024; // bytes
    public static final int POW_DIFFICULTY_TESTNET = 5; // leading zero bits

    // IMPORTANT: if we ever need to dynamically modify the buffer size using a feature trigger, the
    // pre-allocated buffer below will NOT work, and we should instead use a dynamically allocated
    // one for the transition period.
    private static long[] POW_VERIFY_WORK_BUFFER = new long[getPoWBufferSize() / 8];

    private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts"));
    private volatile boolean isStopping = false;

    private final Set<OnlineAccountData> onlineAccountsImportQueue = ConcurrentHashMap.newKeySet();

    /**
     * Cache of 'current' online accounts, keyed by timestamp
     */
    private final Map<Long, Set<OnlineAccountData>> currentOnlineAccounts = new ConcurrentHashMap<>();
    /**
     * Cache of hash-summary of 'current' online accounts, keyed by timestamp, then leading byte of public key.
     */
    private final Map<Long, Map<Byte, byte[]>> currentOnlineAccountsHashes = new ConcurrentHashMap<>();

    /**
     * Cache of online accounts for latest blocks - not necessarily 'current' / now.
     * <i>Probably</i> only accessed / modified by a single Synchronizer thread.
     */
    private final SortedMap<Long, Set<OnlineAccountData>> latestBlocksOnlineAccounts = new ConcurrentSkipListMap<>();

    private long lastOnlineAccountsRequest = 0;

    private boolean hasOurOnlineAccounts = false;

    public static long getOnlineTimestampModulus() {
        Long now = NTP.getTime();
        if (now != null && now >= BlockChain.getInstance().getOnlineAccountsModulusV2Timestamp()) {
            return ONLINE_TIMESTAMP_MODULUS_V2;
        }
        return ONLINE_TIMESTAMP_MODULUS_V1;
    }

    public static Long getCurrentOnlineAccountTimestamp() {
        Long now = NTP.getTime();
        if (now == null)
            return null;

        long onlineTimestampModulus = getOnlineTimestampModulus();
        return (now / onlineTimestampModulus) * onlineTimestampModulus;
    }

    public static long toOnlineAccountTimestamp(long timestamp) {
        return (timestamp / getOnlineTimestampModulus()) * getOnlineTimestampModulus();
    }
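    // Worked example of the quantisation above (assuming the V2 modulus of 30 * 60 * 1000L = 1800000 ms is active):
    //     toOnlineAccountTimestamp(1660000000000L) = (1660000000000 / 1800000) * 1800000 = 1659999600000L
    // i.e. timestamps are rounded down to the most recent half-hour boundary, and
    // getCurrentOnlineAccountTimestamp() applies the same rounding to NTP.getTime().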
    private static int getPoWBufferSize() {
        if (Settings.getInstance().isTestNet())
            return POW_BUFFER_SIZE_TESTNET;

        return POW_BUFFER_SIZE;
    }

    private static int getPoWDifficulty(long timestamp) {
        if (Settings.getInstance().isTestNet())
            return POW_DIFFICULTY_TESTNET;

        if (timestamp >= BlockChain.getInstance().getIncreaseOnlineAccountsDifficultyTimestamp())
            return POW_DIFFICULTY_V2;

        return POW_DIFFICULTY_V1;
    }

    private OnlineAccountsManager() {
    }

    private static class SingletonContainer {
        private static final OnlineAccountsManager INSTANCE = new OnlineAccountsManager();
    }

    public static OnlineAccountsManager getInstance() {
        return SingletonContainer.INSTANCE;
    }

    public void start() {
        // Expire old online accounts signatures
        executor.scheduleAtFixedRate(this::expireOldOnlineAccounts, ONLINE_ACCOUNTS_TASKS_INTERVAL, ONLINE_ACCOUNTS_TASKS_INTERVAL, TimeUnit.MILLISECONDS);

        // Request online accounts from peers
        executor.scheduleAtFixedRate(this::requestRemoteOnlineAccounts, ONLINE_ACCOUNTS_BROADCAST_BURST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_BURST_INTERVAL, TimeUnit.MILLISECONDS);

        // Process import queue
        executor.scheduleWithFixedDelay(this::processOnlineAccountsImportQueue, ONLINE_ACCOUNTS_QUEUE_INTERVAL, ONLINE_ACCOUNTS_QUEUE_INTERVAL, TimeUnit.MILLISECONDS);

        // Send our online accounts (using increased initial delay)
        // This allows some time for initial online account lists to be retrieved, and
        // reduces the chances of the same nonce being computed twice
        executor.scheduleAtFixedRate(this::sendOurOnlineAccountsInfo, ONLINE_ACCOUNTS_COMPUTE_INITIAL_SLEEP_INTERVAL, ONLINE_ACCOUNTS_COMPUTE_INTERVAL, TimeUnit.MILLISECONDS);
    }

    public void shutdown() {
        isStopping = true;
        executor.shutdownNow();
    }

    // Testing support
    public void ensureTestingAccountsOnline(PrivateKeyAccount... onlineAccounts) {
        if (!BlockChain.getInstance().isTestChain()) {
            LOGGER.warn("Ignoring attempt to ensure test account is online for non-test chain!");
            return;
        }

        final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
        if (onlineAccountsTimestamp == null)
            return;

        byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);

        Set<OnlineAccountData> replacementAccounts = new HashSet<>();
        for (PrivateKeyAccount onlineAccount : onlineAccounts) {
            // Check mintingAccount is actually reward-share?

            byte[] signature = Qortal25519Extras.signForAggregation(onlineAccount.getPrivateKey(), timestampBytes);
            byte[] publicKey = onlineAccount.getPublicKey();

            Integer nonce = new Random().nextInt(500000);

            OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey, nonce);
            replacementAccounts.add(ourOnlineAccountData);
        }

        this.currentOnlineAccounts.clear();
        addAccounts(replacementAccounts);
    }

    // Online accounts import queue

    private void processOnlineAccountsImportQueue() {
        if (this.onlineAccountsImportQueue.isEmpty())
            // Nothing to do
            return;

        LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size());

        Set<OnlineAccountData> onlineAccountsToAdd = new HashSet<>();
        Set<OnlineAccountData> onlineAccountsToRemove = new HashSet<>();
        try (final Repository repository = RepositoryManager.getRepository()) {
            for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) {
                if (isStopping)
                    return;

                // Skip this account if it's already validated
                Set<OnlineAccountData> onlineAccounts = this.currentOnlineAccounts.get(onlineAccountData.getTimestamp());
                if (onlineAccounts != null && onlineAccounts.contains(onlineAccountData)) {
                    // We have already validated this online account
                    onlineAccountsImportQueue.remove(onlineAccountData);
                    continue;
                }

                boolean isValid = this.isValidCurrentAccount(repository, onlineAccountData);
                if (isValid)
                    onlineAccountsToAdd.add(onlineAccountData);

                // Don't remove from the queue yet - we'll do this at the end of the process
                // This prevents duplicates being added to the queue whilst it's being processed
                onlineAccountsToRemove.add(onlineAccountData);
            }
        } catch (DataException e) {
            LOGGER.error("Repository issue while verifying online accounts", e);

        } finally {
            if (!onlineAccountsToAdd.isEmpty()) {
                LOGGER.debug("Merging {} validated online accounts from import queue", onlineAccountsToAdd.size());
                addAccounts(onlineAccountsToAdd);
            }
            onlineAccountsImportQueue.removeAll(onlineAccountsToRemove);
        }
    }

    /**
     * Check if supplied onlineAccountData is superior (i.e. has a nonce value) to the existing record.
     * Two entries are considered equal even if the nonce differs, to prevent multiple variations
     * co-existing. For this reason, we need to be able to check if a new OnlineAccountData entry should
     * replace the existing one, which may be missing the nonce.
     * @param onlineAccountData
     * @return true if supplied data is superior to existing entry
     */
    private boolean isOnlineAccountsDataSuperior(OnlineAccountData onlineAccountData) {
        if (onlineAccountData.getNonce() == null || onlineAccountData.getNonce() < 0) {
            // New online account data has no usable nonce value, so it won't be better than anything we already have
            return false;
        }

        // New online account data has a nonce value, so check if there is any existing data to compare against
        Set<OnlineAccountData> existingOnlineAccountsForTimestamp = this.currentOnlineAccounts.get(onlineAccountData.getTimestamp());
        if (existingOnlineAccountsForTimestamp == null) {
            // No existing online accounts data with this timestamp yet
            return false;
        }

        // Check if a duplicate entry exists
        OnlineAccountData existingOnlineAccountData = null;
        for (OnlineAccountData existingAccount : existingOnlineAccountsForTimestamp) {
            if (existingAccount.equals(onlineAccountData)) {
                // Found existing online account data
                existingOnlineAccountData = existingAccount;
                break;
            }
        }

        if (existingOnlineAccountData == null) {
            // No existing online accounts data, so nothing to compare
            return false;
        }

        if (existingOnlineAccountData.getNonce() == null || existingOnlineAccountData.getNonce() < 0) {
            // Existing data has no usable nonce value(s) so we want to replace it with the new one
            return true;
        }

        // Both new and old data have nonce values so the new data isn't considered superior
        return false;
    }

    // Utilities

    public static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) {
        if (inplaceArray == null)
            return Arrays.copyOf(otherArray, otherArray.length);

        // Start from index 1 to enforce static leading byte
        for (int i = 1; i < otherArray.length; i++)
            inplaceArray[i] ^= otherArray[i];

        return inplaceArray;
    }

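    // Illustrative note on the XOR fold above: for each (timestamp, leading public-key byte) bucket,
    // addAccounts() below reduces the bucket's public keys with xorByteArrayInPlace(), so the summary
    // hash keeps byte 0 as the bucket's leading byte and XORs the remaining bytes together. Two nodes
    // whose buckets hold the same set of keys end up with identical hashes regardless of insertion
    // order (XOR is commutative and associative), which is what the GetOnlineAccountsV3 handler compares.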
    private static boolean isValidCurrentAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException {
        final Long now = NTP.getTime();
        if (now == null)
            return false;

        byte[] rewardSharePublicKey = onlineAccountData.getPublicKey();
        long onlineAccountTimestamp = onlineAccountData.getTimestamp();

        // Check timestamp is 'recent' here
        if (Math.abs(onlineAccountTimestamp - now) > getOnlineTimestampModulus() * 2) {
            LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp));
            return false;
        }

        // Check timestamp is a multiple of online timestamp modulus
        if (onlineAccountTimestamp % getOnlineTimestampModulus() != 0) {
            LOGGER.trace(() -> String.format("Rejecting online account %s with invalid timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp));
            return false;
        }

        // Verify signature
        byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp());
        boolean isSignatureValid = Qortal25519Extras.verifyAggregated(rewardSharePublicKey, onlineAccountData.getSignature(), data);
        if (!isSignatureValid) {
            LOGGER.trace(() -> String.format("Rejecting invalid online account %s", Base58.encode(rewardSharePublicKey)));
            return false;
        }

        // Qortal: check online account is actually reward-share
        RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey);
        if (rewardShareData == null) {
            // Reward-share doesn't even exist - probably not a good sign
            LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(rewardSharePublicKey)));
            return false;
        }

        Account mintingAccount = new Account(repository, rewardShareData.getMinter());
        if (!mintingAccount.canMint()) {
            // Minting-account component of reward-share can no longer mint - disregard
            LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress()));
            return false;
        }

        // Validate mempow
        if (!getInstance().verifyMemoryPoW(onlineAccountData, POW_VERIFY_WORK_BUFFER)) {
            LOGGER.trace(() -> String.format("Rejecting online reward-share for account %s due to invalid PoW nonce", mintingAccount.getAddress()));
            return false;
        }

        return true;
    }

    /** Adds accounts, maybe rebuilds hashes, returns whether any new accounts were added / hashes rebuilt. */
    private boolean addAccounts(Collection<OnlineAccountData> onlineAccountsToAdd) {
        // For keeping track of which hashes to rebuild
        Map<Long, Set<Byte>> hashesToRebuild = new HashMap<>();

        for (OnlineAccountData onlineAccountData : onlineAccountsToAdd) {
            boolean isNewEntry = this.addAccount(onlineAccountData);

            if (isNewEntry)
                hashesToRebuild.computeIfAbsent(onlineAccountData.getTimestamp(), k -> new HashSet<>()).add(onlineAccountData.getPublicKey()[0]);
        }

        if (hashesToRebuild.isEmpty())
            return false;

        for (var entry : hashesToRebuild.entrySet()) {
            Long timestamp = entry.getKey();

            LOGGER.trace(() -> String.format("Rehashing for timestamp %d and leading bytes %s",
                    timestamp,
                    entry.getValue().stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", "))
                )
            );

            for (Byte leadingByte : entry.getValue()) {
                byte[] pubkeyHash = currentOnlineAccounts.get(timestamp).stream()
                        .map(OnlineAccountData::getPublicKey)
                        .filter(publicKey -> leadingByte == publicKey[0])
                        .reduce(null, OnlineAccountsManager::xorByteArrayInPlace);

                currentOnlineAccountsHashes.computeIfAbsent(timestamp, k -> new ConcurrentHashMap<>()).put(leadingByte, pubkeyHash);

                LOGGER.trace(() -> String.format("Rebuilt hash %s for timestamp %d and leading byte %02x using %d public keys",
                        HashCode.fromBytes(pubkeyHash),
                        timestamp,
                        leadingByte,
                        currentOnlineAccounts.get(timestamp).stream()
                                .map(OnlineAccountData::getPublicKey)
                                .filter(publicKey -> leadingByte == publicKey[0])
                                .count()
                ));
            }
        }

        LOGGER.trace(String.format("we have online accounts for timestamps: %s", String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", ")))));

        return true;
    }

    private boolean addAccount(OnlineAccountData onlineAccountData) {
        byte[] rewardSharePublicKey = onlineAccountData.getPublicKey();
        long onlineAccountTimestamp = onlineAccountData.getTimestamp();

        Set<OnlineAccountData> onlineAccounts = this.currentOnlineAccounts.computeIfAbsent(onlineAccountTimestamp, k -> ConcurrentHashMap.newKeySet());

        boolean isSuperiorEntry = isOnlineAccountsDataSuperior(onlineAccountData);
        if (isSuperiorEntry)
            // Remove existing inferior entry so it can be re-added below (it's likely the existing copy is missing a nonce value)
            onlineAccounts.remove(onlineAccountData);

        boolean isNewEntry = onlineAccounts.add(onlineAccountData);

        if (isNewEntry)
            LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp));
        else
            LOGGER.trace(() -> String.format("Not updating existing online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp));

        return isNewEntry;
    }

    /**
     * Expire old entries.
     */
    private void expireOldOnlineAccounts() {
        final Long now = NTP.getTime();
        if (now == null)
            return;

        final long cutoffThreshold = now - MAX_CACHED_TIMESTAMP_SETS * getOnlineTimestampModulus();
        this.currentOnlineAccounts.keySet().removeIf(timestamp -> timestamp < cutoffThreshold);
        this.currentOnlineAccountsHashes.keySet().removeIf(timestamp -> timestamp < cutoffThreshold);
    }

    /**
     * Request data from other peers
     */
    private void requestRemoteOnlineAccounts() {
        final Long now = NTP.getTime();
        if (now == null)
            return;

        // Don't bother if we're not up to date
        if (!Controller.getInstance().isUpToDate())
            return;

        long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
        if (now - onlineAccountsTimestamp >= ONLINE_ACCOUNTS_BROADCAST_BURST_LENGTH) {
            // New online timestamp started more than 5 mins ago - we probably don't need to request so frequently

            if (Controller.uptime() < ONLINE_ACCOUNTS_BROADCAST_BURST_LENGTH) {
                // The node recently started up, so we should request at the burst interval
                // This could allow accounts to move around the network more easily when an auto update is occurring
            }
            else if (now - lastOnlineAccountsRequest < ONLINE_ACCOUNTS_BROADCAST_INTERVAL) {
                // We already requested online accounts in the last minute, so no need to request again
                return;
            }
        }

        LOGGER.debug("Requesting online accounts via broadcast...");

        lastOnlineAccountsRequest = now;
        Message messageV3 = new GetOnlineAccountsV3Message(currentOnlineAccountsHashes);
        Network.getInstance().broadcast(peer -> messageV3);
    }

    /**
     * Send online accounts that are minting on this node.
     */
    private void sendOurOnlineAccountsInfo() {
        // 'current' timestamp
        final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
        if (onlineAccountsTimestamp == null)
            return;

        Long now = NTP.getTime();
        if (now == null) {
            return;
        }

        // Don't submit if we're more than 2 hours out of sync (unless we're in recovery mode)
        final Long minLatestBlockTimestamp = now - (2 * 60 * 60 * 1000L);
        if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp) && !Synchronizer.getInstance().getRecoveryMode()) {
            return;
        }

        // 'next' timestamp (prioritize this as it's the most important, if mempow active)
        final long nextOnlineAccountsTimestamp = toOnlineAccountTimestamp(now) + getOnlineTimestampModulus();
        boolean success = computeOurAccountsForTimestamp(nextOnlineAccountsTimestamp);
        if (!success) {
            // We didn't compute the required nonce value(s), and so can't proceed until they have been retried
            return;
        }

        // 'current' timestamp
        computeOurAccountsForTimestamp(onlineAccountsTimestamp);
    }

    private boolean computeOurAccountsForTimestamp(long onlineAccountsTimestamp) {
        List<MintingAccountData> mintingAccounts;
        try (final Repository repository = RepositoryManager.getRepository()) {
            mintingAccounts = repository.getAccountRepository().getMintingAccounts();

            // We have no accounts to send
            if (mintingAccounts.isEmpty())
                return false;

            // Only active reward-shares allowed
            Iterator<MintingAccountData> iterator = mintingAccounts.iterator();
            while (iterator.hasNext()) {
                MintingAccountData mintingAccountData = iterator.next();

                RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
                if (rewardShareData == null) {
                    // Reward-share doesn't even exist - probably not a good sign
                    iterator.remove();
                    continue;
                }

                Account mintingAccount = new Account(repository, rewardShareData.getMinter());
                if (!mintingAccount.canMint()) {
                    // Minting-account component of reward-share can no longer mint - disregard
                    iterator.remove();
                    continue;
                }
            }
        } catch (DataException e) {
            LOGGER.warn(String.format("Repository issue trying to fetch minting accounts: %s", e.getMessage()));
            return false;
        }

        byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);
        List<OnlineAccountData> ourOnlineAccounts = new ArrayList<>();

        int remaining = mintingAccounts.size();
        for (MintingAccountData mintingAccountData : mintingAccounts) {
            remaining--;
            byte[] privateKey = mintingAccountData.getPrivateKey();
            byte[] publicKey = Crypto.toPublicKey(privateKey);

            // We don't want to compute the online account nonce and signature again if it already exists
            Set<OnlineAccountData> onlineAccounts = this.currentOnlineAccounts.computeIfAbsent(onlineAccountsTimestamp, k -> ConcurrentHashMap.newKeySet());
            boolean alreadyExists = onlineAccounts.stream().anyMatch(a -> Arrays.equals(a.getPublicKey(), publicKey));
            if (alreadyExists) {
                this.hasOurOnlineAccounts = true;

                if (remaining > 0) {
                    // Move on to next account
                    continue;
                }
                else {
                    // Everything exists, so return true
                    return true;
                }
            }

            // Generate bytes for mempow
            byte[] mempowBytes;
            try {
                mempowBytes = this.getMemoryPoWBytes(publicKey, onlineAccountsTimestamp);
            }
            catch (IOException e) {
                LOGGER.info("Unable to create bytes for MemoryPoW. Moving on to next account...");
                continue;
            }

            // Compute nonce
            Integer nonce;
            try {
                nonce = this.computeMemoryPoW(mempowBytes, publicKey, onlineAccountsTimestamp);
                if (nonce == null) {
                    // A nonce is required
                    return false;
                }
            } catch (TimeoutException e) {
                LOGGER.info(String.format("Timed out computing nonce for account %.8s", Base58.encode(publicKey)));
                return false;
            }

            byte[] signature = Qortal25519Extras.signForAggregation(privateKey, timestampBytes);

            // Our account is online
            OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey, nonce);

            // Make sure to verify before adding
            if (verifyMemoryPoW(ourOnlineAccountData, null)) {
                ourOnlineAccounts.add(ourOnlineAccountData);
            }
        }

        this.hasOurOnlineAccounts = !ourOnlineAccounts.isEmpty();

        boolean hasInfoChanged = addAccounts(ourOnlineAccounts);

        if (!hasInfoChanged)
            return false;

        Network.getInstance().broadcast(peer -> new OnlineAccountsV3Message(ourOnlineAccounts));

        LOGGER.debug("Broadcasted {} online account{} with timestamp {}", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp);

        return true;
    }

    // MemoryPoW

    private byte[] getMemoryPoWBytes(byte[] publicKey, long onlineAccountsTimestamp) throws IOException {
        byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);

        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        outputStream.write(publicKey);
        outputStream.write(timestampBytes);

        return outputStream.toByteArray();
    }

    private Integer computeMemoryPoW(byte[] bytes, byte[] publicKey, long onlineAccountsTimestamp) throws TimeoutException {
        LOGGER.info(String.format("Computing nonce for account %.8s and timestamp %d...", Base58.encode(publicKey), onlineAccountsTimestamp));

        // Calculate the time until the next online timestamp and use it as a timeout when computing the nonce
        Long startTime = NTP.getTime();
        final long nextOnlineAccountsTimestamp = toOnlineAccountTimestamp(startTime) + getOnlineTimestampModulus();
        long timeUntilNextTimestamp = nextOnlineAccountsTimestamp - startTime;

        int difficulty = getPoWDifficulty(onlineAccountsTimestamp);
        Integer nonce = MemoryPoW.compute2(bytes, getPoWBufferSize(), difficulty, timeUntilNextTimestamp);

        double totalSeconds = (NTP.getTime() - startTime) / 1000.0f;
        int minutes = (int) ((totalSeconds % 3600) / 60);
        int seconds = (int) (totalSeconds % 60);
        double hashRate = nonce / totalSeconds;

        LOGGER.info(String.format("Computed nonce for timestamp %d and account %.8s: %d. Buffer size: %d. Difficulty: %d. " +
                "Time taken: %02d:%02d. Hashrate: %f", onlineAccountsTimestamp, Base58.encode(publicKey),
                nonce, getPoWBufferSize(), difficulty, minutes, seconds, hashRate));

        return nonce;
    }

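    // Note on the timeout above: the time remaining until the next online timestamp is passed to
    // MemoryPoW.compute2() as its timeout, so with the 30-minute modulus a computation started 25
    // minutes into the window has roughly 5 minutes left; computeOurAccountsForTimestamp() catches the
    // resulting TimeoutException and simply gives up for this round.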
    public boolean verifyMemoryPoW(OnlineAccountData onlineAccountData, long[] workBuffer) {
        // Require a valid nonce value
        if (onlineAccountData.getNonce() == null || onlineAccountData.getNonce() < 0) {
            return false;
        }

        int nonce = onlineAccountData.getNonce();

        byte[] mempowBytes;
        try {
            mempowBytes = this.getMemoryPoWBytes(onlineAccountData.getPublicKey(), onlineAccountData.getTimestamp());
        } catch (IOException e) {
            return false;
        }

        // Verify the nonce
        return MemoryPoW.verify2(mempowBytes, workBuffer, getPoWBufferSize(), getPoWDifficulty(onlineAccountData.getTimestamp()), nonce);
    }

    /**
     * Returns whether online accounts manager has any online accounts with timestamp recent enough to be considered currently online.
     */
    // BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not
    public boolean hasOnlineAccounts() {
        // 'current' timestamp
        final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
        if (onlineAccountsTimestamp == null)
            return false;

        return this.currentOnlineAccounts.containsKey(onlineAccountsTimestamp);
    }

    /**
     * Whether we have submitted - or attempted to submit - our online account
     * signature(s) to the network.
     * @return true if our signature(s) have been submitted recently.
     */
    public boolean hasActiveOnlineAccountSignatures() {
        final Long minLatestBlockTimestamp = NTP.getTime() - (2 * 60 * 60 * 1000L);
        boolean isUpToDate = Controller.getInstance().isUpToDate(minLatestBlockTimestamp);

        return isUpToDate && hasOurOnlineAccounts();
    }

    public boolean hasOurOnlineAccounts() {
        return this.hasOurOnlineAccounts;
    }

    /**
     * Returns list of online accounts matching given timestamp.
     */
    // Block::mint() - only wants online accounts with (online) timestamp that matches block's (online) timestamp so they can be added to new block
    public List<OnlineAccountData> getOnlineAccounts(long onlineTimestamp) {
        LOGGER.debug(String.format("caller's timestamp: %d, our timestamps: %s", onlineTimestamp, String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", ")))));

        return new ArrayList<>(Set.copyOf(this.currentOnlineAccounts.getOrDefault(onlineTimestamp, Collections.emptySet())));
    }

    /**
     * Returns list of online accounts with timestamp recent enough to be considered currently online.
     */
    // API: calls this to return list of online accounts - probably expects ALL timestamps - but going to get 'current' from now on
    public List<OnlineAccountData> getOnlineAccounts() {
        // 'current' timestamp
        final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
        if (onlineAccountsTimestamp == null)
            return Collections.emptyList();

        return getOnlineAccounts(onlineAccountsTimestamp);
    }

    // Block processing

    /**
     * Removes previously validated entries from block's online accounts.
     * <p>
     * Checks both 'current' and block caches.
     * <p>
     * Typically called by {@link Block#areOnlineAccountsValid()}
     */
    public void removeKnown(Set<OnlineAccountData> blocksOnlineAccounts, Long timestamp) {
        Set<OnlineAccountData> onlineAccounts = this.currentOnlineAccounts.get(timestamp);

        // If not 'current' timestamp - try block cache instead
        if (onlineAccounts == null)
            onlineAccounts = this.latestBlocksOnlineAccounts.get(timestamp);

        if (onlineAccounts != null)
            blocksOnlineAccounts.removeAll(onlineAccounts);
    }

    /**
     * Adds block's online accounts to one of OnlineAccountManager's caches.
     * <p>
     * It is assumed that the online accounts have been verified.
     * <p>
     * Typically called by {@link Block#areOnlineAccountsValid()}
     */
    public void addBlocksOnlineAccounts(Set<OnlineAccountData> blocksOnlineAccounts, Long timestamp) {
        // If these are current accounts, then there is no need to cache them, and should instead rely
        // on the more complete entries we already have in self.currentOnlineAccounts.
        // Note: since sig-agg, we no longer have individual signatures included in blocks, so we
        // mustn't add anything to currentOnlineAccounts from here.
        if (this.currentOnlineAccounts.containsKey(timestamp))
            return;

        // Add to block cache instead
        this.latestBlocksOnlineAccounts.computeIfAbsent(timestamp, k -> ConcurrentHashMap.newKeySet())
                .addAll(blocksOnlineAccounts);

        // If block cache has grown too large then we need to trim.
        if (this.latestBlocksOnlineAccounts.size() > MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) {
            // However, be careful to trim the opposite end to the entry we just added!
            Long firstKey = this.latestBlocksOnlineAccounts.firstKey();
            if (!firstKey.equals(timestamp))
                this.latestBlocksOnlineAccounts.remove(firstKey);
            else
                this.latestBlocksOnlineAccounts.remove(this.latestBlocksOnlineAccounts.lastKey());
        }
    }

    // Network handlers

    public void onNetworkGetOnlineAccountsV3Message(Peer peer, Message message) {
        GetOnlineAccountsV3Message getOnlineAccountsMessage = (GetOnlineAccountsV3Message) message;

        Map<Long, Map<Byte, byte[]>> peersHashes = getOnlineAccountsMessage.getHashesByTimestampThenByte();
        List<OnlineAccountData> outgoingOnlineAccounts = new ArrayList<>();

        // Warning: no double-checking/fetching - we must be ConcurrentMap compatible!
        // So no contains()-then-get() or multiple get()s on the same key/map.
        // We also use getOrDefault() with emptySet() on currentOnlineAccounts in case corresponding timestamp entry isn't there.
        for (var ourOuterMapEntry : currentOnlineAccountsHashes.entrySet()) {
            Long timestamp = ourOuterMapEntry.getKey();

            var ourInnerMap = ourOuterMapEntry.getValue();
            var peersInnerMap = peersHashes.get(timestamp);

            if (peersInnerMap == null) {
                // Peer doesn't have this timestamp, so if it's valid (i.e. not too old) then we'd have to send all of ours
                Set<OnlineAccountData> timestampsOnlineAccounts = this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet());
                outgoingOnlineAccounts.addAll(timestampsOnlineAccounts);

                LOGGER.trace(() -> String.format("Going to send all %d online accounts for timestamp %d", timestampsOnlineAccounts.size(), timestamp));
            } else {
                // Quick cache of which leading bytes to send so we only have to filter once
                Set<Byte> outgoingLeadingBytes = new HashSet<>();

                // We have entries for this timestamp so compare against peer's entries
                for (var ourInnerMapEntry : ourInnerMap.entrySet()) {
                    Byte leadingByte = ourInnerMapEntry.getKey();
                    byte[] peersHash = peersInnerMap.get(leadingByte);

                    if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) {
                        // For this leading byte: hashes don't match or peer doesn't have entry
                        // Send all online accounts for this timestamp and leading byte
                        outgoingLeadingBytes.add(leadingByte);
                    }
                }

                int beforeAddSize = outgoingOnlineAccounts.size();

                this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()).stream()
                        .filter(account -> outgoingLeadingBytes.contains(account.getPublicKey()[0]))
                        .forEach(outgoingOnlineAccounts::add);

                if (outgoingOnlineAccounts.size() > beforeAddSize)
                    LOGGER.trace(String.format("Going to send %d online accounts for timestamp %d and leading bytes %s",
                            outgoingOnlineAccounts.size() - beforeAddSize,
                            timestamp,
                            outgoingLeadingBytes.stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", "))
                        )
                    );
            }
        }

        peer.sendMessage(new OnlineAccountsV3Message(outgoingOnlineAccounts));

        LOGGER.trace("Sent {} online accounts to {}", outgoingOnlineAccounts.size(), peer);
    }

    public void onNetworkOnlineAccountsV3Message(Peer peer, Message message) {
        OnlineAccountsV3Message onlineAccountsMessage = (OnlineAccountsV3Message) message;

        List<OnlineAccountData> peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts();
        LOGGER.trace("Received {} online accounts from {}", peersOnlineAccounts.size(), peer);

        int importCount = 0;

        // Add any online accounts to the queue that aren't already present
        for (OnlineAccountData onlineAccountData : peersOnlineAccounts) {

            Set<OnlineAccountData> onlineAccounts = this.currentOnlineAccounts.computeIfAbsent(onlineAccountData.getTimestamp(), k -> ConcurrentHashMap.newKeySet());
            if (onlineAccounts.contains(onlineAccountData))
                // We have already validated this online account
                continue;

            boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData);

            if (isNewEntry)
                importCount++;
        }

        if (importCount > 0)
            LOGGER.debug("Added {} online accounts to queue", importCount);
    }
}
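// Round-trip sketch of the V3 sync above: a node broadcasts GetOnlineAccountsV3Message carrying its
// currentOnlineAccountsHashes; each receiver (onNetworkGetOnlineAccountsV3Message) replies with only
// the accounts whose per-leading-byte hash differs from its own, and the reply is fed back through
// onNetworkOnlineAccountsV3Message into onlineAccountsImportQueue for signature and PoW validation.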
@@ -0,0 +1,404 @@
package org.qortal.controller;

import com.rust.litewalletjni.LiteWalletJni;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.json.JSONException;
import org.json.JSONObject;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataReader;
import org.qortal.arbitrary.ArbitraryDataResource;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.crosschain.ForeignBlockchainException;
import org.qortal.crosschain.PirateWallet;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Objects;

public class PirateChainWalletController extends Thread {

    protected static final Logger LOGGER = LogManager.getLogger(PirateChainWalletController.class);

    private static PirateChainWalletController instance;

    final private static long SAVE_INTERVAL = 60 * 60 * 1000L; // 1 hour
    private long lastSaveTime = 0L;

    private boolean running;
    private PirateWallet currentWallet = null;
    private boolean shouldLoadWallet = false;
    private String loadStatus = null;

    private static String qdnWalletSignature = "EsfUw54perxkEtfoUoL7Z97XPrNsZRZXePVZPz3cwRm9qyEPSofD5KmgVpDqVitQp7LhnZRmL6z2V9hEe1YS45T";

    private PirateChainWalletController() {
        this.running = true;
    }

    public static PirateChainWalletController getInstance() {
        if (instance == null)
            instance = new PirateChainWalletController();

        return instance;
    }

    @Override
    public void run() {
        Thread.currentThread().setName("Pirate Chain Wallet Controller");

        try {
            while (running && !Controller.isStopping()) {
                Thread.sleep(1000);

                // Wait until we have a request to load the wallet
                if (!shouldLoadWallet) {
                    continue;
                }

                if (!LiteWalletJni.isLoaded()) {
                    this.loadLibrary();

                    // If still not loaded, sleep to prevent too many requests
                    if (!LiteWalletJni.isLoaded()) {
                        Thread.sleep(5 * 1000);
                        continue;
                    }
                }

                // Wallet is downloaded, so clear the status
                this.loadStatus = null;

                if (this.currentWallet == null) {
                    // Nothing to do yet
                    continue;
                }
                if (this.currentWallet.isNullSeedWallet()) {
                    // Don't sync the null seed wallet
                    continue;
                }

                LOGGER.debug("Syncing Pirate Chain wallet...");
                String response = LiteWalletJni.execute("sync", "");
                LOGGER.debug("sync response: {}", response);

                try {
                    JSONObject json = new JSONObject(response);
                    if (json.has("result")) {
                        String result = json.getString("result");

                        // We may have to set wallet to ready if this is the first ever successful sync
                        if (Objects.equals(result, "success")) {
                            this.currentWallet.setReady(true);
                        }
                    }
                } catch (JSONException e) {
                    LOGGER.info("Unable to interpret JSON", e);
                }

                // Rate limit sync attempts
                Thread.sleep(30000);

                // Save wallet if needed
                Long now = NTP.getTime();
                if (now != null && now - SAVE_INTERVAL >= this.lastSaveTime) {
                    this.saveCurrentWallet();
                }
            }
        } catch (InterruptedException e) {
            // Fall-through to exit
        }
    }

    public void shutdown() {
        // Save the wallet
        this.saveCurrentWallet();

        this.running = false;
        this.interrupt();
    }

// QDN & wallet libraries
|
||||
|
||||
private void loadLibrary() throws InterruptedException {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Check if architecture is supported
|
||||
String libFileName = PirateChainWalletController.getRustLibFilename();
|
||||
if (libFileName == null) {
|
||||
String osName = System.getProperty("os.name");
|
||||
String osArchitecture = System.getProperty("os.arch");
|
||||
this.loadStatus = String.format("Unsupported architecture (%s %s)", osName, osArchitecture);
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if the library exists in the wallets folder
|
||||
Path libDirectory = PirateChainWalletController.getRustLibOuterDirectory();
|
||||
Path libPath = Paths.get(libDirectory.toString(), libFileName);
|
||||
if (Files.exists(libPath)) {
|
||||
// Already downloaded; we can load the library right away
|
||||
LiteWalletJni.loadLibrary();
|
||||
return;
|
||||
}
|
||||
|
||||
// Library not found, so check if we've fetched the resource from QDN
|
||||
ArbitraryTransactionData t = this.getTransactionData(repository);
|
||||
if (t == null || t.getService() == null) {
|
||||
// Can't find the transaction - maybe on a different chain?
|
||||
return;
|
||||
}
|
||||
|
||||
// Wait until we have a sufficient number of peers to attempt QDN downloads
|
||||
List<Peer> handshakedPeers = Network.getInstance().getImmutableHandshakedPeers();
|
||||
if (handshakedPeers.size() < Settings.getInstance().getMinBlockchainPeers()) {
|
||||
// Wait for more peers
|
||||
this.loadStatus = String.format("Searching for peers...");
|
||||
return;
|
||||
}
|
||||
|
||||
// Build resource
|
||||
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(t.getName(),
|
||||
ArbitraryDataFile.ResourceIdType.NAME, t.getService(), t.getIdentifier());
|
||||
try {
|
||||
arbitraryDataReader.loadSynchronously(false);
|
||||
} catch (MissingDataException e) {
|
||||
LOGGER.info("Missing data when loading Pirate Chain library");
|
||||
}
|
||||
|
||||
// Check its status
|
||||
ArbitraryResourceStatus status = ArbitraryTransactionUtils.getStatus(
|
||||
t.getService(), t.getName(), t.getIdentifier(), false);
|
||||
|
||||
if (status.getStatus() != ArbitraryResourceStatus.Status.READY) {
|
||||
LOGGER.info("Not ready yet: {}", status.getTitle());
|
||||
this.loadStatus = String.format("Downloading files from QDN... (%d / %d)", status.getLocalChunkCount(), status.getTotalChunkCount());
|
||||
return;
|
||||
}
|
||||
|
||||
// Files are downloaded, so copy the necessary files to the wallets folder
|
||||
// Delete the wallets/*/lib directory first, in case earlier versions of the wallet are present
|
||||
Path walletsLibDirectory = PirateChainWalletController.getWalletsLibDirectory();
|
||||
if (Files.exists(walletsLibDirectory)) {
|
||||
FilesystemUtils.safeDeleteDirectory(walletsLibDirectory, false);
|
||||
}
|
||||
Files.createDirectories(libDirectory);
|
||||
FileUtils.copyDirectory(arbitraryDataReader.getFilePath().toFile(), libDirectory.toFile());
|
||||
|
||||
// Clear reader cache so only one copy exists
|
||||
ArbitraryDataResource resource = new ArbitraryDataResource(t.getName(),
|
||||
ArbitraryDataFile.ResourceIdType.NAME, t.getService(), t.getIdentifier());
|
||||
resource.deleteCache();
|
||||
|
||||
// Finally, load the library
|
||||
LiteWalletJni.loadLibrary();
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue when loading Pirate Chain library", e);
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Error when loading Pirate Chain library", e);
|
||||
}
|
||||
}
|
||||
|
||||
private ArbitraryTransactionData getTransactionData(Repository repository) {
|
||||
try {
|
||||
byte[] signature = Base58.decode(qdnWalletSignature);
|
||||
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
|
||||
if (!(transactionData instanceof ArbitraryTransactionData))
|
||||
return null;
|
||||
|
||||
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
|
||||
if (arbitraryTransaction != null) {
|
||||
return (ArbitraryTransactionData) arbitraryTransaction.getTransactionData();
|
||||
}
|
||||
|
||||
return null;
|
||||
} catch (DataException e) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public static String getRustLibFilename() {
|
||||
String osName = System.getProperty("os.name");
|
||||
String osArchitecture = System.getProperty("os.arch");
|
||||
|
||||
if (osName.equals("Mac OS X") && osArchitecture.equals("x86_64")) {
|
||||
return "librust-macos-x86_64.dylib";
|
||||
}
|
||||
else if ((osName.equals("Linux") || osName.equals("FreeBSD")) && osArchitecture.equals("aarch64")) {
|
||||
return "librust-linux-aarch64.so";
|
||||
}
|
||||
else if ((osName.equals("Linux") || osName.equals("FreeBSD")) && osArchitecture.equals("amd64")) {
|
||||
return "librust-linux-x86_64.so";
|
||||
}
|
||||
else if (osName.contains("Windows") && osArchitecture.equals("amd64")) {
|
||||
return "librust-windows-x86_64.dll";
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
public static Path getWalletsLibDirectory() {
|
||||
return Paths.get(Settings.getInstance().getWalletsPath(), "PirateChain", "lib");
|
||||
}
|
||||
|
||||
public static Path getRustLibOuterDirectory() {
|
||||
String sigPrefix = qdnWalletSignature.substring(0, 8);
|
||||
return Paths.get(Settings.getInstance().getWalletsPath(), "PirateChain", "lib", sigPrefix);
|
||||
}
|
||||
|
||||
|
||||
// Wallet functions
|
||||
|
||||
public boolean initWithEntropy58(String entropy58) {
|
||||
return this.initWithEntropy58(entropy58, false);
|
||||
}
|
||||
|
||||
public boolean initNullSeedWallet() {
|
||||
return this.initWithEntropy58(Base58.encode(new byte[32]), true);
|
||||
}
|
||||
|
||||
private boolean initWithEntropy58(String entropy58, boolean isNullSeedWallet) {
|
||||
// If the JNI library isn't loaded yet then we can't proceed
|
||||
if (!LiteWalletJni.isLoaded()) {
|
||||
shouldLoadWallet = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
byte[] entropyBytes = Base58.decode(entropy58);
|
||||
|
||||
if (entropyBytes == null || entropyBytes.length != 32) {
|
||||
LOGGER.info("Invalid entropy bytes");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (this.currentWallet != null) {
|
||||
if (this.currentWallet.entropyBytesEqual(entropyBytes)) {
|
||||
// Wallet already active - nothing to do
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
// Different wallet requested - close the existing one and switch over
|
||||
this.closeCurrentWallet();
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
this.currentWallet = new PirateWallet(entropyBytes, isNullSeedWallet);
|
||||
if (!this.currentWallet.isReady()) {
|
||||
// Don't persist wallets that aren't ready
|
||||
this.currentWallet = null;
|
||||
}
|
||||
return true;
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to initialize wallet: {}", e.getMessage());
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private void saveCurrentWallet() {
|
||||
if (this.currentWallet == null) {
|
||||
// Nothing to do
|
||||
return;
|
||||
}
|
||||
try {
|
||||
if (this.currentWallet.save()) {
|
||||
Long now = NTP.getTime();
|
||||
if (now != null) {
|
||||
this.lastSaveTime = now;
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to save wallet");
|
||||
}
|
||||
}
|
||||
|
||||
public PirateWallet getCurrentWallet() {
|
||||
return this.currentWallet;
|
||||
}
|
||||
|
||||
private void closeCurrentWallet() {
|
||||
this.saveCurrentWallet();
|
||||
this.currentWallet = null;
|
||||
}
|
||||
|
||||
public void ensureInitialized() throws ForeignBlockchainException {
|
||||
if (!LiteWalletJni.isLoaded() || this.currentWallet == null || !this.currentWallet.isInitialized()) {
|
||||
throw new ForeignBlockchainException("Pirate wallet isn't initialized yet");
|
||||
}
|
||||
}
|
||||
|
||||
public void ensureNotNullSeed() throws ForeignBlockchainException {
|
||||
// Safety check to make sure funds aren't sent to a null seed wallet
|
||||
if (this.currentWallet == null || this.currentWallet.isNullSeedWallet()) {
|
||||
throw new ForeignBlockchainException("Invalid wallet");
|
||||
}
|
||||
}
|
||||
|
||||
public void ensureSynchronized() throws ForeignBlockchainException {
|
||||
if (this.currentWallet == null || !this.currentWallet.isSynchronized()) {
|
||||
throw new ForeignBlockchainException("Wallet isn't synchronized yet");
|
||||
}
|
||||
|
||||
String response = LiteWalletJni.execute("syncStatus", "");
|
||||
JSONObject json = new JSONObject(response);
|
||||
if (json.has("syncing")) {
|
||||
boolean isSyncing = Boolean.valueOf(json.getString("syncing"));
|
||||
if (isSyncing) {
|
||||
long syncedBlocks = json.getLong("synced_blocks");
|
||||
long totalBlocks = json.getLong("total_blocks");
|
||||
|
||||
throw new ForeignBlockchainException(String.format("Sync in progress (%d / %d). Please try again later.", syncedBlocks, totalBlocks));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public String getSyncStatus() {
|
||||
if (this.currentWallet == null || !this.currentWallet.isInitialized()) {
|
||||
if (this.loadStatus != null) {
|
||||
return this.loadStatus;
|
||||
}
|
||||
|
||||
return "Not initialized yet";
|
||||
}
|
||||
|
||||
String syncStatusResponse = LiteWalletJni.execute("syncStatus", "");
|
||||
org.json.JSONObject json = new JSONObject(syncStatusResponse);
|
||||
if (json.has("syncing")) {
|
||||
boolean isSyncing = Boolean.valueOf(json.getString("syncing"));
|
||||
if (isSyncing) {
|
||||
long syncedBlocks = json.getLong("synced_blocks");
|
||||
long totalBlocks = json.getLong("total_blocks");
|
||||
return String.format("Sync in progress (%d / %d)", syncedBlocks, totalBlocks);
|
||||
}
|
||||
}
|
||||
|
||||
boolean isSynchronized = this.currentWallet.isSynchronized();
|
||||
if (isSynchronized) {
|
||||
return "Synchronized";
|
||||
}
|
||||
|
||||
return "Initializing wallet...";
|
||||
}
|
||||
|
||||
}
|
||||
@@ -19,19 +19,13 @@ import org.qortal.block.BlockChain;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
import org.qortal.data.block.CommonBlockData;
|
||||
import org.qortal.data.network.PeerChainTipData;
|
||||
import org.qortal.data.transaction.RewardShareTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.event.Event;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.message.BlockMessage;
|
||||
import org.qortal.network.message.BlockSummariesMessage;
|
||||
import org.qortal.network.message.GetBlockMessage;
|
||||
import org.qortal.network.message.GetBlockSummariesMessage;
|
||||
import org.qortal.network.message.GetSignaturesV2Message;
|
||||
import org.qortal.network.message.Message;
|
||||
import org.qortal.network.message.SignaturesMessage;
|
||||
import org.qortal.network.message.Message.MessageType;
|
||||
import org.qortal.network.message.*;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
@@ -59,7 +53,8 @@ public class Synchronizer extends Thread {
|
||||
/** Maximum number of block signatures we ask from peer in one go */
|
||||
private static final int MAXIMUM_REQUEST_SIZE = 200; // XXX move to Settings?
|
||||
|
||||
private static final long RECOVERY_MODE_TIMEOUT = 10 * 60 * 1000L; // ms
|
||||
/** Maximum number of consecutive failed sync attempts before marking peer as misbehaved */
|
||||
private static final int MAX_CONSECUTIVE_FAILED_SYNC_ATTEMPTS = 3;
|
||||
|
||||
|
||||
private boolean running;
|
||||
@@ -81,19 +76,39 @@ public class Synchronizer extends Thread {
|
||||
private volatile boolean isSynchronizing = false;
|
||||
/** Temporary estimate of synchronization progress for SysTray use. */
|
||||
private volatile int syncPercent = 0;
|
||||
/** Temporary estimate of blocks remaining for SysTray use. */
|
||||
private volatile int blocksRemaining = 0;
|
||||
|
||||
private static volatile boolean requestSync = false;
|
||||
private boolean syncRequestPending = false;
|
||||
|
||||
// Keep track of invalid blocks so that we don't keep trying to sync them
|
||||
private Map<String, Long> invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
|
||||
private Map<ByteArray, Long> invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
|
||||
public Long timeValidBlockLastReceived = null;
|
||||
public Long timeInvalidBlockLastReceived = null;
|
||||
|
||||
private static Synchronizer instance;
|
||||
|
||||
public enum SynchronizationResult {
|
||||
OK, NOTHING_TO_DO, GENESIS_ONLY, NO_COMMON_BLOCK, TOO_DIVERGENT, NO_REPLY, INFERIOR_CHAIN, INVALID_DATA, NO_BLOCKCHAIN_LOCK, REPOSITORY_ISSUE, SHUTTING_DOWN;
|
||||
OK, NOTHING_TO_DO, GENESIS_ONLY, NO_COMMON_BLOCK, TOO_DIVERGENT, NO_REPLY, INFERIOR_CHAIN, INVALID_DATA, NO_BLOCKCHAIN_LOCK, REPOSITORY_ISSUE, SHUTTING_DOWN, CHAIN_TIP_TOO_OLD;
|
||||
}
|
||||
|
||||
public static class NewChainTipEvent implements Event {
|
||||
private final BlockData priorChainTip;
|
||||
private final BlockData newChainTip;
|
||||
|
||||
public NewChainTipEvent(BlockData priorChainTip, BlockData newChainTip) {
|
||||
this.priorChainTip = priorChainTip;
|
||||
this.newChainTip = newChainTip;
|
||||
}
|
||||
|
||||
public BlockData getPriorChainTip() {
|
||||
return this.priorChainTip;
|
||||
}
|
||||
|
||||
public BlockData getNewChainTip() {
|
||||
return this.newChainTip;
|
||||
}
|
||||
}
|
||||
|
||||
// Constructors
|
||||
@@ -114,6 +129,11 @@ public class Synchronizer extends Thread {
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Synchronizer");
|
||||
|
||||
if (Settings.getInstance().isLite()) {
|
||||
// Lite nodes don't need to sync
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
while (running && !Controller.isStopping()) {
|
||||
Thread.sleep(1000);
|
||||
@@ -153,10 +173,28 @@ public class Synchronizer extends Thread {
|
||||
|
||||
public Integer getSyncPercent() {
|
||||
synchronized (this.syncLock) {
|
||||
// Report as 100% synced if the latest block is within the last 60 mins
|
||||
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
|
||||
if (Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
|
||||
return 100;
|
||||
}
|
||||
|
||||
return this.isSynchronizing ? this.syncPercent : null;
|
||||
}
|
||||
}
|
||||
|
||||
public Integer getBlocksRemaining() {
|
||||
synchronized (this.syncLock) {
|
||||
// Report as 0 blocks remaining if the latest block is within the last 60 mins
|
||||
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
|
||||
if (Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return this.isSynchronizing ? this.blocksRemaining : null;
|
||||
}
|
||||
}
|
||||
|
||||
public void requestSync() {
|
||||
requestSync = true;
|
||||
}
|
||||
@@ -175,7 +213,8 @@ public class Synchronizer extends Thread {
|
||||
if (this.isSynchronizing)
|
||||
return true;
|
||||
|
||||
List<Peer> peers = Network.getInstance().getHandshakedPeers();
|
||||
// Needs a mutable copy of the unmodifiableList
|
||||
List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
|
||||
|
||||
// Disregard peers that have "misbehaved" recently
|
||||
peers.removeIf(Controller.hasMisbehaved);
|
||||
@@ -191,7 +230,8 @@ public class Synchronizer extends Thread {
|
||||
|
||||
checkRecoveryModeForPeers(peers);
|
||||
if (recoveryMode) {
|
||||
peers = Network.getInstance().getHandshakedPeers();
|
||||
// Needs a mutable copy of the unmodifiableList
|
||||
peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
|
||||
peers.removeIf(Controller.hasOnlyGenesisBlock);
|
||||
peers.removeIf(Controller.hasMisbehaved);
|
||||
peers.removeIf(Controller.hasOldVersion);
|
||||
@@ -207,6 +247,9 @@ public class Synchronizer extends Thread {
|
||||
// Disregard peers that are on the same block as last sync attempt and we didn't like their chain
|
||||
peers.removeIf(Controller.hasInferiorChainTip);
|
||||
|
||||
// Disregard peers that have a block with an invalid signer
|
||||
peers.removeIf(Controller.hasInvalidSigner);
|
||||
|
||||
final int peersBeforeComparison = peers.size();
|
||||
|
||||
// Request recent block summaries from the remaining peers, and locate our common block with each
|
||||
@@ -218,6 +261,12 @@ public class Synchronizer extends Thread {
|
||||
// We may have added more inferior chain tips when comparing peers, so remove any peers that are currently on those chains
|
||||
peers.removeIf(Controller.hasInferiorChainTip);
|
||||
|
||||
// Remove any peers that are no longer on a recent block since the last check
|
||||
// Except for times when we're in recovery mode, in which case we need to keep them
|
||||
if (!recoveryMode) {
|
||||
peers.removeIf(Controller.hasNoRecentBlock);
|
||||
}
|
||||
|
||||
final int peersRemoved = peersBeforeComparison - peers.size();
|
||||
if (peersRemoved > 0 && peers.size() > 0)
|
||||
LOGGER.debug(String.format("Ignoring %d peers on inferior chains. Peers remaining: %d", peersRemoved, peers.size()));
|
||||
@@ -250,7 +299,7 @@ public class Synchronizer extends Thread {
|
||||
BlockData priorChainTip = Controller.getInstance().getChainTip();
|
||||
|
||||
synchronized (this.syncLock) {
|
||||
this.syncPercent = (priorChainTip.getHeight() * 100) / peer.getChainTipData().getLastHeight();
|
||||
this.syncPercent = (priorChainTip.getHeight() * 100) / peer.getChainTipData().getHeight();
|
||||
|
||||
// Only update SysTray if we're potentially changing height
|
||||
if (this.syncPercent < 100) {
|
||||
@@ -280,7 +329,7 @@ public class Synchronizer extends Thread {
|
||||
|
||||
case INFERIOR_CHAIN: {
|
||||
// Update our list of inferior chain tips
|
||||
ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
|
||||
ByteArray inferiorChainSignature = ByteArray.wrap(peer.getChainTipData().getSignature());
|
||||
if (!inferiorChainSignatures.contains(inferiorChainSignature))
|
||||
inferiorChainSignatures.add(inferiorChainSignature);
|
||||
|
||||
@@ -288,7 +337,8 @@ public class Synchronizer extends Thread {
|
||||
LOGGER.debug(() -> String.format("Refused to synchronize with peer %s (%s)", peer, syncResult.name()));
|
||||
|
||||
// Notify peer of our superior chain
|
||||
if (!peer.sendMessage(Network.getInstance().buildHeightMessage(peer, priorChainTip)))
|
||||
Message message = Network.getInstance().buildHeightOrChainTipInfo(peer);
|
||||
if (message == null || !peer.sendMessage(message))
|
||||
peer.disconnect("failed to notify peer of our superior chain");
|
||||
break;
|
||||
}
|
||||
@@ -296,6 +346,7 @@ public class Synchronizer extends Thread {
|
||||
case NO_REPLY:
|
||||
case NO_BLOCKCHAIN_LOCK:
|
||||
case REPOSITORY_ISSUE:
|
||||
case CHAIN_TIP_TOO_OLD:
|
||||
// These are minor failure results so fine to try again
|
||||
LOGGER.debug(() -> String.format("Failed to synchronize with peer %s (%s)", peer, syncResult.name()));
|
||||
break;
|
||||
@@ -308,7 +359,7 @@ public class Synchronizer extends Thread {
|
||||
// fall-through...
|
||||
case NOTHING_TO_DO: {
|
||||
// Update our list of inferior chain tips
|
||||
ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
|
||||
ByteArray inferiorChainSignature = ByteArray.wrap(peer.getChainTipData().getSignature());
|
||||
if (!inferiorChainSignatures.contains(inferiorChainSignature))
|
||||
inferiorChainSignatures.add(inferiorChainSignature);
|
||||
|
||||
@@ -336,8 +387,9 @@ public class Synchronizer extends Thread {
|
||||
// Reset our cache of inferior chains
|
||||
inferiorChainSignatures.clear();
|
||||
|
||||
Network network = Network.getInstance();
|
||||
network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newChainTip));
|
||||
Network.getInstance().broadcastOurChain();
|
||||
|
||||
EventBus.INSTANCE.notify(new NewChainTipEvent(priorChainTip, newChainTip));
|
||||
}
|
||||
|
||||
return syncResult;
|
||||
@@ -348,7 +400,7 @@ public class Synchronizer extends Thread {
|
||||
}
|
||||
|
||||
private boolean checkRecoveryModeForPeers(List<Peer> qualifiedPeers) {
|
||||
List<Peer> handshakedPeers = Network.getInstance().getHandshakedPeers();
|
||||
List<Peer> handshakedPeers = Network.getInstance().getImmutableHandshakedPeers();
|
||||
|
||||
if (handshakedPeers.size() > 0) {
|
||||
// There is at least one handshaked peer
|
||||
@@ -362,9 +414,10 @@ public class Synchronizer extends Thread {
|
||||
timePeersLastAvailable = NTP.getTime();
|
||||
|
||||
// If enough time has passed, enter recovery mode, which lifts some restrictions on who we can sync with and when we can mint
|
||||
if (NTP.getTime() - timePeersLastAvailable > RECOVERY_MODE_TIMEOUT) {
|
||||
long recoveryModeTimeout = Settings.getInstance().getRecoveryModeTimeout();
|
||||
if (NTP.getTime() - timePeersLastAvailable > recoveryModeTimeout) {
|
||||
if (recoveryMode == false) {
|
||||
LOGGER.info(String.format("Peers have been unavailable for %d minutes. Entering recovery mode...", RECOVERY_MODE_TIMEOUT/60/1000));
|
||||
LOGGER.info(String.format("Peers have been unavailable for %d minutes. Entering recovery mode...", recoveryModeTimeout/60/1000));
|
||||
recoveryMode = true;
|
||||
}
|
||||
}
|
||||
@@ -382,7 +435,7 @@ public class Synchronizer extends Thread {
|
||||
|
||||
public void addInferiorChainSignature(byte[] inferiorSignature) {
|
||||
// Update our list of inferior chain tips
|
||||
ByteArray inferiorChainSignature = new ByteArray(inferiorSignature);
|
||||
ByteArray inferiorChainSignature = ByteArray.wrap(inferiorSignature);
|
||||
if (!inferiorChainSignatures.contains(inferiorChainSignature))
|
||||
inferiorChainSignatures.add(inferiorChainSignature);
|
||||
}
|
||||
@@ -478,13 +531,13 @@ public class Synchronizer extends Thread {
|
||||
final BlockData ourLatestBlockData = repository.getBlockRepository().getLastBlock();
|
||||
final int ourInitialHeight = ourLatestBlockData.getHeight();
|
||||
|
||||
PeerChainTipData peerChainTipData = peer.getChainTipData();
|
||||
int peerHeight = peerChainTipData.getLastHeight();
|
||||
byte[] peersLastBlockSignature = peerChainTipData.getLastBlockSignature();
|
||||
BlockSummaryData peerChainTipData = peer.getChainTipData();
|
||||
int peerHeight = peerChainTipData.getHeight();
|
||||
byte[] peersLastBlockSignature = peerChainTipData.getSignature();
|
||||
|
||||
byte[] ourLastBlockSignature = ourLatestBlockData.getSignature();
|
||||
LOGGER.debug(String.format("Fetching summaries from peer %s at height %d, sig %.8s, ts %d; our height %d, sig %.8s, ts %d", peer,
|
||||
peerHeight, Base58.encode(peersLastBlockSignature), peer.getChainTipData().getLastBlockTimestamp(),
|
||||
peerHeight, Base58.encode(peersLastBlockSignature), peerChainTipData.getTimestamp(),
|
||||
ourInitialHeight, Base58.encode(ourLastBlockSignature), ourLatestBlockData.getTimestamp()));
|
||||
|
||||
List<BlockSummaryData> peerBlockSummaries = new ArrayList<>();
|
||||
@@ -533,7 +586,7 @@ public class Synchronizer extends Thread {
|
||||
// If our latest block is very old, it's best that we don't try and determine the best peers to sync to.
|
||||
// This is because it can involve very large chain comparisons, which is too intensive.
|
||||
// In reality, most forking problems occur near the chain tips, so we will reserve this functionality for those situations.
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null)
|
||||
return peers;
|
||||
|
||||
@@ -582,7 +635,7 @@ public class Synchronizer extends Thread {
|
||||
// We have already determined that the correct chain diverged from a lower height. We are safe to skip these peers.
|
||||
for (Peer peer : peersSharingCommonBlock) {
|
||||
LOGGER.debug(String.format("Peer %s has common block at height %d but the superior chain is at height %d. Removing it from this round.", peer, commonBlockSummary.getHeight(), dropPeersAfterCommonBlockHeight));
|
||||
this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
|
||||
//this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
|
||||
}
|
||||
continue;
|
||||
}
|
||||
@@ -593,16 +646,18 @@ public class Synchronizer extends Thread {
|
||||
int minChainLength = this.calculateMinChainLengthOfPeers(peersSharingCommonBlock, commonBlockSummary);
|
||||
|
||||
// Fetch block summaries from each peer
|
||||
for (Peer peer : peersSharingCommonBlock) {
|
||||
Iterator peersSharingCommonBlockIterator = peersSharingCommonBlock.iterator();
|
||||
while (peersSharingCommonBlockIterator.hasNext()) {
|
||||
Peer peer = (Peer) peersSharingCommonBlockIterator.next();
|
||||
|
||||
// If we're shutting down, just return the latest peer list
|
||||
if (Controller.isStopping())
|
||||
return peers;
|
||||
|
||||
// Count the number of blocks this peer has beyond our common block
|
||||
final PeerChainTipData peerChainTipData = peer.getChainTipData();
|
||||
final int peerHeight = peerChainTipData.getLastHeight();
|
||||
final byte[] peerLastBlockSignature = peerChainTipData.getLastBlockSignature();
|
||||
final BlockSummaryData peerChainTipData = peer.getChainTipData();
|
||||
final int peerHeight = peerChainTipData.getHeight();
|
||||
final byte[] peerLastBlockSignature = peerChainTipData.getSignature();
|
||||
final int peerAdditionalBlocksAfterCommonBlock = peerHeight - commonBlockSummary.getHeight();
|
||||
// Limit the number of blocks we are comparing. FUTURE: we could request more in batches, but there may not be a case when this is needed
|
||||
int summariesRequired = Math.min(peerAdditionalBlocksAfterCommonBlock, MAXIMUM_REQUEST_SIZE);
|
||||
@@ -650,6 +705,8 @@ public class Synchronizer extends Thread {
|
||||
if (this.containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) {
|
||||
LOGGER.debug("Ignoring peer %s because it holds an invalid block", peer);
|
||||
peers.remove(peer);
|
||||
peersSharingCommonBlockIterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Reduce minChainLength if needed. If we don't have any blocks, this peer will be excluded from chain weight comparisons later in the process, so we shouldn't update minChainLength
|
||||
@@ -688,7 +745,9 @@ public class Synchronizer extends Thread {
|
||||
|
||||
LOGGER.debug(String.format("Listing peers with common block %.8s...", Base58.encode(commonBlockSummary.getSignature())));
|
||||
for (Peer peer : peersSharingCommonBlock) {
|
||||
final int peerHeight = peer.getChainTipData().getLastHeight();
|
||||
BlockSummaryData peerChainTipData = peer.getChainTipData();
|
||||
final int peerHeight = peerChainTipData.getHeight();
|
||||
final Long peerLastBlockTimestamp = peerChainTipData.getTimestamp();
|
||||
final int peerAdditionalBlocksAfterCommonBlock = peerHeight - commonBlockSummary.getHeight();
|
||||
final CommonBlockData peerCommonBlockData = peer.getCommonBlockData();
|
||||
|
||||
@@ -699,6 +758,14 @@ public class Synchronizer extends Thread {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If peer is our of date (since our last check), we should exclude it from this round
|
||||
minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (peerLastBlockTimestamp == null || peerLastBlockTimestamp < minLatestBlockTimestamp) {
|
||||
LOGGER.debug(String.format("Peer %s is out of date - removing it from this round", peer));
|
||||
peers.remove(peer);
|
||||
continue;
|
||||
}
|
||||
|
||||
final List<BlockSummaryData> peerBlockSummariesAfterCommonBlock = peerCommonBlockData.getBlockSummariesAfterCommonBlock();
|
||||
populateBlockSummariesMinterLevels(repository, peerBlockSummariesAfterCommonBlock);
|
||||
|
||||
@@ -777,7 +844,7 @@ public class Synchronizer extends Thread {
|
||||
// Calculate the length of the shortest peer chain sharing this common block
|
||||
int minChainLength = 0;
|
||||
for (Peer peer : peersSharingCommonBlock) {
|
||||
final int peerHeight = peer.getChainTipData().getLastHeight();
|
||||
final int peerHeight = peer.getChainTipData().getHeight();
|
||||
final int peerAdditionalBlocksAfterCommonBlock = peerHeight - commonBlockSummary.getHeight();
|
||||
|
||||
if (peerAdditionalBlocksAfterCommonBlock < minChainLength || minChainLength == 0)
|
||||
@@ -796,6 +863,10 @@ public class Synchronizer extends Thread {
|
||||
|
||||
/* Invalid block signature tracking */
|
||||
|
||||
public Map<ByteArray, Long> getInvalidBlockSignatures() {
|
||||
return this.invalidBlockSignatures;
|
||||
}
|
||||
|
||||
private void addInvalidBlockSignature(byte[] signature) {
|
||||
Long now = NTP.getTime();
|
||||
if (now == null) {
|
||||
@@ -803,8 +874,7 @@ public class Synchronizer extends Thread {
|
||||
}
|
||||
|
||||
// Add or update existing entry
|
||||
String sig58 = Base58.encode(signature);
|
||||
invalidBlockSignatures.put(sig58, now);
|
||||
invalidBlockSignatures.put(ByteArray.wrap(signature), now);
|
||||
}
|
||||
private void deleteOlderInvalidSignatures(Long now) {
|
||||
if (now == null) {
|
||||
@@ -823,17 +893,16 @@ public class Synchronizer extends Thread {
|
||||
}
|
||||
}
|
||||
}
|
||||
private boolean containsInvalidBlockSummary(List<BlockSummaryData> blockSummaries) {
|
||||
public boolean containsInvalidBlockSummary(List<BlockSummaryData> blockSummaries) {
|
||||
if (blockSummaries == null || invalidBlockSignatures == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Loop through our known invalid blocks and check each one against supplied block summaries
|
||||
for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
|
||||
byte[] invalidSignature = Base58.decode(invalidSignature58);
|
||||
for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) {
|
||||
for (BlockSummaryData blockSummary : blockSummaries) {
|
||||
byte[] signature = blockSummary.getSignature();
|
||||
if (Arrays.equals(signature, invalidSignature)) {
|
||||
if (Arrays.equals(signature, invalidSignature.value)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -846,10 +915,9 @@ public class Synchronizer extends Thread {
|
||||
}
|
||||
|
||||
// Loop through our known invalid blocks and check each one against supplied block signatures
|
||||
for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
|
||||
byte[] invalidSignature = Base58.decode(invalidSignature58);
|
||||
for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) {
|
||||
for (byte[] signature : blockSignatures) {
|
||||
if (Arrays.equals(signature, invalidSignature)) {
|
||||
if (Arrays.equals(signature, invalidSignature.value)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -884,13 +952,13 @@ public class Synchronizer extends Thread {
|
||||
final BlockData ourLatestBlockData = repository.getBlockRepository().getLastBlock();
|
||||
final int ourInitialHeight = ourLatestBlockData.getHeight();
|
||||
|
||||
PeerChainTipData peerChainTipData = peer.getChainTipData();
|
||||
int peerHeight = peerChainTipData.getLastHeight();
|
||||
byte[] peersLastBlockSignature = peerChainTipData.getLastBlockSignature();
|
||||
BlockSummaryData peerChainTipData = peer.getChainTipData();
|
||||
int peerHeight = peerChainTipData.getHeight();
|
||||
byte[] peersLastBlockSignature = peerChainTipData.getSignature();
|
||||
|
||||
byte[] ourLastBlockSignature = ourLatestBlockData.getSignature();
|
||||
String syncString = String.format("Synchronizing with peer %s at height %d, sig %.8s, ts %d; our height %d, sig %.8s, ts %d", peer,
|
||||
peerHeight, Base58.encode(peersLastBlockSignature), peer.getChainTipData().getLastBlockTimestamp(),
|
||||
peerHeight, Base58.encode(peersLastBlockSignature), peerChainTipData.getTimestamp(),
|
||||
ourInitialHeight, Base58.encode(ourLastBlockSignature), ourLatestBlockData.getTimestamp());
|
||||
LOGGER.info(syncString);
|
||||
|
||||
@@ -1053,6 +1121,7 @@ public class Synchronizer extends Thread {
|
||||
// If common block is too far behind us then we're on massively different forks so give up.
|
||||
if (!force && testHeight < ourHeight - MAXIMUM_COMMON_DELTA) {
|
||||
LOGGER.info(String.format("Blockchain too divergent with peer %s", peer));
|
||||
peer.setLastTooDivergentTime(NTP.getTime());
|
||||
return SynchronizationResult.TOO_DIVERGENT;
|
||||
}
|
||||
|
||||
@@ -1062,6 +1131,9 @@ public class Synchronizer extends Thread {
|
||||
testHeight = Math.max(testHeight - step, 1);
|
||||
}
|
||||
|
||||
// Peer not considered too divergent
|
||||
peer.setLastTooDivergentTime(0L);
|
||||
|
||||
// Prepend test block's summary as first block summary, as summaries returned are *after* test block
|
||||
BlockSummaryData testBlockSummary = new BlockSummaryData(testBlockData);
|
||||
blockSummariesFromCommon.add(0, testBlockSummary);
|
||||
@@ -1197,7 +1269,14 @@ public class Synchronizer extends Thread {
|
||||
int numberSignaturesRequired = additionalPeerBlocksAfterCommonBlock - peerBlockSignatures.size();
|
||||
|
||||
int retryCount = 0;
|
||||
while (height < peerHeight) {
|
||||
|
||||
// Keep fetching blocks from peer until we reach their tip, or reach a count of MAXIMUM_COMMON_DELTA blocks.
|
||||
// We need to limit the total number, otherwise too much can be loaded into memory, causing an
|
||||
// OutOfMemoryException. This is common when syncing from 1000+ blocks behind the chain tip, after starting
|
||||
// from a small fork that didn't become part of the main chain. This causes the entire sync process to
|
||||
// use syncToPeerChain(), resulting in potentially thousands of blocks being held in memory if the limit
|
||||
// below isn't applied.
|
||||
while (height < peerHeight && peerBlocks.size() <= MAXIMUM_COMMON_DELTA) {
|
||||
if (Controller.isStopping())
|
||||
return SynchronizationResult.SHUTTING_DOWN;
|
||||
|
||||
@@ -1261,6 +1340,16 @@ public class Synchronizer extends Thread {
|
||||
return SynchronizationResult.INVALID_DATA;
|
||||
}
|
||||
|
||||
// Final check to make sure the peer isn't out of date (except for when we're in recovery mode)
|
||||
if (!recoveryMode && peer.getChainTipData() != null) {
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
final Long peerLastBlockTimestamp = peer.getChainTipData().getTimestamp();
|
||||
if (peerLastBlockTimestamp == null || peerLastBlockTimestamp < minLatestBlockTimestamp) {
|
||||
LOGGER.info(String.format("Peer %s is out of date, so abandoning sync attempt", peer));
|
||||
return SynchronizationResult.CHAIN_TIP_TOO_OLD;
|
||||
}
|
||||
}
|
||||
|
||||
byte[] nextPeerSignature = peerBlockSignatures.get(0);
|
||||
int nextHeight = height + 1;
|
||||
|
||||
@@ -1389,6 +1478,12 @@ public class Synchronizer extends Thread {
|
||||
|
||||
repository.saveChanges();
|
||||
|
||||
synchronized (this.syncLock) {
|
||||
if (peer.getChainTipData() != null) {
|
||||
this.blocksRemaining = peer.getChainTipData().getHeight() - newBlock.getBlockData().getHeight();
|
||||
}
|
||||
}
|
||||
|
||||
Controller.getInstance().onNewBlock(newBlock.getBlockData());
|
||||
}
|
||||
|
||||
@@ -1484,6 +1579,12 @@ public class Synchronizer extends Thread {
|
||||
|
||||
repository.saveChanges();
|
||||
|
||||
synchronized (this.syncLock) {
|
||||
if (peer.getChainTipData() != null) {
|
||||
this.blocksRemaining = peer.getChainTipData().getHeight() - newBlock.getBlockData().getHeight();
|
||||
}
|
||||
}
|
||||
|
||||
Controller.getInstance().onNewBlock(newBlock.getBlockData());
|
||||
}
|
||||
|
||||
@@ -1494,12 +1595,19 @@ public class Synchronizer extends Thread {
|
||||
Message getBlockSummariesMessage = new GetBlockSummariesMessage(parentSignature, numberRequested);
|
||||
|
||||
Message message = peer.getResponse(getBlockSummariesMessage);
|
||||
if (message == null || message.getType() != MessageType.BLOCK_SUMMARIES)
|
||||
if (message == null)
|
||||
return null;
|
||||
|
||||
BlockSummariesMessage blockSummariesMessage = (BlockSummariesMessage) message;
|
||||
if (message.getType() == MessageType.BLOCK_SUMMARIES) {
|
||||
BlockSummariesMessage blockSummariesMessage = (BlockSummariesMessage) message;
|
||||
return blockSummariesMessage.getBlockSummaries();
|
||||
}
|
||||
else if (message.getType() == MessageType.BLOCK_SUMMARIES_V2) {
|
||||
BlockSummariesV2Message blockSummariesMessage = (BlockSummariesV2Message) message;
|
||||
return blockSummariesMessage.getBlockSummaries();
|
||||
}
|
||||
|
||||
return blockSummariesMessage.getBlockSummaries();
|
||||
return null;
|
||||
}
|
||||
|
||||
private List<byte[]> getBlockSignatures(Peer peer, byte[] parentSignature, int numberRequested) throws InterruptedException {
|
||||
@@ -1518,12 +1626,35 @@ public class Synchronizer extends Thread {
|
||||
Message getBlockMessage = new GetBlockMessage(signature);
|
||||
|
||||
Message message = peer.getResponse(getBlockMessage);
|
||||
if (message == null || message.getType() != MessageType.BLOCK)
|
||||
if (message == null) {
|
||||
peer.getPeerData().incrementFailedSyncCount();
|
||||
if (peer.getPeerData().getFailedSyncCount() >= MAX_CONSECUTIVE_FAILED_SYNC_ATTEMPTS) {
|
||||
// Several failed attempts, so mark peer as misbehaved
|
||||
LOGGER.info("Marking peer {} as misbehaved due to {} failed sync attempts", peer, peer.getPeerData().getFailedSyncCount());
|
||||
Network.getInstance().peerMisbehaved(peer);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
BlockMessage blockMessage = (BlockMessage) message;
|
||||
// Reset failed sync count now that we have a block response
|
||||
// FUTURE: we could move this to the end of the sync process, but to reduce risk this can be done
|
||||
// at a later stage. For now we are only defending against serialization errors or no responses.
|
||||
peer.getPeerData().setFailedSyncCount(0);
|
||||
|
||||
return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates());
|
||||
switch (message.getType()) {
|
||||
case BLOCK: {
|
||||
BlockMessage blockMessage = (BlockMessage) message;
|
||||
return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates());
|
||||
}
|
||||
|
||||
case BLOCK_V2: {
|
||||
BlockV2Message blockMessage = (BlockV2Message) message;
|
||||
return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStatesHash());
|
||||
}
|
||||
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public void populateBlockSummariesMinterLevels(Repository repository, List<BlockSummaryData> blockSummaries) throws DataException {
|
||||
|
||||
441
src/main/java/org/qortal/controller/TransactionImporter.java
Normal file
441
src/main/java/org/qortal/controller/TransactionImporter.java
Normal file
@@ -0,0 +1,441 @@
|
||||
package org.qortal.controller;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.message.GetTransactionMessage;
|
||||
import org.qortal.network.message.Message;
|
||||
import org.qortal.network.message.TransactionMessage;
|
||||
import org.qortal.network.message.TransactionSignaturesMessage;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class TransactionImporter extends Thread {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(TransactionImporter.class);
|
||||
|
||||
private static TransactionImporter instance;
|
||||
private volatile boolean isStopping = false;
|
||||
|
||||
private static final int MAX_INCOMING_TRANSACTIONS = 5000;
|
||||
|
||||
/** Minimum time before considering an invalid unconfirmed transaction as "stale" */
|
||||
public static final long INVALID_TRANSACTION_STALE_TIMEOUT = 30 * 60 * 1000L; // ms
|
||||
/** Minimum frequency to re-request stale unconfirmed transactions from peers, to recheck validity */
|
||||
public static final long INVALID_TRANSACTION_RECHECK_INTERVAL = 60 * 60 * 1000L; // ms\
|
||||
/** Minimum frequency to re-request expired unconfirmed transactions from peers, to recheck validity
|
||||
* This mainly exists to stop expired transactions from bloating the list */
|
||||
public static final long EXPIRED_TRANSACTION_RECHECK_INTERVAL = 10 * 60 * 1000L; // ms
|
||||
|
||||
|
||||
/** Map of incoming transaction that are in the import queue. Key is transaction data, value is whether signature has been validated. */
|
||||
private final Map<TransactionData, Boolean> incomingTransactions = Collections.synchronizedMap(new HashMap<>());
|
||||
|
||||
/** Map of recent invalid unconfirmed transactions. Key is base58 transaction signature, value is do-not-request expiry timestamp. */
|
||||
private final Map<String, Long> invalidUnconfirmedTransactions = Collections.synchronizedMap(new HashMap<>());
|
||||
|
||||
|
||||
public static synchronized TransactionImporter getInstance() {
|
||||
if (instance == null) {
|
||||
instance = new TransactionImporter();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Transaction Importer");
|
||||
|
||||
try {
|
||||
while (!Controller.isStopping()) {
|
||||
Thread.sleep(500L);
|
||||
|
||||
// Process incoming transactions queue
|
||||
validateTransactionsInQueue();
|
||||
importTransactionsInQueue();
|
||||
|
||||
// Clean up invalid incoming transactions list
|
||||
cleanupInvalidTransactionsList(NTP.getTime());
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// Fall through to exit thread
|
||||
}
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
isStopping = true;
|
||||
this.interrupt();
|
||||
}
|
||||
|
||||
|
||||
// Incoming transactions queue
|
||||
|
||||
private boolean incomingTransactionQueueContains(byte[] signature) {
|
||||
synchronized (incomingTransactions) {
|
||||
return incomingTransactions.keySet().stream().anyMatch(t -> Arrays.equals(t.getSignature(), signature));
|
||||
}
|
||||
}
|
||||
|
||||
private void removeIncomingTransaction(byte[] signature) {
|
||||
incomingTransactions.keySet().removeIf(t -> Arrays.equals(t.getSignature(), signature));
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve all pending unconfirmed transactions that have had their signatures validated.
|
||||
* @return a list of TransactionData objects, with valid signatures.
|
||||
*/
|
||||
private List<TransactionData> getCachedSigValidTransactions() {
|
||||
synchronized (this.incomingTransactions) {
|
||||
return this.incomingTransactions.entrySet().stream()
|
||||
.filter(t -> Boolean.TRUE.equals(t.getValue()))
|
||||
.map(Map.Entry::getKey)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate the signatures of any transactions pending import, then update their
|
||||
* entries in the queue to mark them as valid/invalid.
|
||||
*
|
||||
* No database lock is required.
|
||||
*/
|
||||
private void validateTransactionsInQueue() {
|
||||
if (this.incomingTransactions.isEmpty()) {
|
||||
// Nothing to do?
|
||||
return;
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Take a snapshot of incomingTransactions, so we don't need to lock it while processing
|
||||
Map<TransactionData, Boolean> incomingTransactionsCopy = Map.copyOf(this.incomingTransactions);
|
||||
|
||||
int unvalidatedCount = Collections.frequency(incomingTransactionsCopy.values(), Boolean.FALSE);
|
||||
int validatedCount = 0;
|
||||
|
||||
if (unvalidatedCount > 0) {
|
||||
LOGGER.debug("Validating signatures in incoming transactions queue (size {})...", unvalidatedCount);
|
||||
}
|
||||
|
||||
// A list of all currently pending transactions that have valid signatures
|
||||
List<Transaction> sigValidTransactions = new ArrayList<>();
|
||||
|
||||
// A list of signatures that became valid in this round
|
||||
List<byte[]> newlyValidSignatures = new ArrayList<>();
|
||||
|
||||
boolean isLiteNode = Settings.getInstance().isLite();
|
||||
|
||||
// We need the latest block in order to check for expired transactions
|
||||
BlockData latestBlock = Controller.getInstance().getChainTip();
|
||||
|
||||
// Signature validation round - does not require blockchain lock
|
||||
for (Map.Entry<TransactionData, Boolean> transactionEntry : incomingTransactionsCopy.entrySet()) {
|
||||
// Quick exit?
|
||||
if (isStopping) {
|
||||
return;
|
||||
}
|
||||
|
||||
TransactionData transactionData = transactionEntry.getKey();
|
||||
Transaction transaction = Transaction.fromData(repository, transactionData);
|
||||
String signature58 = Base58.encode(transactionData.getSignature());
|
||||
|
||||
Long now = NTP.getTime();
|
||||
if (now == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Drop expired transactions before they are considered "sig valid"
|
||||
if (latestBlock != null && transaction.getDeadline() <= latestBlock.getTimestamp()) {
|
||||
LOGGER.debug("Removing expired {} transaction {} from import queue", transactionData.getType().name(), signature58);
|
||||
removeIncomingTransaction(transactionData.getSignature());
|
||||
invalidUnconfirmedTransactions.put(signature58, (now + EXPIRED_TRANSACTION_RECHECK_INTERVAL));
|
||||
continue;
|
||||
}
|
||||
|
||||
// Only validate signature if we haven't already done so
|
||||
Boolean isSigValid = transactionEntry.getValue();
|
||||
if (!Boolean.TRUE.equals(isSigValid)) {
|
||||
if (isLiteNode) {
|
||||
// Lite nodes can't easily validate transactions, so for now we will have to assume that everything is valid
|
||||
sigValidTransactions.add(transaction);
|
||||
newlyValidSignatures.add(transactionData.getSignature());
|
||||
// Add mark signature as valid if transaction still exists in import queue
|
||||
incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!transaction.isSignatureValid()) {
|
||||
LOGGER.debug("Ignoring {} transaction {} with invalid signature", transactionData.getType().name(), signature58);
|
||||
removeIncomingTransaction(transactionData.getSignature());
|
||||
|
||||
// Also add to invalidIncomingTransactions map
|
||||
now = NTP.getTime();
|
||||
if (now != null) {
|
||||
Long expiry = now + INVALID_TRANSACTION_RECHECK_INTERVAL;
|
||||
LOGGER.trace("Adding invalid transaction {} to invalidUnconfirmedTransactions...", signature58);
|
||||
// Add to invalidUnconfirmedTransactions so that we don't keep requesting it
|
||||
invalidUnconfirmedTransactions.put(signature58, expiry);
|
||||
}
|
||||
|
||||
// We're done with this transaction
|
||||
continue;
|
||||
}
|
||||
|
||||
// Count the number that were validated in this round, for logging purposes
|
||||
validatedCount++;
|
||||
|
||||
// Add mark signature as valid if transaction still exists in import queue
|
||||
incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE);
|
||||
|
||||
// Signature validated in this round
|
||||
newlyValidSignatures.add(transactionData.getSignature());
|
||||
|
||||
} else {
|
||||
LOGGER.trace(() -> String.format("Transaction %s known to have valid signature", Base58.encode(transactionData.getSignature())));
|
||||
}
|
||||
|
||||
// Signature valid - add to shortlist
|
||||
sigValidTransactions.add(transaction);
|
||||
}
|
||||
|
||||
if (unvalidatedCount > 0) {
|
||||
LOGGER.debug("Finished validating signatures in incoming transactions queue (valid this round: {}, total pending import: {})...", validatedCount, sigValidTransactions.size());
|
||||
}
|
||||
|
||||
if (!newlyValidSignatures.isEmpty()) {
|
||||
LOGGER.debug("Broadcasting {} newly valid signatures ahead of import", newlyValidSignatures.size());
|
||||
Message newTransactionSignatureMessage = new TransactionSignaturesMessage(newlyValidSignatures);
|
||||
Network.getInstance().broadcast(broadcastPeer -> newTransactionSignatureMessage);
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue while processing incoming transactions", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Import any transactions in the queue that have valid signatures.
|
||||
*
|
||||
* A database lock is required.
|
||||
*/
|
||||
private void importTransactionsInQueue() {
|
||||
List<TransactionData> sigValidTransactions = this.getCachedSigValidTransactions();
|
||||
if (sigValidTransactions.isEmpty()) {
|
||||
// Don't bother locking if there are no new transactions to process
|
||||
return;
|
||||
}
|
||||
|
||||
if (Synchronizer.getInstance().isSyncRequested() || Synchronizer.getInstance().isSynchronizing()) {
|
||||
// Prioritize syncing, and don't attempt to lock
|
||||
return;
|
||||
}
|
||||
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
if (!blockchainLock.tryLock()) {
|
||||
LOGGER.debug("Too busy to import incoming transactions queue");
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.debug("Importing incoming transactions queue (size {})...", sigValidTransactions.size());
|
||||
|
||||
int processedCount = 0;
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Import transactions with valid signatures
|
||||
try {
|
||||
for (int i = 0; i < sigValidTransactions.size(); ++i) {
|
||||
if (isStopping) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (Synchronizer.getInstance().isSyncRequestPending()) {
|
||||
LOGGER.debug("Breaking out of transaction importing with {} remaining, because a sync request is pending", sigValidTransactions.size() - i);
|
||||
return;
|
||||
}
|
||||
|
||||
TransactionData transactionData = sigValidTransactions.get(i);
|
||||
Transaction transaction = Transaction.fromData(repository, transactionData);
|
||||
|
||||
Transaction.ValidationResult validationResult = transaction.importAsUnconfirmed();
|
||||
processedCount++;
|
||||
|
||||
switch (validationResult) {
|
||||
case TRANSACTION_ALREADY_EXISTS: {
|
||||
LOGGER.trace(() -> String.format("Ignoring existing transaction %s", Base58.encode(transactionData.getSignature())));
|
||||
break;
|
||||
}
|
||||
|
||||
case NO_BLOCKCHAIN_LOCK: {
|
||||
// Is this even possible considering we acquired blockchain lock above?
|
||||
LOGGER.trace(() -> String.format("Couldn't lock blockchain to import unconfirmed transaction %s", Base58.encode(transactionData.getSignature())));
|
||||
break;
|
||||
}
|
||||
|
||||
case OK: {
|
||||
LOGGER.debug(() -> String.format("Imported %s transaction %s", transactionData.getType().name(), Base58.encode(transactionData.getSignature())));
|
||||
break;
|
||||
}
|
||||
|
||||
// All other invalid cases:
|
||||
default: {
|
||||
final String signature58 = Base58.encode(transactionData.getSignature());
|
||||
LOGGER.debug(() -> String.format("Ignoring invalid (%s) %s transaction %s", validationResult.name(), transactionData.getType().name(), signature58));
|
||||
|
||||
Long now = NTP.getTime();
|
||||
if (now != null && now - transactionData.getTimestamp() > INVALID_TRANSACTION_STALE_TIMEOUT) {
|
||||
Long expiryLength = INVALID_TRANSACTION_RECHECK_INTERVAL;
|
||||
|
||||
if (validationResult == Transaction.ValidationResult.TIMESTAMP_TOO_OLD) {
|
||||
// Use shorter recheck interval for expired transactions
|
||||
expiryLength = EXPIRED_TRANSACTION_RECHECK_INTERVAL;
|
||||
}
|
||||
|
||||
Long expiry = now + expiryLength;
|
||||
LOGGER.trace("Adding stale invalid transaction {} to invalidUnconfirmedTransactions...", signature58);
|
||||
// Invalid, unconfirmed transaction has become stale - add to invalidUnconfirmedTransactions so that we don't keep requesting it
|
||||
invalidUnconfirmedTransactions.put(signature58, expiry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Transaction has been processed, even if only to reject it
|
||||
removeIncomingTransaction(transactionData.getSignature());
|
||||
}
|
||||
} finally {
|
||||
LOGGER.debug("Finished importing {} incoming transaction{}", processedCount, (processedCount == 1 ? "" : "s"));
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue while importing incoming transactions", e);
|
||||
}
|
||||
}
|
||||
|
||||
private void cleanupInvalidTransactionsList(Long now) {
|
||||
if (now == null) {
|
||||
return;
|
||||
}
|
||||
// Periodically remove invalid unconfirmed transactions from the list, so that they can be fetched again
|
||||
invalidUnconfirmedTransactions.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < now);
|
||||
}
|
||||
|
||||
|
||||
// Network handlers
|
||||
|
||||
public void onNetworkTransactionMessage(Peer peer, Message message) {
|
||||
TransactionMessage transactionMessage = (TransactionMessage) message;
|
||||
TransactionData transactionData = transactionMessage.getTransactionData();
|
||||
|
||||
if (this.incomingTransactions.size() < MAX_INCOMING_TRANSACTIONS) {
|
||||
synchronized (this.incomingTransactions) {
|
||||
if (!incomingTransactionQueueContains(transactionData.getSignature())) {
|
||||
this.incomingTransactions.put(transactionData, Boolean.FALSE);
|
||||
}
|
||||
}
|
||||
}
|
||||
}

	public void onNetworkGetTransactionMessage(Peer peer, Message message) {
		GetTransactionMessage getTransactionMessage = (GetTransactionMessage) message;
		byte[] signature = getTransactionMessage.getSignature();

		try (final Repository repository = RepositoryManager.getRepository()) {
			// Firstly check the sig-valid transactions that are currently queued for import
			TransactionData transactionData = this.getCachedSigValidTransactions().stream()
					.filter(t -> Arrays.equals(signature, t.getSignature()))
					.findFirst().orElse(null);

			if (transactionData == null) {
				// Not found in import queue, so try the database
				transactionData = repository.getTransactionRepository().fromSignature(signature);
			}

			if (transactionData == null) {
				// Still not found - so we don't have this transaction
				LOGGER.debug(() -> String.format("Ignoring GET_TRANSACTION request from peer %s for unknown transaction %s", peer, Base58.encode(signature)));
				// Send no response at all???
				return;
			}

			Message transactionMessage = new TransactionMessage(transactionData);
			transactionMessage.setId(message.getId());
			if (!peer.sendMessage(transactionMessage))
				peer.disconnect("failed to send transaction");
		} catch (DataException e) {
			LOGGER.error(String.format("Repository issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
		} catch (TransformationException e) {
			LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
		}
	}

	public void onNetworkGetUnconfirmedTransactionsMessage(Peer peer, Message message) {
		try (final Repository repository = RepositoryManager.getRepository()) {
			List<byte[]> signatures = Collections.emptyList();

			// If we're NOT up-to-date then don't send out unconfirmed transactions
			// as it's possible they are already included in a later block that we don't have.
			if (Controller.getInstance().isUpToDate())
				signatures = repository.getTransactionRepository().getUnconfirmedTransactionSignatures();

			Message transactionSignaturesMessage = new TransactionSignaturesMessage(signatures);
			if (!peer.sendMessage(transactionSignaturesMessage))
				peer.disconnect("failed to send unconfirmed transaction signatures");
		} catch (DataException e) {
			LOGGER.error(String.format("Repository issue while sending unconfirmed transaction signatures to peer %s", peer), e);
		}
	}

	public void onNetworkTransactionSignaturesMessage(Peer peer, Message message) {
		TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) message;
		List<byte[]> signatures = transactionSignaturesMessage.getSignatures();

		try (final Repository repository = RepositoryManager.getRepository()) {
			for (byte[] signature : signatures) {
				String signature58 = Base58.encode(signature);
				if (invalidUnconfirmedTransactions.containsKey(signature58)) {
					// Previously invalid transaction - don't keep requesting it
					// It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
					continue;
				}

				// Ignore if this transaction is in the queue
				if (incomingTransactionQueueContains(signature)) {
					LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peer));
					continue;
				}

				// Do we have it already? (Before requesting transaction data itself)
				if (repository.getTransactionRepository().exists(signature)) {
					LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(signature), peer));
					continue;
				}

				// Check isInterrupted() here and exit fast
				if (Thread.currentThread().isInterrupted())
					return;

				// Fetch actual transaction data from peer
				Message getTransactionMessage = new GetTransactionMessage(signature);
				if (!peer.sendMessage(getTransactionMessage)) {
					peer.disconnect("failed to request transaction");
					return;
				}
			}
		} catch (DataException e) {
			LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer %s", peer), e);
		}
	}

}
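The signature handler above decides, per advertised signature, whether to skip or fetch. A hedged sketch of that decision order, with hypothetical interfaces standing in for the repository, queue and peer calls:

import java.util.List;

// Sketch of the filtering order used by the signature handler above.
// The Checks interface and base58 function are illustrative stand-ins, not Qortal APIs.
public class SignatureGossipFilter {
	public interface Checks {
		boolean isKnownInvalid(String signature58);
		boolean isQueued(byte[] signature);
		boolean existsInRepository(byte[] signature);
		boolean requestFromPeer(byte[] signature); // send a GET_TRANSACTION-style request; false on send failure
	}

	/** Walk the advertised signatures and request only those we don't already know about. */
	public void process(List<byte[]> signatures, Checks checks, java.util.function.Function<byte[], String> base58) {
		for (byte[] signature : signatures) {
			String signature58 = base58.apply(signature);
			if (checks.isKnownInvalid(signature58))
				continue; // previously invalid - wait for the periodic recheck window
			if (checks.isQueued(signature))
				continue; // already waiting for import
			if (checks.existsInRepository(signature))
				continue; // already held locally
			if (!checks.requestFromPeer(signature))
				return;   // peer failed to take the request - stop early, as the handler above does
		}
	}
}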
|
||||
@@ -11,10 +11,7 @@ import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.transaction.Transaction.TransactionType;
|
||||
import org.qortal.utils.ArbitraryTransactionUtils;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.FilesystemUtils;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.*;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
@@ -137,7 +134,7 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
|
||||
// Fetch the transaction data
|
||||
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
|
||||
if (arbitraryTransactionData == null) {
|
||||
if (arbitraryTransactionData == null || arbitraryTransactionData.getService() == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -180,9 +177,6 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
arbitraryTransactionData.getName(), Base58.encode(signature)));
|
||||
|
||||
ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
|
||||
|
||||
// We should also remove peers for this transaction from the lookup table to save space
|
||||
this.removePeersHostingTransactionData(repository, arbitraryTransactionData);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -207,7 +201,7 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
|
||||
if (completeFileExists && !allChunksExist) {
|
||||
// We have the complete file but not the chunks, so let's convert it
|
||||
LOGGER.info(String.format("Transaction %s has complete file but no chunks",
|
||||
LOGGER.debug(String.format("Transaction %s has complete file but no chunks",
|
||||
Base58.encode(arbitraryTransactionData.getSignature())));
|
||||
|
||||
ArbitraryTransactionUtils.convertFileToChunks(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
|
||||
@@ -222,7 +216,11 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Check if there are any hosted files that don't have matching transactions
|
||||
this.checkForExpiredTransactions(repository);
|
||||
// UPDATE: This has been disabled for now as it was deleting valid transactions
|
||||
// and causing chunks to go missing on the network. If ever re-enabled, we MUST
|
||||
// ensure that original copies of data aren't deleted, and that sufficient time
|
||||
// is allowed (ideally several hours) before treating a transaction as missing.
|
||||
// this.checkForExpiredTransactions(repository);
|
||||
|
||||
// Delete additional data at random if we're over our storage limit
|
||||
// Use the DELETION_THRESHOLD so that we only start deleting once the hard limit is reached
|
||||
@@ -238,7 +236,7 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
|
||||
// Delete random data associated with name if we're over our storage limit for this name
|
||||
// Use the DELETION_THRESHOLD, for the same reasons as above
|
||||
for (String followedName : storageManager.followedNames()) {
|
||||
for (String followedName : ListUtils.followedNames()) {
|
||||
if (isStopping) {
|
||||
return;
|
||||
}
|
||||
@@ -433,16 +431,6 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
return false;
|
||||
}
|
||||
|
||||
private void removePeersHostingTransactionData(Repository repository, ArbitraryTransactionData transactionData) {
|
||||
byte[] signature = transactionData.getSignature();
|
||||
try {
|
||||
repository.getArbitraryRepository().deleteArbitraryPeersWithSignature(signature);
|
||||
repository.saveChanges();
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to delete peers from lookup table for signature: {}", Base58.encode(signature));
|
||||
}
|
||||
}
|
||||
|
||||
private void cleanupTempDirectory(String folder, long now, long minAge) {
|
||||
String baseDir = Settings.getInstance().getTempDataPath();
|
||||
Path tempDir = Paths.get(baseDir, folder);
|
||||
@@ -496,7 +484,7 @@ public class ArbitraryDataCleanupManager extends Thread {
|
||||
|
||||
// Delete data relating to blocked names
|
||||
String name = directory.getName();
|
||||
if (name != null && storageManager.isNameBlocked(name)) {
|
||||
if (name != null && ListUtils.isNameBlocked(name)) {
|
||||
this.safeDeleteDirectory(directory, "blocked name");
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile;
|
||||
import org.qortal.arbitrary.ArbitraryDataFileChunk;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.arbitrary.ArbitraryDirectConnectionInfo;
|
||||
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
|
||||
import org.qortal.data.arbitrary.ArbitraryRelayInfo;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
@@ -18,17 +20,21 @@ import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.ListUtils;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
import static org.qortal.controller.arbitrary.ArbitraryDataFileManager.MAX_FILE_HASH_RESPONSES;
|
||||
|
||||
public class ArbitraryDataFileListManager {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileListManager.class);
|
||||
|
||||
private static ArbitraryDataFileListManager instance;
|
||||
|
||||
private static String MIN_PEER_VERSION_FOR_FILE_LIST_STATS = "3.2.0";
|
||||
|
||||
/**
|
||||
* Map of recent incoming requests for ARBITRARY transaction data file lists.
|
||||
@@ -58,9 +64,12 @@ public class ArbitraryDataFileListManager {
|
||||
|
||||
|
||||
/** Maximum number of seconds that a file list relay request is able to exist on the network */
|
||||
private static long RELAY_REQUEST_MAX_DURATION = 5000L;
|
||||
public static long RELAY_REQUEST_MAX_DURATION = 5000L;
|
||||
/** Maximum number of hops that a file list relay request is allowed to make */
|
||||
private static int RELAY_REQUEST_MAX_HOPS = 4;
|
||||
public static int RELAY_REQUEST_MAX_HOPS = 4;
|
||||
|
||||
/** Minimum peer version to use relay */
|
||||
public static String RELAY_MIN_PEER_VERSION = "3.4.0";
|
||||
|
||||
|
||||
private ArbitraryDataFileListManager() {
|
||||
@@ -115,19 +124,29 @@
}
}

// Then allow another 5 attempts, each 5 minutes apart
if (timeSinceLastAttempt > 5 * 60 * 1000L) {
// We haven't tried for at least 5 minutes
// Then allow another 5 attempts, each 1 minute apart
if (timeSinceLastAttempt > 60 * 1000L) {
// We haven't tried for at least 1 minute

if (networkBroadcastCount < 5) {
// We've made less than 5 total attempts
if (networkBroadcastCount < 8) {
// We've made less than 8 total attempts
return true;
}
}

// From then on, only try once every 24 hours, to reduce network spam
if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) {
// We haven't tried for at least 24 hours
// Then allow another 8 attempts, each 15 minutes apart
if (timeSinceLastAttempt > 15 * 60 * 1000L) {
// We haven't tried for at least 15 minutes

if (networkBroadcastCount < 16) {
// We've made less than 16 total attempts
return true;
}
}

// From then on, only try once every 6 hours, to reduce network spam
if (timeSinceLastAttempt > 6 * 60 * 60 * 1000L) {
// We haven't tried for at least 6 hours
return true;
}
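The replacement lines in this hunk tighten the retry schedule: up to 8 broadcasts at least a minute apart, then up to 16 at least 15 minutes apart, then one attempt every 6 hours. A flattened, illustrative sketch of that gate (the class and method names are hypothetical):

// Flattened approximation of the tiered back-off visible in the new lines above.
public class RequestBackoff {
	public static boolean shouldRetry(int attempts, long timeSinceLastAttempt) {
		// First tier: up to 8 attempts, at least 1 minute apart
		if (timeSinceLastAttempt > 60 * 1000L && attempts < 8)
			return true;

		// Second tier: up to 16 attempts, at least 15 minutes apart
		if (timeSinceLastAttempt > 15 * 60 * 1000L && attempts < 16)
			return true;

		// After that, only retry once every 6 hours to reduce network spam
		return timeSinceLastAttempt > 6 * 60 * 60 * 1000L;
	}
}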
|
||||
|
||||
@@ -179,8 +198,8 @@ public class ArbitraryDataFileListManager {
|
||||
}
|
||||
}
|
||||
|
||||
if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) {
|
||||
// We haven't tried for at least 24 hours
|
||||
if (timeSinceLastAttempt > 60 * 60 * 1000L) {
|
||||
// We haven't tried for at least 1 hour
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -240,8 +259,6 @@ public class ArbitraryDataFileListManager {
|
||||
// Lookup file lists by signature (and optionally hashes)
|
||||
|
||||
public boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) {
|
||||
byte[] digest = arbitraryTransactionData.getData();
|
||||
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
|
||||
byte[] signature = arbitraryTransactionData.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
|
||||
@@ -263,13 +280,12 @@ public class ArbitraryDataFileListManager {
|
||||
}
|
||||
this.addToSignatureRequests(signature58, true, false);
|
||||
|
||||
List<Peer> handshakedPeers = Network.getInstance().getHandshakedPeers();
|
||||
List<Peer> handshakedPeers = Network.getInstance().getImmutableHandshakedPeers();
|
||||
List<byte[]> missingHashes = null;
|
||||
|
||||
// Find hashes that we are missing
|
||||
try {
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
|
||||
arbitraryDataFile.setMetadataHash(metadataHash);
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
|
||||
missingHashes = arbitraryDataFile.missingHashes();
|
||||
} catch (DataException e) {
|
||||
// Leave missingHashes as null, so that all hashes are requested
|
||||
@@ -278,8 +294,11 @@ public class ArbitraryDataFileListManager {
|
||||
|
||||
LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to %d peers...", signature58, hashCount, handshakedPeers.size()));
|
||||
|
||||
// Send our address as requestingPeer, to allow for potential direct connections with seeds/peers
|
||||
String requestingPeer = Network.getInstance().getOurExternalIpAddressAndPort();
|
||||
|
||||
// Build request
|
||||
Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, missingHashes, now, 0);
|
||||
Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, missingHashes, now, 0, requestingPeer);
|
||||
|
||||
// Save our request into requests map
|
||||
Triple<String, Peer, Long> requestEntry = new Triple<>(signature58, null, NTP.getTime());
|
||||
@@ -337,7 +356,7 @@ public class ArbitraryDataFileListManager {
|
||||
// This could be optimized in the future
|
||||
long timestamp = now - 60000L;
|
||||
List<byte[]> hashes = null;
|
||||
Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, timestamp, 0);
|
||||
Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, timestamp, 0, null);
|
||||
|
||||
// Save our request into requests map
|
||||
Triple<String, Peer, Long> requestEntry = new Triple<>(signature58, null, NTP.getTime());
|
||||
@@ -403,6 +422,13 @@ public class ArbitraryDataFileListManager {
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
|
||||
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
|
||||
|
||||
if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
|
||||
long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
|
||||
LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
|
||||
totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
|
||||
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
|
||||
}
|
||||
|
||||
// Do we have a pending request for this data?
|
||||
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
|
||||
if (request == null || request.getA() == null) {
|
||||
@@ -423,7 +449,6 @@ public class ArbitraryDataFileListManager {
|
||||
}
|
||||
|
||||
ArbitraryTransactionData arbitraryTransactionData = null;
|
||||
ArbitraryDataFileManager arbitraryDataFileManager = ArbitraryDataFileManager.getInstance();
|
||||
|
||||
// Check transaction exists and hashes are correct
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
@@ -433,10 +458,9 @@ public class ArbitraryDataFileListManager {
|
||||
|
||||
arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
|
||||
|
||||
// Load data file(s)
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
|
||||
arbitraryDataFile.setMetadataHash(arbitraryTransactionData.getMetadataHash());
|
||||
|
||||
// // Load data file(s)
|
||||
// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
|
||||
//
|
||||
// // Check all hashes exist
|
||||
// for (byte[] hash : hashes) {
|
||||
// //LOGGER.debug("Received hash {}", Base58.encode(hash));
|
||||
@@ -450,16 +474,28 @@ public class ArbitraryDataFileListManager {
|
||||
// }
|
||||
|
||||
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
|
||||
// Keep track of the hashes this peer reports to have access to
|
||||
Long now = NTP.getTime();
|
||||
for (byte[] hash : hashes) {
|
||||
String hash58 = Base58.encode(hash);
|
||||
String sig58 = Base58.encode(signature);
|
||||
ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.put(hash58, new Triple<>(peer, sig58, now));
|
||||
|
||||
if (ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.size() < MAX_FILE_HASH_RESPONSES) {
|
||||
// Keep track of the hashes this peer reports to have access to
|
||||
for (byte[] hash : hashes) {
|
||||
String hash58 = Base58.encode(hash);
|
||||
|
||||
// Treat null request hops as 100, so that they are able to be sorted (and put to the end of the list)
|
||||
int requestHops = arbitraryDataFileListMessage.getRequestHops() != null ? arbitraryDataFileListMessage.getRequestHops() : 100;
|
||||
|
||||
ArbitraryFileListResponseInfo responseInfo = new ArbitraryFileListResponseInfo(hash58, signature58,
|
||||
peer, now, arbitraryDataFileListMessage.getRequestTime(), requestHops);
|
||||
|
||||
ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.add(responseInfo);
|
||||
}
|
||||
}
|
||||
|
||||
// Go and fetch the actual data, since this isn't a relay request
|
||||
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, hashes);
|
||||
// Keep track of the source peer, for direct connections
|
||||
if (arbitraryDataFileListMessage.getPeerAddress() != null) {
|
||||
ArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
|
||||
new ArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
|
||||
}
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
@@ -468,21 +504,41 @@ public class ArbitraryDataFileListManager {
|
||||
|
||||
// Forwarding
|
||||
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
|
||||
boolean isBlocked = (arbitraryTransactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(arbitraryTransactionData.getName()));
|
||||
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
|
||||
if (!isBlocked) {
|
||||
Peer requestingPeer = request.getB();
|
||||
if (requestingPeer != null) {
|
||||
Long requestTime = arbitraryDataFileListMessage.getRequestTime();
|
||||
Integer requestHops = arbitraryDataFileListMessage.getRequestHops();
|
||||
|
||||
// Add each hash to our local mapping so we know who to ask later
|
||||
Long now = NTP.getTime();
|
||||
for (byte[] hash : hashes) {
|
||||
String hash58 = Base58.encode(hash);
|
||||
ArbitraryRelayInfo relayMap = new ArbitraryRelayInfo(hash58, signature58, peer, now);
|
||||
ArbitraryDataFileManager.getInstance().addToRelayMap(relayMap);
|
||||
ArbitraryRelayInfo relayInfo = new ArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
|
||||
ArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
|
||||
}
|
||||
|
||||
// Bump requestHops if it exists
|
||||
if (requestHops != null) {
|
||||
requestHops++;
|
||||
}
|
||||
|
||||
ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
|
||||
|
||||
// Remove optional parameters if the requesting peer doesn't support it yet
|
||||
// A message with less statistical data is better than no message at all
|
||||
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
|
||||
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
|
||||
} else {
|
||||
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
|
||||
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
|
||||
}
|
||||
forwardArbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
// Forward to requesting peer
|
||||
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
|
||||
if (!requestingPeer.sendMessage(arbitraryDataFileListMessage)) {
|
||||
if (!requestingPeer.sendMessage(forwardArbitraryDataFileListMessage)) {
|
||||
requestingPeer.disconnect("failed to forward arbitrary data file list");
|
||||
}
|
||||
}
|
||||
@@ -501,21 +557,30 @@ public class ArbitraryDataFileListManager {
|
||||
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
|
||||
byte[] signature = getArbitraryDataFileListMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
|
||||
Long now = NTP.getTime();
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
|
||||
|
||||
// If we've seen this request recently, then ignore
|
||||
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
|
||||
LOGGER.debug("Ignoring hash list request from peer {} for signature {}", peer, signature58);
|
||||
LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.debug("Received hash list request from peer {} for signature {}", peer, signature58);
|
||||
List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
|
||||
int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
|
||||
String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();
|
||||
|
||||
if (requestingPeer != null) {
|
||||
LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
|
||||
}
|
||||
else {
|
||||
LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
|
||||
}
|
||||
|
||||
List<byte[]> hashes = new ArrayList<>();
|
||||
ArbitraryTransactionData transactionData = null;
|
||||
boolean allChunksExist = false;
|
||||
boolean hasMetadata = false;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
@@ -526,12 +591,8 @@ public class ArbitraryDataFileListManager {
|
||||
// Check if we're even allowed to serve data for this transaction
|
||||
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
|
||||
|
||||
byte[] hash = transactionData.getData();
|
||||
byte[] metadataHash = transactionData.getMetadataHash();
|
||||
|
||||
// Load file(s) and add any that exist to the list of hashes
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
|
||||
arbitraryDataFile.setMetadataHash(metadataHash);
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
|
||||
|
||||
// If the peer didn't supply a hash list, we need to return all hashes for this transaction
|
||||
if (requestedHashes == null || requestedHashes.isEmpty()) {
|
||||
@@ -540,6 +601,7 @@ public class ArbitraryDataFileListManager {
|
||||
// Add the metadata file
|
||||
if (arbitraryDataFile.getMetadataHash() != null) {
|
||||
requestedHashes.add(arbitraryDataFile.getMetadataHash());
|
||||
hasMetadata = true;
|
||||
}
|
||||
|
||||
// Add the chunk hashes
|
||||
@@ -572,9 +634,18 @@ public class ArbitraryDataFileListManager {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e);
|
||||
}
|
||||
|
||||
// If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
|
||||
// or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
|
||||
if (hasMetadata && hashes.size() == 1) {
|
||||
hashes.clear();
|
||||
}
|
||||
|
||||
// We should only respond if we have at least one hash
|
||||
if (hashes.size() > 0) {
|
||||
|
||||
// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
|
||||
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);
|
||||
|
||||
// We have all the chunks, so update requests map to reflect that we've sent it
|
||||
// There is no need to keep track of the request, as we can serve all the chunks
|
||||
if (allChunksExist) {
|
||||
@@ -582,8 +653,20 @@ public class ArbitraryDataFileListManager {
|
||||
arbitraryDataFileListRequests.put(message.getId(), newEntry);
|
||||
}
|
||||
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
|
||||
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
|
||||
ArbitraryDataFileListMessage arbitraryDataFileListMessage;
|
||||
|
||||
// Remove optional parameters if the requesting peer doesn't support it yet
|
||||
// A message with less statistical data is better than no message at all
|
||||
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
|
||||
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
|
||||
} else {
|
||||
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
|
||||
hashes, NTP.getTime(), 0, ourAddress, true);
|
||||
}
|
||||
|
||||
arbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
if (!peer.sendMessage(arbitraryDataFileListMessage)) {
|
||||
LOGGER.debug("Couldn't send list of hashes");
|
||||
peer.disconnect("failed to send list of hashes");
|
||||
@@ -600,13 +683,12 @@ public class ArbitraryDataFileListManager {
|
||||
}
|
||||
|
||||
// We may need to forward this request on
|
||||
boolean isBlocked = (transactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName()));
|
||||
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
|
||||
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
|
||||
// In relay mode - so ask our other peers if they have it
|
||||
|
||||
long requestTime = getArbitraryDataFileListMessage.getRequestTime();
|
||||
int requestHops = getArbitraryDataFileListMessage.getRequestHops();
|
||||
getArbitraryDataFileListMessage.setRequestHops(++requestHops);
|
||||
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
|
||||
long totalRequestTime = now - requestTime;
|
||||
|
||||
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
|
||||
@@ -614,11 +696,15 @@ public class ArbitraryDataFileListManager {
|
||||
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
|
||||
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
|
||||
|
||||
Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
|
||||
relayGetArbitraryDataFileListMessage.setId(message.getId());
|
||||
|
||||
LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
|
||||
Network.getInstance().broadcast(
|
||||
broadcastPeer -> broadcastPeer == peer ||
|
||||
Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost())
|
||||
? null : getArbitraryDataFileListMessage);
|
||||
broadcastPeer ->
|
||||
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
|
||||
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
|
||||
);
|
||||
|
||||
}
|
||||
else {
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import com.google.common.net.InetAddresses;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.arbitrary.ArbitraryDirectConnectionInfo;
|
||||
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
|
||||
import org.qortal.data.arbitrary.ArbitraryRelayInfo;
|
||||
import org.qortal.data.network.ArbitraryPeerData;
|
||||
import org.qortal.data.network.PeerData;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.network.Network;
|
||||
@@ -18,7 +20,6 @@ import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.ArbitraryTransactionUtils;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import java.security.SecureRandom;
|
||||
import java.util.*;
|
||||
@@ -37,7 +38,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
/**
|
||||
* Map to keep track of our in progress (outgoing) arbitrary data file requests
|
||||
*/
|
||||
private Map<String, Long> arbitraryDataFileRequests = Collections.synchronizedMap(new HashMap<>());
|
||||
public Map<String, Long> arbitraryDataFileRequests = Collections.synchronizedMap(new HashMap<>());
|
||||
|
||||
/**
|
||||
* Map to keep track of hashes that we might need to relay
|
||||
@@ -45,11 +46,24 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
public List<ArbitraryRelayInfo> arbitraryRelayMap = Collections.synchronizedList(new ArrayList<>());
|
||||
|
||||
/**
|
||||
* Map to keep track of any arbitrary data file hash responses
|
||||
* Key: string - the hash encoded in base58
|
||||
* Value: Triple<respondingPeer, signature58, timeResponded>
|
||||
* List to keep track of any arbitrary data file hash responses
|
||||
*/
|
||||
public Map<String, Triple<Peer, String, Long>> arbitraryDataFileHashResponses = Collections.synchronizedMap(new HashMap<>());
|
||||
public final List<ArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());
|
||||
|
||||
/**
|
||||
* List to keep track of peers potentially available for direct connections, based on recent requests
|
||||
*/
|
||||
private List<ArbitraryDirectConnectionInfo> directConnectionInfo = Collections.synchronizedList(new ArrayList<>());
|
||||
|
||||
/**
|
||||
* Map to keep track of peers requesting QDN data that we hold.
|
||||
* Key = peer address string, value = time of last request.
|
||||
* This allows for additional "burst" connections beyond existing limits.
|
||||
*/
|
||||
private Map<String, Long> recentDataRequests = Collections.synchronizedMap(new HashMap<>());
|
||||
|
||||
|
||||
public static int MAX_FILE_HASH_RESPONSES = 1000;
|
||||
|
||||
|
||||
private ArbitraryDataFileManager() {
|
||||
@@ -68,7 +82,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
try {
|
||||
// Use a fixed thread pool to execute the arbitrary data file requests
|
||||
int threadCount = 10;
|
||||
int threadCount = 5;
|
||||
ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount);
|
||||
for (int i = 0; i < threadCount; i++) {
|
||||
arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread());
|
||||
@@ -98,7 +112,13 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT;
|
||||
arbitraryRelayMap.removeIf(entry -> entry == null || entry.getTimestamp() == null || entry.getTimestamp() < relayMinimumTimestamp);
|
||||
arbitraryDataFileHashResponses.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < relayMinimumTimestamp);
|
||||
arbitraryDataFileHashResponses.removeIf(entry -> entry.getTimestamp() < relayMinimumTimestamp);
|
||||
|
||||
final long directConnectionInfoMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT;
|
||||
directConnectionInfo.removeIf(entry -> entry.getTimestamp() < directConnectionInfoMinimumTimestamp);
|
||||
|
||||
final long recentDataRequestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RECENT_DATA_REQUESTS_TIMEOUT;
|
||||
recentDataRequests.entrySet().removeIf(entry -> entry.getValue() < recentDataRequestMinimumTimestamp);
|
||||
}
|
||||
|
||||
|
||||
@@ -112,9 +132,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
List<byte[]> hashes) throws DataException {
|
||||
|
||||
// Load data file(s)
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
|
||||
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
|
||||
arbitraryDataFile.setMetadataHash(metadataHash);
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
|
||||
boolean receivedAtLeastOneFile = false;
|
||||
|
||||
// Now fetch actual data from this peer
|
||||
@@ -128,10 +146,10 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
|
||||
LOGGER.debug("Requesting data file {} from peer {}", hash58, peer);
|
||||
Long startTime = NTP.getTime();
|
||||
ArbitraryDataFileMessage receivedArbitraryDataFileMessage = fetchArbitraryDataFile(peer, null, signature, hash, null);
|
||||
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, null, signature, hash, null);
|
||||
Long endTime = NTP.getTime();
|
||||
if (receivedArbitraryDataFileMessage != null) {
|
||||
LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer, (endTime-startTime));
|
||||
if (receivedArbitraryDataFile != null) {
|
||||
LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFile.getHash58(), peer, (endTime-startTime));
|
||||
receivedAtLeastOneFile = true;
|
||||
|
||||
// Remove this hash from arbitraryDataFileHashResponses now that we have received it
|
||||
@@ -148,7 +166,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
}
|
||||
}
|
||||
else {
|
||||
LOGGER.trace("Already requesting data file {} for signature {}", arbitraryDataFile, Base58.encode(signature));
|
||||
LOGGER.trace("Already requesting data file {} for signature {} from peer {}", arbitraryDataFile, Base58.encode(signature), peer);
|
||||
}
|
||||
}
|
||||
else {
|
||||
@@ -158,16 +176,6 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
}
|
||||
|
||||
if (receivedAtLeastOneFile) {
|
||||
// Update our lookup table to indicate that this peer holds data for this signature
|
||||
String peerAddress = peer.getPeerData().getAddress().toString();
|
||||
ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer);
|
||||
repository.discardChanges();
|
||||
if (arbitraryPeerData.isPeerAddressValid()) {
|
||||
LOGGER.debug("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature));
|
||||
repository.getArbitraryRepository().save(arbitraryPeerData);
|
||||
repository.saveChanges();
|
||||
}
|
||||
|
||||
// Invalidate the hosted transactions cache as we are now hosting something new
|
||||
ArbitraryDataStorageManager.getInstance().invalidateHostedTransactionsCache();
|
||||
|
||||
@@ -177,26 +185,17 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
// We have all the chunks for this transaction, so we should invalidate the transaction's name's
|
||||
// data cache so that it is rebuilt the next time we serve it
|
||||
ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData);
|
||||
|
||||
// We may also need to broadcast to the network that we are now hosting files for this transaction,
|
||||
// but only if these files are in accordance with our storage policy
|
||||
if (ArbitraryDataStorageManager.getInstance().canStoreData(arbitraryTransactionData)) {
|
||||
// Use a null peer address to indicate our own
|
||||
Message newArbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, 0, Arrays.asList(signature));
|
||||
Network.getInstance().broadcast(broadcastPeer -> newArbitrarySignatureMessage);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return receivedAtLeastOneFile;
|
||||
}
|
||||
|
||||
private ArbitraryDataFileMessage fetchArbitraryDataFile(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
|
||||
private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
|
||||
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
|
||||
boolean fileAlreadyExists = existingFile.exists();
|
||||
String hash58 = Base58.encode(hash);
|
||||
Message message = null;
|
||||
ArbitraryDataFile arbitraryDataFile;
|
||||
|
||||
// Fetch the file if it doesn't exist locally
|
||||
if (!fileAlreadyExists) {
|
||||
@@ -204,10 +203,11 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
arbitraryDataFileRequests.put(hash58, NTP.getTime());
|
||||
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
|
||||
|
||||
Message response = null;
|
||||
try {
|
||||
message = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
|
||||
response = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
|
||||
} catch (InterruptedException e) {
|
||||
// Will return below due to null message
|
||||
// Will return below due to null response
|
||||
}
|
||||
arbitraryDataFileRequests.remove(hash58);
|
||||
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
|
||||
@@ -215,45 +215,42 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
// We may need to remove the file list request, if we have all the files for this transaction
|
||||
this.handleFileListRequests(signature);
|
||||
|
||||
if (message == null) {
|
||||
LOGGER.debug("Received null message from peer {}", peer);
|
||||
if (response == null) {
|
||||
LOGGER.debug("Received null response from peer {}", peer);
|
||||
return null;
|
||||
}
|
||||
if (message.getType() != Message.MessageType.ARBITRARY_DATA_FILE) {
|
||||
LOGGER.debug("Received message with invalid type: {} from peer {}", message.getType(), peer);
|
||||
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
|
||||
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
||||
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
|
||||
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
|
||||
} else {
|
||||
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
|
||||
arbitraryDataFile = existingFile;
|
||||
}
|
||||
|
||||
if (arbitraryDataFile == null) {
|
||||
// We don't have a file, so give up here
|
||||
return null;
|
||||
}
|
||||
ArbitraryDataFileMessage arbitraryDataFileMessage = (ArbitraryDataFileMessage) message;
|
||||
|
||||
// We might want to forward the request to the peer that originally requested it
|
||||
this.handleArbitraryDataFileForwarding(requestingPeer, message, originalMessage);
|
||||
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
|
||||
|
||||
boolean isRelayRequest = (requestingPeer != null);
|
||||
if (isRelayRequest) {
|
||||
if (!fileAlreadyExists) {
|
||||
// File didn't exist locally before the request, and it's a forwarding request, so delete it
|
||||
LOGGER.debug("Deleting file {} because it was needed for forwarding only", Base58.encode(hash));
|
||||
ArbitraryDataFile dataFile = arbitraryDataFileMessage.getArbitraryDataFile();
|
||||
|
||||
// Keep trying to delete the data until it is deleted, or we reach 10 attempts
|
||||
for (int i=0; i<10; i++) {
|
||||
if (dataFile.delete()) {
|
||||
break;
|
||||
}
|
||||
try {
|
||||
Thread.sleep(1000L);
|
||||
} catch (InterruptedException e) {
|
||||
// Fall through to exit method
|
||||
}
|
||||
}
|
||||
arbitraryDataFile.delete(10);
|
||||
}
|
||||
}
|
||||
|
||||
return arbitraryDataFileMessage;
|
||||
return arbitraryDataFile;
|
||||
}
|
||||
|
||||
private void handleFileListRequests(byte[] signature) {
|
||||
@@ -293,7 +290,7 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
// The ID needs to match that of the original request
|
||||
message.setId(originalMessage.getId());
|
||||
|
||||
if (!requestingPeer.sendMessage(message)) {
|
||||
if (!requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
|
||||
LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
|
||||
requestingPeer.disconnect("failed to forward arbitrary data file");
|
||||
}
|
||||
@@ -305,89 +302,135 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
// Fetch data directly from peers
|
||||
|
||||
private List<ArbitraryDirectConnectionInfo> getDirectConnectionInfoForSignature(byte[] signature) {
|
||||
synchronized (directConnectionInfo) {
|
||||
return directConnectionInfo.stream().filter(i -> Arrays.equals(i.getSignature(), signature)).collect(Collectors.toList());
|
||||
}
|
||||
}
|
||||

	/**
	 * Add an ArbitraryDirectConnectionInfo item, but only if one with this peer-signature combination
	 * doesn't already exist.
	 * @param connectionInfo - the direct connection info to add
	 */
	public void addDirectConnectionInfoIfUnique(ArbitraryDirectConnectionInfo connectionInfo) {
		boolean peerAlreadyExists;
		synchronized (directConnectionInfo) {
			peerAlreadyExists = directConnectionInfo.stream()
					.anyMatch(i -> Arrays.equals(i.getSignature(), connectionInfo.getSignature())
							&& Objects.equals(i.getPeerAddress(), connectionInfo.getPeerAddress()));
		}
		if (!peerAlreadyExists) {
			directConnectionInfo.add(connectionInfo);
		}
	}

	private void removeDirectConnectionInfo(ArbitraryDirectConnectionInfo connectionInfo) {
		this.directConnectionInfo.remove(connectionInfo);
	}
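The method above checks for an equivalent signature/peer-address entry before adding. A generic sketch of that add-if-unique pattern, with a hypothetical UniqueList class and comparison predicate:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.BiPredicate;

// Generic form of the "add only if no equivalent entry exists" pattern used above.
public class UniqueList<T> {
	private final List<T> entries = Collections.synchronizedList(new ArrayList<>());

	public void addIfUnique(T candidate, BiPredicate<T, T> sameEntry) {
		boolean alreadyExists;
		synchronized (entries) {
			alreadyExists = entries.stream().anyMatch(existing -> sameEntry.test(existing, candidate));
		}
		// As in the code above, the existence check and the insert are separate steps
		if (!alreadyExists) {
			entries.add(candidate);
		}
	}
}

// Example predicate mirroring the comparison above:
// (a, b) -> Arrays.equals(a.getSignature(), b.getSignature()) && Objects.equals(a.getPeerAddress(), b.getPeerAddress())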
|
||||
|
||||
public boolean fetchDataFilesFromPeersForSignature(byte[] signature) {
|
||||
String signature58 = Base58.encode(signature);
|
||||
ArbitraryDataFileListManager.getInstance().addToSignatureRequests(signature58, false, true);
|
||||
|
||||
// Firstly fetch peers that claim to be hosting files for this signature
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
boolean success = false;
|
||||
|
||||
List<ArbitraryPeerData> peers = repository.getArbitraryRepository().getArbitraryPeerDataForSignature(signature);
|
||||
if (peers == null || peers.isEmpty()) {
|
||||
LOGGER.debug("No peers found for signature {}", signature58);
|
||||
return false;
|
||||
}
|
||||
|
||||
LOGGER.debug("Attempting a direct peer connection for signature {}...", signature58);
|
||||
|
||||
// Peers found, so pick a random one and request data from it
|
||||
int index = new SecureRandom().nextInt(peers.size());
|
||||
ArbitraryPeerData arbitraryPeerData = peers.get(index);
|
||||
String peerAddressString = arbitraryPeerData.getPeerAddress();
|
||||
boolean success = Network.getInstance().requestDataFromPeer(peerAddressString, signature);
|
||||
|
||||
// Parse the peer address to find the host and port
|
||||
String host = null;
|
||||
int port = -1;
|
||||
String[] parts = peerAddressString.split(":");
|
||||
if (parts.length > 1) {
|
||||
host = parts[0];
|
||||
port = Integer.parseInt(parts[1]);
|
||||
}
|
||||
|
||||
// If unsuccessful, and using a non-standard port, try a second connection with the default listen port,
|
||||
// since almost all nodes use that. This is a workaround to account for any ephemeral ports that may
|
||||
// have made it into the dataset.
|
||||
if (!success) {
|
||||
if (host != null && port > 0) {
|
||||
int defaultPort = Settings.getInstance().getDefaultListenPort();
|
||||
if (port != defaultPort) {
|
||||
String newPeerAddressString = String.format("%s:%d", host, defaultPort);
|
||||
success = Network.getInstance().requestDataFromPeer(newPeerAddressString, signature);
|
||||
}
|
||||
try {
|
||||
while (!success) {
|
||||
if (isStopping) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
Thread.sleep(500L);
|
||||
|
||||
// If _still_ unsuccessful, try matching the peer's IP address with some known peers, and then connect
|
||||
// to each of those in turn until one succeeds.
|
||||
if (!success) {
|
||||
if (host != null) {
|
||||
final String finalHost = host;
|
||||
List<PeerData> knownPeers = Network.getInstance().getAllKnownPeers().stream()
|
||||
.filter(knownPeerData -> knownPeerData.getAddress().getHost().equals(finalHost))
|
||||
.collect(Collectors.toList());
|
||||
// Loop through each match and attempt a connection
|
||||
for (PeerData matchingPeer : knownPeers) {
|
||||
String matchingPeerAddress = matchingPeer.getAddress().toString();
|
||||
success = Network.getInstance().requestDataFromPeer(matchingPeerAddress, signature);
|
||||
if (success) {
|
||||
// Successfully connected, so stop making connections
|
||||
break;
|
||||
// Firstly fetch peers that claim to be hosting files for this signature
|
||||
List<ArbitraryDirectConnectionInfo> connectionInfoList = getDirectConnectionInfoForSignature(signature);
|
||||
if (connectionInfoList == null || connectionInfoList.isEmpty()) {
|
||||
LOGGER.debug("No remaining direct connection peers found for signature {}", signature58);
|
||||
return false;
|
||||
}
|
||||
|
||||
LOGGER.debug("Attempting a direct peer connection for signature {}...", signature58);
|
||||
|
||||
// Peers found, so pick one with the highest number of chunks
|
||||
Comparator<ArbitraryDirectConnectionInfo> highestChunkCountFirstComparator =
|
||||
Comparator.comparingInt(ArbitraryDirectConnectionInfo::getHashCount).reversed();
|
||||
ArbitraryDirectConnectionInfo directConnectionInfo = connectionInfoList.stream()
|
||||
.sorted(highestChunkCountFirstComparator).findFirst().orElse(null);
|
||||
|
||||
if (directConnectionInfo == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Remove from the list so that a different peer is tried next time
|
||||
removeDirectConnectionInfo(directConnectionInfo);
|
||||
|
||||
String peerAddressString = directConnectionInfo.getPeerAddress();
|
||||
|
||||
// Parse the peer address to find the host and port
|
||||
String host = null;
|
||||
int port = -1;
|
||||
String[] parts = peerAddressString.split(":");
|
||||
if (parts.length > 1) {
|
||||
host = parts[0];
|
||||
port = Integer.parseInt(parts[1]);
|
||||
} else {
|
||||
// Assume no port included
|
||||
host = peerAddressString;
|
||||
// Use default listen port
|
||||
port = Settings.getInstance().getDefaultListenPort();
|
||||
}
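The added lines above split an optional ":port" suffix and fall back to the default listen port. A small, self-contained sketch of that parsing; the DEFAULT_PORT value of 12392 is an assumption, not taken from this diff:

// Sketch of the peer-address normalisation used above: keep an explicit port when present,
// otherwise assume the default listen port.
public class PeerAddressParser {
	private static final int DEFAULT_PORT = 12392; // assumption: the node's usual listen port

	public static String normalize(String peerAddress) {
		String[] parts = peerAddress.split(":");
		String host = parts.length > 1 ? parts[0] : peerAddress;
		int port = parts.length > 1 ? Integer.parseInt(parts[1]) : DEFAULT_PORT;
		return String.format("%s:%d", host, port);
	}
}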
|
||||
|
||||
String peerAddressStringWithPort = String.format("%s:%d", host, port);
|
||||
success = Network.getInstance().requestDataFromPeer(peerAddressStringWithPort, signature);
|
||||
|
||||
int defaultPort = Settings.getInstance().getDefaultListenPort();
|
||||
|
||||
// If unsuccessful, and using a non-standard port, try a second connection with the default listen port,
|
||||
// since almost all nodes use that. This is a workaround to account for any ephemeral ports that may
|
||||
// have made it into the dataset.
|
||||
if (!success) {
|
||||
if (host != null && port > 0) {
|
||||
if (port != defaultPort) {
|
||||
String newPeerAddressString = String.format("%s:%d", host, defaultPort);
|
||||
success = Network.getInstance().requestDataFromPeer(newPeerAddressString, signature);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Keep track of the success or failure
|
||||
arbitraryPeerData.markAsAttempted();
|
||||
if (success) {
|
||||
arbitraryPeerData.markAsRetrieved();
|
||||
arbitraryPeerData.incrementSuccesses();
|
||||
}
|
||||
else {
|
||||
arbitraryPeerData.incrementFailures();
|
||||
}
|
||||
repository.discardChanges();
|
||||
repository.getArbitraryRepository().save(arbitraryPeerData);
|
||||
repository.saveChanges();
|
||||
// If _still_ unsuccessful, try matching the peer's IP address with some known peers, and then connect
|
||||
// to each of those in turn until one succeeds.
|
||||
if (!success) {
|
||||
if (host != null) {
|
||||
final String finalHost = host;
|
||||
List<PeerData> knownPeers = Network.getInstance().getAllKnownPeers().stream()
|
||||
.filter(knownPeerData -> knownPeerData.getAddress().getHost().equals(finalHost))
|
||||
.collect(Collectors.toList());
|
||||
// Loop through each match and attempt a connection
|
||||
for (PeerData matchingPeer : knownPeers) {
|
||||
String matchingPeerAddress = matchingPeer.getAddress().toString();
|
||||
int matchingPeerPort = matchingPeer.getAddress().getPort();
|
||||
// Make sure that it's not a port we've already tried
|
||||
if (matchingPeerPort != port && matchingPeerPort != defaultPort) {
|
||||
success = Network.getInstance().requestDataFromPeer(matchingPeerAddress, signature);
|
||||
if (success) {
|
||||
// Successfully connected, so stop making connections
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return success;
|
||||
if (success) {
|
||||
// We were able to connect with a peer, so track the request
|
||||
ArbitraryDataFileListManager.getInstance().addToSignatureRequests(signature58, false, true);
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to fetch peer list from repository");
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
return false;
|
||||
return success;
|
||||
}
|
||||
|
||||
|
||||
@@ -401,6 +444,33 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
}
|
||||
}
|
||||

	private ArbitraryRelayInfo getOptimalRelayInfoEntryForHash(String hash58) {
		LOGGER.trace("Fetching relay info for hash: {}", hash58);
		List<ArbitraryRelayInfo> relayInfoList = this.getRelayInfoListForHash(hash58);
		if (relayInfoList != null && !relayInfoList.isEmpty()) {

			// Remove any with null requestHops
			relayInfoList.removeIf(r -> r.getRequestHops() == null);

			// If list is now empty, then just return one at random
			if (relayInfoList.isEmpty()) {
				return this.getRandomRelayInfoEntryForHash(hash58);
			}

			// Sort by number of hops (lowest first)
			relayInfoList.sort(Comparator.comparingInt(ArbitraryRelayInfo::getRequestHops));

			// FUTURE: secondary sort by requestTime?

			ArbitraryRelayInfo relayInfo = relayInfoList.get(0);

			LOGGER.trace("Returning optimal relay info for hash: {} (requestHops {})", hash58, relayInfo.getRequestHops());
			return relayInfo;
		}
		LOGGER.trace("No relay info exists for hash: {}", hash58);
		return null;
	}
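getOptimalRelayInfoEntryForHash prefers the entry with the fewest request hops and falls back to a random pick when hop counts are unknown. An illustrative, hedged sketch of that selection rule using a hypothetical Candidate type:

import java.util.Comparator;
import java.util.List;
import java.util.Random;

// Sketch: pick the candidate with the fewest relay hops, or a random one when no hop counts are known.
public class RelaySelection {
	public static class Candidate {
		public final String peerAddress;
		public final Integer requestHops; // null when unknown

		public Candidate(String peerAddress, Integer requestHops) {
			this.peerAddress = peerAddress;
			this.requestHops = requestHops;
		}
	}

	public static Candidate pick(List<Candidate> candidates) {
		if (candidates == null || candidates.isEmpty())
			return null;

		// Prefer entries whose hop count is known, lowest first
		Candidate best = candidates.stream()
				.filter(c -> c.requestHops != null)
				.min(Comparator.comparingInt(c -> c.requestHops))
				.orElse(null);

		// Otherwise fall back to a random entry, as the original method does
		return best != null ? best : candidates.get(new Random().nextInt(candidates.size()));
	}
}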
|
||||
|
||||
private ArbitraryRelayInfo getRandomRelayInfoEntryForHash(String hash58) {
|
||||
LOGGER.trace("Fetching random relay info for hash: {}", hash58);
|
||||
List<ArbitraryRelayInfo> relayInfoList = this.getRelayInfoListForHash(hash58);
|
||||
@@ -433,6 +503,45 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
}
|
||||


	// Peers requesting QDN data from us

	/**
	 * Add an address string of a peer that is trying to request data from us.
	 * @param peerAddress
	 */
	public void addRecentDataRequest(String peerAddress) {
		if (peerAddress == null) {
			return;
		}

		Long now = NTP.getTime();
		if (now == null) {
			return;
		}

		// Make sure to remove the port, since it isn't guaranteed to match next time
		String[] parts = peerAddress.split(":");
		if (parts.length == 0) {
			return;
		}
		String host = parts[0];
		if (!InetAddresses.isInetAddress(host)) {
			// Invalid host
			return;
		}

		this.recentDataRequests.put(host, now);
	}

	public boolean isPeerRequestingData(String peerAddressWithoutPort) {
		return this.recentDataRequests.containsKey(peerAddressWithoutPort);
	}

	public boolean hasPendingDataRequest() {
		return !this.recentDataRequests.isEmpty();
	}
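addRecentDataRequest stores only the validated host part of the requester's address, without the port, so later lookups match. A minimal sketch of that tracking under the same Guava InetAddresses check; the RecentRequesters name is hypothetical:

import com.google.common.net.InetAddresses;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Sketch of the "recent requester" tracking above: key on the validated host only.
public class RecentRequesters {
	private final Map<String, Long> recent = Collections.synchronizedMap(new HashMap<>());

	public void record(String peerAddress, Long now) {
		if (peerAddress == null || now == null)
			return;
		String host = peerAddress.split(":")[0]; // drop any port - it may differ next time
		if (!InetAddresses.isInetAddress(host))
			return; // ignore hostnames and malformed addresses
		recent.put(host, now);
	}

	public boolean isRecentRequester(String hostWithoutPort) {
		return recent.containsKey(hostWithoutPort);
	}
}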
|
||||
|
||||
|
||||
// Network handlers
|
||||
|
||||
public void onNetworkGetArbitraryDataFileMessage(Peer peer, Message message) {
|
||||
@@ -451,19 +560,22 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
|
||||
try {
|
||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
|
||||
ArbitraryRelayInfo relayInfo = this.getRandomRelayInfoEntryForHash(hash58);
|
||||
ArbitraryRelayInfo relayInfo = this.getOptimalRelayInfoEntryForHash(hash58);
|
||||
|
||||
if (arbitraryDataFile.exists()) {
|
||||
LOGGER.trace("Hash {} exists", hash58);
|
||||
|
||||
// We can serve the file directly as we already have it
|
||||
LOGGER.debug("Sending file {}...", arbitraryDataFile);
|
||||
ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
|
||||
arbitraryDataFileMessage.setId(message.getId());
|
||||
if (!peer.sendMessage(arbitraryDataFileMessage)) {
|
||||
LOGGER.debug("Couldn't sent file");
|
||||
if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
|
||||
LOGGER.debug("Couldn't send file {}", arbitraryDataFile);
|
||||
peer.disconnect("failed to send file");
|
||||
}
|
||||
LOGGER.debug("Sent file {}", arbitraryDataFile);
|
||||
else {
|
||||
LOGGER.debug("Sent file {}", arbitraryDataFile);
|
||||
}
|
||||
}
|
||||
else if (relayInfo != null) {
|
||||
LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
|
||||
@@ -488,9 +600,10 @@ public class ArbitraryDataFileManager extends Thread {
|
||||
// Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout
|
||||
LOGGER.debug(String.format("Sending 'file unknown' response to peer %s for GET_FILE request for unknown file %s", peer, arbitraryDataFile));
|
||||
|
||||
// We'll send empty block summaries message as it's very short
|
||||
// TODO: use a different message type here
|
||||
Message fileUnknownMessage = new BlockSummariesMessage(Collections.emptyList());
|
||||
// Send generic 'unknown' message as it's very short
|
||||
Message fileUnknownMessage = peer.getPeersVersion() >= GenericUnknownMessage.MINIMUM_PEER_VERSION
|
||||
? new GenericUnknownMessage()
|
||||
: new BlockSummariesMessage(Collections.emptyList());
|
||||
fileUnknownMessage.setId(message.getId());
|
||||
if (!peer.sendMessage(fileUnknownMessage)) {
|
||||
LOGGER.debug("Couldn't sent file-unknown response");
|
||||
|
||||
@@ -3,6 +3,7 @@ package org.qortal.controller.arbitrary;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.repository.DataException;
|
||||
@@ -11,11 +12,9 @@ import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.utils.ArbitraryTransactionUtils;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class ArbitraryDataFileRequestThread implements Runnable {
|
||||
|
||||
@@ -51,39 +50,47 @@ public class ArbitraryDataFileRequestThread implements Runnable {
|
||||
boolean shouldProcess = false;
|
||||
|
||||
synchronized (arbitraryDataFileManager.arbitraryDataFileHashResponses) {
|
||||
Iterator iterator = arbitraryDataFileManager.arbitraryDataFileHashResponses.entrySet().iterator();
|
||||
while (iterator.hasNext()) {
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
if (!arbitraryDataFileManager.arbitraryDataFileHashResponses.isEmpty()) {
|
||||
|
||||
Map.Entry entry = (Map.Entry) iterator.next();
|
||||
if (entry == null || entry.getKey() == null || entry.getValue() == null) {
|
||||
// Sort by lowest number of node hops first
|
||||
Comparator<ArbitraryFileListResponseInfo> lowestHopsFirstComparator =
|
||||
Comparator.comparingInt(ArbitraryFileListResponseInfo::getRequestHops);
|
||||
arbitraryDataFileManager.arbitraryDataFileHashResponses.sort(lowestHopsFirstComparator);
|
||||
|
||||
Iterator iterator = arbitraryDataFileManager.arbitraryDataFileHashResponses.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ArbitraryFileListResponseInfo responseInfo = (ArbitraryFileListResponseInfo) iterator.next();
|
||||
if (responseInfo == null) {
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
hash58 = responseInfo.getHash58();
|
||||
peer = responseInfo.getPeer();
|
||||
signature58 = responseInfo.getSignature58();
|
||||
Long timestamp = responseInfo.getTimestamp();
|
||||
|
||||
if (now - timestamp >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || signature58 == null || peer == null) {
|
||||
// Ignore - to be deleted
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if already requesting, but don't remove, as we might want to retry later
|
||||
if (arbitraryDataFileManager.arbitraryDataFileRequests.containsKey(hash58)) {
|
||||
// Already requesting - leave this attempt for later
|
||||
continue;
|
||||
}
|
||||
|
||||
// We want to process this file
|
||||
shouldProcess = true;
|
||||
iterator.remove();
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
|
||||
hash58 = (String) entry.getKey();
|
||||
Triple<Peer, String, Long> value = (Triple<Peer, String, Long>) entry.getValue();
|
||||
if (value == null) {
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
peer = value.getA();
|
||||
signature58 = value.getB();
|
||||
Long timestamp = value.getC();
|
||||
|
||||
if (now - timestamp >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || signature58 == null || peer == null) {
|
||||
// Ignore - to be deleted
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// We want to process this file
|
||||
shouldProcess = true;
|
||||
iterator.remove();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,7 +114,7 @@ public class ArbitraryDataFileRequestThread implements Runnable {
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.debug("Fetching file {} from peer {} via request thread...", hash58, peer);
|
||||
LOGGER.trace("Fetching file {} from peer {} via request thread...", hash58, peer);
|
||||
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, Arrays.asList(hash));
|
||||
|
||||
} catch (DataException e) {
|
||||
|
||||