diff --git a/core/pom.xml b/core/pom.xml
index 6b536236..475c21ce 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -20,19 +20,19 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>com.dogecoin</groupId>
- <artifactId>dogecoinj-parent</artifactId>
+ <groupId>org.altcoinj</groupId>
+ <artifactId>altcoinj-parent</artifactId>
<version>0.13-SNAPSHOT</version>
</parent>
- <artifactId>dogecoinj-core</artifactId>
+ <artifactId>altcoinj-core</artifactId>
- <name>dogecoinj</name>
- <description>A Java Dogecoin library</description>
+ <name>altcoinj</name>
+ <description>Extension library to bitcoinj to add altcoin support</description>
<packaging>jar</packaging>
- <url>https://github.com/langerhans/dogecoinj-new</url>
+ <url>https://altcoinj.github.io</url>
@@ -45,8 +45,8 @@
- <name>The bitcoinj team. Port to Dogecoin by the Dogecoin developers</name>
- <email>info@dogecoin.com</email>
+ <name>The altcoinj team</name>
+ <email>ross@dogecoin.com</email>
@@ -136,6 +136,7 @@
<link>http://docs.guava-libraries.googlecode.com/git-history/release/javadoc/</link>
+ true
@@ -164,8 +165,10 @@
- <urn>com.dogecoin:orchid:1.1:jar:null:compile:393d53cad40f32f1c00a717326deeb4bde8ee57b</urn>
<urn>cglib:cglib-nodep:2.2:jar:null:test:59afed7ab65e7ec6585d5bc60556c3cbd203532b</urn>
+ <urn>com.fasterxml.jackson.core:jackson-annotations:2.4.0:jar:null:test:d6a66c7a5f01cf500377bd669507a08cfeba882a</urn>
+ <urn>com.fasterxml.jackson.core:jackson-core:2.4.2:jar:null:test:ceb72830d95c512b4b300a38f29febc85bdf6e4b</urn>
+ <urn>com.fasterxml.jackson.core:jackson-databind:2.4.2:jar:null:test:8e31266a272ad25ac4c089734d93e8d811652c1f</urn>
<urn>com.google.code.findbugs:jsr305:2.0.1:jar:null:compile:516c03b21d50a644d538de0f0369c620989cd8f0</urn>
<urn>com.google.guava:guava:16.0.1:jar:null:compile:5fa98cd1a63c99a44dd8d3b77e4762b066a5d0c5</urn>
<urn>com.google.protobuf:protobuf-java:2.5.0:jar:null:compile:a10732c76bfacdbd633a7eb0f7968b1059a65dfa</urn>
@@ -173,31 +176,30 @@
<urn>com.lambdaworks:scrypt:1.4.0:jar:null:compile:906506b74f30c8c20bccd9ed4a11112d8941fe87</urn>
<urn>com.madgag.spongycastle:core:1.51.0.0:jar:null:compile:0f642963312ea0e615ad65f28adc5a5b3a2a0862</urn>
<urn>junit:junit:4.11:jar:null:test:4e031bb61df09069aeb2bffb4019e7a5034a4ee0</urn>
+ <urn>mysql:mysql-connector-java:5.1.33:jar:null:compile:8af455a9a3267e6664cafc87ace71a4e4ef02837</urn>
<urn>net.jcip:jcip-annotations:1.0:jar:null:compile:afba4942caaeaf46aab0b976afd57cc7c181467e</urn>
- <urn>org.apache.maven.plugins:maven-clean-plugin:2.5:maven-plugin:null:runtime:75653decaefa85ca8114ff3a4f869bb2ee6d605d</urn>
- <urn>org.apache.maven.plugins:maven-compiler-plugin:3.1:maven-plugin:null:runtime:9977a8d04e75609cf01badc4eb6a9c7198c4c5ea</urn>
- <urn>org.apache.maven.plugins:maven-dependency-plugin:2.8:maven-plugin:null:runtime:04c8dedf3d9b2a3f45f3daa93e11ca547d2063ca</urn>
- <urn>org.apache.maven.plugins:maven-deploy-plugin:2.7:maven-plugin:null:runtime:6dadfb75679ca010b41286794f737088ebfe12fd</urn>
- <urn>org.apache.maven.plugins:maven-enforcer-plugin:1.2:maven-plugin:null:runtime:6b755a9a0d618f8f57c0b5c4a0737a012e710a46</urn>
- <urn>org.apache.maven.plugins:maven-install-plugin:2.5.1:maven-plugin:null:runtime:b6f5a4b621b9c26699c8deadb20fdc35ce568e35</urn>
- <urn>org.apache.maven.plugins:maven-jar-plugin:2.5:maven-plugin:null:runtime:344d667f5ec8b90d03d698d096a1147672fc522f</urn>
- <urn>org.apache.maven.plugins:maven-javadoc-plugin:2.9.1:maven-plugin:null:runtime:95ea7abf00e37e08bd927bf7e448c1e7fe4c6cb9</urn>
- <urn>org.apache.maven.plugins:maven-resources-plugin:2.6:maven-plugin:null:runtime:dd093ff6a4b680eae7ae83b5ab04310249fc6590</urn>
+ <urn>org.apache.maven.plugins:maven-clean-plugin:2.6.1:maven-plugin:null:runtime:bfdf7d6c2f8fc8759457e9d54f458ba56ac7b30f</urn>
+ <urn>org.apache.maven.plugins:maven-compiler-plugin:3.2:maven-plugin:null:runtime:aec10f274ac07fafab8906cb1aa69669d753b2c2</urn>
+ <urn>org.apache.maven.plugins:maven-dependency-plugin:2.10:maven-plugin:null:runtime:af87ceeb71c6499147c5d27f74c9317bf707538e</urn>
+ <urn>org.apache.maven.plugins:maven-deploy-plugin:2.8.2:maven-plugin:null:runtime:3c2d83ecd387e9843142ae92a0439792c1500319</urn>
+ <urn>org.apache.maven.plugins:maven-enforcer-plugin:1.0:maven-plugin:null:runtime:ad032b7593576e9fe9305c73865633e163895b29</urn>
+ <urn>org.apache.maven.plugins:maven-install-plugin:2.5.2:maven-plugin:null:runtime:8a67631619fc3c1d1f036e59362ddce71e1e496f</urn>
+ <urn>org.apache.maven.plugins:maven-jar-plugin:2.6:maven-plugin:null:runtime:618f08d0fcdd3929af846ef1b65503b5904f93e3</urn>
+ <urn>org.apache.maven.plugins:maven-javadoc-plugin:2.10.2:maven-plugin:null:runtime:5f391697fa85cecc7e5bac7ce5a6f9d056a58ba3</urn>
+ <urn>org.apache.maven.plugins:maven-resources-plugin:2.7:maven-plugin:null:runtime:94af11389943a480ecec7db01b4ded1b9cdf57c5</urn>
<urn>org.apache.maven.plugins:maven-shade-plugin:2.3:maven-plugin:null:runtime:d136adc7abccc9c12adcad6ae7a9bc51b2b7184b</urn>
- <urn>org.apache.maven.plugins:maven-site-plugin:3.3:maven-plugin:null:runtime:77ba1752b1ac4c4339d6f11554800960a56a4ae1</urn>
- <urn>org.apache.maven.plugins:maven-source-plugin:2.1.2:maven-plugin:null:runtime:35154aa8e6e0e84c2b5c10c3d5220d65670ba984</urn>
- <urn>org.apache.maven.plugins:maven-surefire-plugin:2.12.4:maven-plugin:null:runtime:2b435f7f77777d2e62354fdc690da3f1dc47a26b</urn>
- <urn>org.codehaus.mojo:cobertura-maven-plugin:2.6:maven-plugin:null:runtime:5204735a0642b42f5647d8ec876d4301e328c0d5</urn>
+ <urn>org.apache.maven.plugins:maven-site-plugin:3.4:maven-plugin:null:runtime:659cd5f1dd8bff554cf52603339494cbf7f283c5</urn>
+ <urn>org.apache.maven.plugins:maven-source-plugin:2.4:maven-plugin:null:runtime:46f0d7f7823d729ba300d3f8929900c7e9cb5ac0</urn>
+ <urn>org.apache.maven.plugins:maven-surefire-plugin:2.18.1:maven-plugin:null:runtime:402fd3066fd6d85ea4a1a3e7cd82a7e35037e6e8</urn>
<urn>org.easymock:easymock:3.0:jar:null:test:f28a4c31c330f95c9acbf1108cea19952b5c496f</urn>
+ <urn>org.eluder.coveralls:coveralls-maven-plugin:3.1.0:maven-plugin:null:runtime:ca9d2915e2b1e99f15c9f54ad653eda893d42a69</urn>
<urn>org.hamcrest:hamcrest-core:1.3:jar:null:test:42a25dc3219429f0e5d060061f71acb49bf010a0</urn>
+ <urn>org.jacoco:jacoco-maven-plugin:0.7.4.201502262128:maven-plugin:null:runtime:ee12ed04db135c74d0ae99e9c4e4754ee1582edb</urn>
<urn>org.objenesis:objenesis:1.2:jar:null:test:bfcb0539a071a4c5a30690388903ac48c0667f2a</urn>
<urn>org.slf4j:slf4j-api:1.7.6:jar:null:compile:562424e36df3d2327e8e9301a76027fca17d54ea</urn>
<urn>org.slf4j:slf4j-jdk14:1.7.6:jar:null:test:1a3301a32ea7d90c3d33e9d60edbfdc9589fc748</urn>
- <urn>com.fasterxml.jackson.core:jackson-databind:2.4.2:jar:null:test:8e31266a272ad25ac4c089734d93e8d811652c1f</urn>
- <urn>com.fasterxml.jackson.core:jackson-core:2.4.2:jar:null:test:ceb72830d95c512b4b300a38f29febc85bdf6e4b</urn>
- <urn>com.fasterxml.jackson.core:jackson-annotations:2.4.2:jar:null:test:6bb52af09372d5064206d47d7887d41671f00f7d</urn>
- <urn>org.jacoco:jacoco-maven-plugin:0.7.2.201409121644:maven-plugin:null:runtime:b2cb310459d082db505fdfa66dbadd4d8bac8e34</urn>
- <urn>org.eluder.coveralls:coveralls-maven-plugin:3.0.1:maven-plugin:null:runtime:3907ee5cf1e5c85af7bb90e486ce4c7b1408a552</urn>
+ <urn>org.sonatype.plugins:nexus-staging-maven-plugin:1.6.5:maven-plugin:null:runtime:455ca2aa8cd14a06608f1538bd6a1efd09561563</urn>
+ <urn>postgresql:postgresql:9.1-901.jdbc4:jar:null:compile:153f2f92a786f12fc111d0111f709012df87c808</urn>
<urn>uk.co.froot.maven.enforcer:digest-enforcer-rules:0.0.1:jar:null:runtime:16a9e04f3fe4bb143c42782d07d5faf65b32106f</urn>
@@ -254,7 +256,7 @@
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
- <version>0.7.2.201409121644</version>
+ <version>0.7.4.201502262128</version>
<exclude>**/Protos*.class</exclude>
@@ -298,17 +300,17 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>2.12.4</version>
<argLine>${surefireArgLine}</argLine>
+ <runOrder>alphabetical</runOrder>
-
+
<groupId>org.eluder.coveralls</groupId>
<artifactId>coveralls-maven-plugin</artifactId>
- <version>3.0.1</version>
+ <version>3.1.0</version>
-
- <exclude>META-INF/*.SF</exclude>
- <exclude>META-INF/*.DSA</exclude>
- <exclude>META-INF/*.RSA</exclude>
-
-
-
-
-
@@ -398,7 +374,7 @@
<version>3.0</version>
<scope>test</scope>
-
@@ -466,10 +442,21 @@
<optional>true</optional>
- <groupId>com.dogecoin</groupId>
+ <groupId>org.fusesource.leveldbjni</groupId>
+ <artifactId>leveldbjni-all</artifactId>
+ <version>1.8</version>
+ <optional>true</optional>
+
+
+ <groupId>org.bitcoinj</groupId>
+ <artifactId>orchid</artifactId>
+ <version>1.1</version>
+
+ <groupId>org.bitcoinj</groupId>
+ <artifactId>bitcoinj-core</artifactId>
+ <version>0.13-SNAPSHOT</version>
+
diff --git a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractBlockChain.java b/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractBlockChain.java
deleted file mode 100644
index bca4fb3b..00000000
--- a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractBlockChain.java
+++ /dev/null
@@ -1,1153 +0,0 @@
-/*
- * Copyright 2012 Google Inc.
- * Copyright 2014 Andreas Schildbach
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dogecoin.dogecoinj.core;
-
-import com.dogecoin.dogecoinj.store.BlockStore;
-import com.dogecoin.dogecoinj.store.BlockStoreException;
-import com.dogecoin.dogecoinj.utils.ListenerRegistration;
-import com.dogecoin.dogecoinj.utils.Threading;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nullable;
-import java.math.BigInteger;
-import java.util.*;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.Executor;
-import java.util.concurrent.locks.ReentrantLock;
-
-import static com.google.common.base.Preconditions.*;
-
-/**
- * <p>An AbstractBlockChain holds a series of {@link Block} objects, links them together, and knows how to verify that
- * the chain follows the rules of the {@link NetworkParameters} for this chain.
- *
- * <p>It can be connected to a {@link Wallet}, and also {@link BlockChainListener}s that can receive transactions and
- * notifications of re-organizations.
- *
- * <p>An AbstractBlockChain implementation must be connected to a {@link BlockStore} implementation. The chain object
- * by itself doesn't store any data, that's delegated to the store. Which store you use is a decision best made by
- * reading the getting started guide, but briefly, fully validating block chains need fully validating stores. In
- * the lightweight SPV mode, a {@link com.dogecoin.dogecoinj.store.SPVBlockStore} is the right choice.
- *
- * <p>This is an abstract class which makes it simple to create a BlockChain that does/doesn't do full
- * verification. It verifies headers and implements most of what is required for SPV mode, but
- * also provides callback hooks which can be used to do full verification.
- *
- * <p>There are two subclasses of AbstractBlockChain that are useful: {@link BlockChain}, which is the simplest
- * class and implements simplified payment verification. This is a lightweight and efficient mode that does
- * not verify the contents of blocks, just their headers. A {@link FullPrunedBlockChain} paired with a
- * {@link com.dogecoin.dogecoinj.store.H2FullPrunedBlockStore} implements full verification, which is equivalent to the
- * original Satoshi client. To learn more about the alternative security models, please consult the articles on the
- * website.
- *
- * Theory
- *
- * <p>The 'chain' is actually a tree although in normal operation it operates mostly as a list of {@link Block}s.
- * When multiple new head blocks are found simultaneously, there are multiple stories of the economy competing to become
- * the one true consensus. This can happen naturally when two miners solve a block within a few seconds of each other,
- * or it can happen when the chain is under attack.
- *
- * <p>A reference to the head block of the best known chain is stored. If you can reach the genesis block by repeatedly
- * walking through the prevBlock pointers, then we say this is a full chain. If you cannot reach the genesis block
- * we say it is an orphan chain. Orphan chains can occur when blocks are solved and received during the initial block
- * chain download, or if we connect to a peer that doesn't send us blocks in order.
- *
- * <p>A reorganize occurs when the blocks that make up the best known chain change. Note that simply adding a
- * new block to the top of the best chain isn't a reorganize, but that a reorganize is always triggered by adding
- * a new block that connects to some other (non best head) block. By "best" we mean the chain representing the largest
- * amount of work done.
- *
- * <p>Every so often the block chain passes a difficulty transition point. At that time, all the blocks in the last
- * 2016 blocks are examined and a new difficulty target is calculated from them.
- */
-public abstract class AbstractBlockChain {
- private static final Logger log = LoggerFactory.getLogger(AbstractBlockChain.class);
- protected final ReentrantLock lock = Threading.lock("blockchain");
-
- /** Keeps a map of block hashes to StoredBlocks. */
- private final BlockStore blockStore;
- private final Context context;
-
- /**
- * Tracks the top of the best known chain.
- *
- * Following this one down to the genesis block produces the story of the economy from the creation of Bitcoin
- * until the present day. The chain head can change if a new set of blocks is received that results in a chain of
- * greater work than the one obtained by following this one down. In that case a reorganize is triggered,
- * potentially invalidating transactions in our wallet.
- */
- protected StoredBlock chainHead;
-
- // TODO: Scrap this and use a proper read/write for all of the block chain objects.
- // The chainHead field is read/written synchronized with this object rather than BlockChain. However writing is
- // also guaranteed to happen whilst BlockChain is synchronized (see setChainHead). The goal of this is to let
- // clients quickly access the chain head even whilst the block chain is downloading and thus the BlockChain is
- // locked most of the time.
- private final Object chainHeadLock = new Object();
-
- protected final NetworkParameters params;
- private final CopyOnWriteArrayList<ListenerRegistration<BlockChainListener>> listeners;
-
- // Holds a block header and, optionally, a list of tx hashes or block's transactions
- class OrphanBlock {
- final Block block;
- final List<Sha256Hash> filteredTxHashes;
- final Map<Sha256Hash, Transaction> filteredTxn;
- OrphanBlock(Block block, @Nullable List<Sha256Hash> filteredTxHashes, @Nullable Map<Sha256Hash, Transaction> filteredTxn) {
- final boolean filtered = filteredTxHashes != null && filteredTxn != null;
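- // An orphan entry holds either a full block with its transactions, or just a header plus filtered tx data, never both.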
- Preconditions.checkArgument((block.transactions == null && filtered)
- || (block.transactions != null && !filtered));
- if (!shouldVerifyTransactions())
- this.block = block.cloneAsHeader();
- else
- this.block = block;
- this.filteredTxHashes = filteredTxHashes;
- this.filteredTxn = filteredTxn;
- }
- }
- // Holds blocks that we have received but can't plug into the chain yet, eg because they were created whilst we
- // were downloading the block chain.
- private final LinkedHashMap<Sha256Hash, OrphanBlock> orphanBlocks = new LinkedHashMap<Sha256Hash, OrphanBlock>();
-
- /** False positive estimation uses a double exponential moving average. */
- public static final double FP_ESTIMATOR_ALPHA = 0.0001;
- /** False positive estimation uses a double exponential moving average. */
- public static final double FP_ESTIMATOR_BETA = 0.01;
-
- private double falsePositiveRate;
- private double falsePositiveTrend;
- private double previousFalsePositiveRate;
-
-
- /**
- * Constructs a BlockChain connected to the given list of listeners (eg, wallets) and a store.
- */
- public AbstractBlockChain(NetworkParameters params, List<BlockChainListener> listeners,
- BlockStore blockStore) throws BlockStoreException {
- this.blockStore = blockStore;
- chainHead = blockStore.getChainHead();
- log.info("chain head is at height {}:\n{}", chainHead.getHeight(), chainHead.getHeader());
- this.params = params;
- this.listeners = new CopyOnWriteArrayList<ListenerRegistration<BlockChainListener>>();
- for (BlockChainListener l : listeners) addListener(l, Threading.SAME_THREAD);
- context = new Context();
- }
-
- public Context getContext() {
- return context;
- }
-
- /**
- * Add a wallet to the BlockChain. Note that the wallet will be unaffected by any blocks received while it
- * was not part of this BlockChain. This method is useful if the wallet has just been created, and its keys
- * have never been in use, or if the wallet has been loaded along with the BlockChain. Note that adding multiple
- * wallets is not well tested!
- */
- public void addWallet(Wallet wallet) {
- addListener(wallet, Threading.SAME_THREAD);
- int walletHeight = wallet.getLastBlockSeenHeight();
- int chainHeight = getBestChainHeight();
- if (walletHeight != chainHeight) {
- log.warn("Wallet/chain height mismatch: {} vs {}", walletHeight, chainHeight);
- log.warn("Hashes: {} vs {}", wallet.getLastBlockSeenHash(), getChainHead().getHeader().getHash());
-
- // This special case happens when the VM crashes because of a transaction received. It causes the updated
- // block store to persist, but not the wallet. In order to fix the issue, we roll back the block store to
- // the wallet height to make it look like as if the block has never been received.
- if (walletHeight < chainHeight && walletHeight > 0) {
- try {
- rollbackBlockStore(walletHeight);
- log.info("Rolled back block store to height {}.", walletHeight);
- } catch (BlockStoreException x) {
- log.warn("Rollback of block store failed, continuing with mismatched heights. This can happen due to a replay.");
- }
- }
- }
- }
-
- /** Removes a wallet from the chain. */
- public void removeWallet(Wallet wallet) {
- removeListener(wallet);
- }
-
- /**
- * Adds a generic {@link BlockChainListener} listener to the chain.
- */
- public void addListener(BlockChainListener listener) {
- addListener(listener, Threading.USER_THREAD);
- }
-
- /**
- * Adds a generic {@link BlockChainListener} listener to the chain.
- */
- public void addListener(BlockChainListener listener, Executor executor) {
- listeners.add(new ListenerRegistration<BlockChainListener>(listener, executor));
- }
-
- /**
- * Removes the given {@link BlockChainListener} from the chain.
- */
- public void removeListener(BlockChainListener listener) {
- ListenerRegistration.removeFromList(listener, listeners);
- }
-
- /**
- * Returns the {@link BlockStore} the chain was constructed with. You can use this to iterate over the chain.
- */
- public BlockStore getBlockStore() {
- return blockStore;
- }
-
- /**
- * Adds/updates the given {@link Block} with the block store.
- * This version is used when the transactions have not been verified.
- * @param storedPrev The {@link StoredBlock} which immediately precedes block.
- * @param block The {@link Block} to add/update.
- * @return the newly created {@link StoredBlock}
- */
- protected abstract StoredBlock addToBlockStore(StoredBlock storedPrev, Block block)
- throws BlockStoreException, VerificationException;
-
- /**
- * Adds/updates the given {@link StoredBlock} with the block store.
- * This version is used when the transactions have already been verified to properly spend txOutputChanges.
- * @param storedPrev The {@link StoredBlock} which immediately precedes block.
- * @param header The {@link StoredBlock} to add/update.
- * @param txOutputChanges The total sum of all changes made by this block to the set of open transaction outputs
- * (from a call to connectTransactions), if in fully verifying mode (null otherwise).
- * @return the newly created {@link StoredBlock}
- */
- protected abstract StoredBlock addToBlockStore(StoredBlock storedPrev, Block header,
- @Nullable TransactionOutputChanges txOutputChanges)
- throws BlockStoreException, VerificationException;
-
- /**
- * Rollback the block store to a given height. This is currently only supported by {@link BlockChain} instances.
- *
- * @throws BlockStoreException
- * if the operation fails or is unsupported.
- */
- protected abstract void rollbackBlockStore(int height) throws BlockStoreException;
-
- /**
- * Called before setting chain head in memory.
- * Should write the new head to block store and then commit any database transactions
- * that were started by disconnectTransactions/connectTransactions.
- */
- protected abstract void doSetChainHead(StoredBlock chainHead) throws BlockStoreException;
-
- /**
- * Called if we (possibly) previously called disconnectTransaction/connectTransactions,
- * but will not be calling preSetChainHead as a block failed verification.
- * Can be used to abort database transactions that were started by
- * disconnectTransactions/connectTransactions.
- */
- protected abstract void notSettingChainHead() throws BlockStoreException;
-
- /**
- * For a standard BlockChain, this should return blockStore.get(hash),
- * for a FullPrunedBlockChain blockStore.getOnceUndoableStoredBlock(hash)
- */
- protected abstract StoredBlock getStoredBlockInCurrentScope(Sha256Hash hash) throws BlockStoreException;
-
- /**
- * Processes a received block and tries to add it to the chain. If there's something wrong with the block an
- * exception is thrown. If the block is OK but cannot be connected to the chain at this time, returns false.
- * If the block can be connected to the chain, returns true.
- * Accessing block's transactions in another thread while this method runs may result in undefined behavior.
- */
- public boolean add(Block block) throws VerificationException, PrunedException {
- try {
- return add(block, true, null, null);
- } catch (BlockStoreException e) {
- // TODO: Figure out a better way to propagate this exception to the user.
- throw new RuntimeException(e);
- } catch (VerificationException e) {
- try {
- notSettingChainHead();
- } catch (BlockStoreException e1) {
- throw new RuntimeException(e1);
- }
- throw new VerificationException("Could not verify block " + block.getHashAsString() + "\n" +
- block.toString(), e);
- }
- }
-
- /**
- * Processes a received block and tries to add it to the chain. If there's something wrong with the block an
- * exception is thrown. If the block is OK but cannot be connected to the chain at this time, returns false.
- * If the block can be connected to the chain, returns true.
- */
- public boolean add(FilteredBlock block) throws VerificationException, PrunedException {
- try {
- // The block has a list of hashes of transactions that matched the Bloom filter, and a list of associated
- // Transaction objects. There may be fewer Transaction objects than hashes, this is expected. It can happen
- // in the case where we were already around to witness the initial broadcast, so we downloaded the
- // transaction and sent it to the wallet before this point (the wallet may have thrown it away if it was
- // a false positive, as expected in any Bloom filtering scheme). The filteredTxn list here will usually
- // only be full of data when we are catching up to the head of the chain and thus haven't witnessed any
- // of the transactions.
- return add(block.getBlockHeader(), true, block.getTransactionHashes(), block.getAssociatedTransactions());
- } catch (BlockStoreException e) {
- // TODO: Figure out a better way to propagate this exception to the user.
- throw new RuntimeException(e);
- } catch (VerificationException e) {
- try {
- notSettingChainHead();
- } catch (BlockStoreException e1) {
- throw new RuntimeException(e1);
- }
- throw new VerificationException("Could not verify block " + block.getHash().toString() + "\n" +
- block.toString(), e);
- }
- }
-
- /**
- * Whether or not we are maintaining a set of unspent outputs and are verifying all transactions.
- * Also indicates that all calls to add() should provide a block containing transactions
- */
- protected abstract boolean shouldVerifyTransactions();
-
- /**
- * Connect each transaction in block.transactions, verifying them as we go and removing spent outputs
- * If an error is encountered in a transaction, no changes should be made to the underlying BlockStore,
- * and a VerificationException should be thrown.
- * Only called if(shouldVerifyTransactions())
- * @throws VerificationException if an attempt was made to spend an already-spent output, or if a transaction incorrectly solved an output script.
- * @throws BlockStoreException if the block store had an underlying error.
- * @return The full set of all changes made to the set of open transaction outputs.
- */
- protected abstract TransactionOutputChanges connectTransactions(int height, Block block) throws VerificationException, BlockStoreException;
-
- /**
- * Load newBlock from BlockStore and connect its transactions, returning changes to the set of unspent transactions.
- * If an error is encountered in a transaction, no changes should be made to the underlying BlockStore.
- * Only called if(shouldVerifyTransactions())
- * @throws PrunedException if newBlock does not exist as a {@link StoredUndoableBlock} in the block store.
- * @throws VerificationException if an attempt was made to spend an already-spent output, or if a transaction incorrectly solved an output script.
- * @throws BlockStoreException if the block store had an underlying error or newBlock does not exist in the block store at all.
- * @return The full set of all changes made to the set of open transaction outputs.
- */
- protected abstract TransactionOutputChanges connectTransactions(StoredBlock newBlock) throws VerificationException, BlockStoreException, PrunedException;
-
- // Stat counters.
- private long statsLastTime = System.currentTimeMillis();
- private long statsBlocksAdded;
-
- // filteredTxHashList contains all transactions, filteredTxn just a subset
- private boolean add(Block block, boolean tryConnecting,
- @Nullable List<Sha256Hash> filteredTxHashList, @Nullable Map<Sha256Hash, Transaction> filteredTxn)
- throws BlockStoreException, VerificationException, PrunedException {
- lock.lock();
- try {
- // TODO: Use read/write locks to ensure that during chain download properties are still low latency.
- if (System.currentTimeMillis() - statsLastTime > 1000) {
- // More than a second passed since last stats logging.
- if (statsBlocksAdded > 1)
- log.info("{} blocks per second", statsBlocksAdded);
- statsLastTime = System.currentTimeMillis();
- statsBlocksAdded = 0;
- }
- // Quick check for duplicates to avoid an expensive check further down (in findSplit). This can happen a lot
- // when connecting orphan transactions due to the dumb brute force algorithm we use.
- if (block.equals(getChainHead().getHeader())) {
- return true;
- }
- if (tryConnecting && orphanBlocks.containsKey(block.getHash())) {
- return false;
- }
-
- // If we want to verify transactions (ie we are running with full blocks), verify that block has transactions
- if (shouldVerifyTransactions() && block.transactions == null)
- throw new VerificationException("Got a block header while running in full-block mode");
-
- // Check for already-seen block, but only for full pruned mode, where the DB is
- // more likely able to handle these queries quickly.
- if (shouldVerifyTransactions() && blockStore.get(block.getHash()) != null) {
- return true;
- }
-
- // Does this block contain any transactions we might care about? Check this up front before verifying the
- // blocks validity so we can skip the merkle root verification if the contents aren't interesting. This saves
- // a lot of time for big blocks.
- boolean contentsImportant = shouldVerifyTransactions();
- if (block.transactions != null) {
- contentsImportant = contentsImportant || containsRelevantTransactions(block);
- }
-
- // Prove the block is internally valid: hash is lower than target, etc. This only checks the block contents
- // if there is a tx sending or receiving coins using an address in one of our wallets. And those transactions
- // are only lightly verified: presence in a valid connecting block is taken as proof of validity. See the
- // article here for more details: http://code.google.com/p/bitcoinj/wiki/SecurityModel
- try {
- block.verifyHeader();
- if (contentsImportant)
- block.verifyTransactions();
- } catch (VerificationException e) {
- log.error("Failed to verify block: ", e);
- log.error(block.getHashAsString());
- throw e;
- }
-
- // Try linking it to a place in the currently known blocks.
- StoredBlock storedPrev = getStoredBlockInCurrentScope(block.getPrevBlockHash());
-
- if (storedPrev == null) {
- // We can't find the previous block. Probably we are still in the process of downloading the chain and a
- // block was solved whilst we were doing it. We put it to one side and try to connect it later when we
- // have more blocks.
- checkState(tryConnecting, "bug in tryConnectingOrphans");
- log.warn("Block does not connect: {} prev {}", block.getHashAsString(), block.getPrevBlockHash());
- orphanBlocks.put(block.getHash(), new OrphanBlock(block, filteredTxHashList, filteredTxn));
- return false;
- } else {
- // It connects to somewhere on the chain. Not necessarily the top of the best known chain.
- checkDifficultyTransitions(storedPrev, block);
- connectBlock(block, storedPrev, shouldVerifyTransactions(), filteredTxHashList, filteredTxn);
- }
-
- if (tryConnecting)
- tryConnectingOrphans();
-
- statsBlocksAdded++;
- return true;
- } finally {
- lock.unlock();
- }
- }
-
- /**
- * Returns the hashes of the currently stored orphan blocks and then deletes them from this object's storage.
- * Used by Peer when a filter exhaustion event has occurred and thus any orphan blocks that have been downloaded
- * might be inaccurate/incomplete.
- */
- public Set<Sha256Hash> drainOrphanBlocks() {
- lock.lock();
- try {
- Set<Sha256Hash> hashes = new HashSet<Sha256Hash>(orphanBlocks.keySet());
- orphanBlocks.clear();
- return hashes;
- } finally {
- lock.unlock();
- }
- }
-
- // expensiveChecks enables checks that require looking at blocks further back in the chain
- // than the previous one when connecting (eg median timestamp check)
- // It could be exposed, but for now we just set it to shouldVerifyTransactions()
- private void connectBlock(final Block block, StoredBlock storedPrev, boolean expensiveChecks,
- @Nullable final List<Sha256Hash> filteredTxHashList,
- @Nullable final Map<Sha256Hash, Transaction> filteredTxn) throws BlockStoreException, VerificationException, PrunedException {
- checkState(lock.isHeldByCurrentThread());
- boolean filtered = filteredTxHashList != null && filteredTxn != null;
- // Check that we aren't connecting a block that fails a checkpoint check
- if (!params.passesCheckpoint(storedPrev.getHeight() + 1, block.getHash()))
- throw new VerificationException("Block failed checkpoint lockin at " + (storedPrev.getHeight() + 1));
- if (shouldVerifyTransactions()) {
- checkNotNull(block.transactions);
- for (Transaction tx : block.transactions)
- if (!tx.isFinal(storedPrev.getHeight() + 1, block.getTimeSeconds()))
- throw new VerificationException("Block contains non-final transaction");
- }
-
- StoredBlock head = getChainHead();
- if (storedPrev.equals(head)) {
- if (filtered && filteredTxn.size() > 0) {
- log.debug("Block {} connects to top of best chain with {} transaction(s) of which we were sent {}",
- block.getHashAsString(), filteredTxHashList.size(), filteredTxn.size());
- for (Sha256Hash hash : filteredTxHashList) log.debug(" matched tx {}", hash);
- }
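- // Median-time-past rule: the new block's timestamp must be strictly later than the median timestamp of the previous 11 blocks.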
- if (expensiveChecks && block.getTimeSeconds() <= getMedianTimestampOfRecentBlocks(head, blockStore))
- throw new VerificationException("Block's timestamp is too early");
-
- // This block connects to the best known block, it is a normal continuation of the system.
- TransactionOutputChanges txOutChanges = null;
- if (shouldVerifyTransactions())
- txOutChanges = connectTransactions(storedPrev.getHeight() + 1, block);
- StoredBlock newStoredBlock = addToBlockStore(storedPrev,
- block.transactions == null ? block : block.cloneAsHeader(), txOutChanges);
- setChainHead(newStoredBlock);
- log.debug("Chain is now {} blocks high, running listeners", newStoredBlock.getHeight());
- informListenersForNewBlock(block, NewBlockType.BEST_CHAIN, filteredTxHashList, filteredTxn, newStoredBlock);
- } else {
- // This block connects to somewhere other than the top of the best known chain. We treat these differently.
- //
- // Note that we send the transactions to the wallet FIRST, even if we're about to re-organize this block
- // to become the new best chain head. This simplifies handling of the re-org in the Wallet class.
- StoredBlock newBlock = storedPrev.build(block);
- boolean haveNewBestChain = newBlock.moreWorkThan(head);
- if (haveNewBestChain) {
- log.info("Block is causing a re-organize");
- } else {
- StoredBlock splitPoint = findSplit(newBlock, head, blockStore);
- if (splitPoint != null && splitPoint.equals(newBlock)) {
- // newStoredBlock is a part of the same chain, there's no fork. This happens when we receive a block
- // that we already saw and linked into the chain previously, which isn't the chain head.
- // Re-processing it is confusing for the wallet so just skip.
- log.warn("Saw duplicated block in main chain at height {}: {}",
- newBlock.getHeight(), newBlock.getHeader().getHash());
- return;
- }
- if (splitPoint == null) {
- // This should absolutely never happen
- // (lets not write the full block to disk to keep any bugs which allow this to happen
- // from writing unreasonable amounts of data to disk)
- throw new VerificationException("Block forks the chain but splitPoint is null");
- } else {
- // We aren't actually spending any transactions (yet) because we are on a fork
- addToBlockStore(storedPrev, block);
- int splitPointHeight = splitPoint.getHeight();
- String splitPointHash = splitPoint.getHeader().getHashAsString();
- log.info("Block forks the chain at height {}/block {}, but it did not cause a reorganize:\n{}",
- splitPointHeight, splitPointHash, newBlock.getHeader().getHashAsString());
- }
- }
-
- // We may not have any transactions if we received only a header, which can happen during fast catchup.
- // If we do, send them to the wallet but state that they are on a side chain so it knows not to try and
- // spend them until they become activated.
- if (block.transactions != null || filtered) {
- informListenersForNewBlock(block, NewBlockType.SIDE_CHAIN, filteredTxHashList, filteredTxn, newBlock);
- }
-
- if (haveNewBestChain)
- handleNewBestChain(storedPrev, newBlock, block, expensiveChecks);
- }
- }
-
- private void informListenersForNewBlock(final Block block, final NewBlockType newBlockType,
- @Nullable final List<Sha256Hash> filteredTxHashList,
- @Nullable final Map<Sha256Hash, Transaction> filteredTxn,
- final StoredBlock newStoredBlock) throws VerificationException {
- // Notify the listeners of the new block, so the depth and workDone of stored transactions can be updated
- // (in the case of the listener being a wallet). Wallets need to know how deep each transaction is so
- // coinbases aren't used before maturity.
- boolean first = true;
- Set<Sha256Hash> falsePositives = Sets.newHashSet();
- if (filteredTxHashList != null) falsePositives.addAll(filteredTxHashList);
- for (final ListenerRegistration<BlockChainListener> registration : listeners) {
- if (registration.executor == Threading.SAME_THREAD) {
- informListenerForNewTransactions(block, newBlockType, filteredTxHashList, filteredTxn,
- newStoredBlock, first, registration.listener, falsePositives);
- if (newBlockType == NewBlockType.BEST_CHAIN)
- registration.listener.notifyNewBestBlock(newStoredBlock);
- } else {
- // Listener wants to be run on some other thread, so marshal it across here.
- final boolean notFirst = !first;
- registration.executor.execute(new Runnable() {
- @Override
- public void run() {
- try {
- // We can't do false-positive handling when executing on another thread
- Set<Sha256Hash> ignoredFalsePositives = Sets.newHashSet();
- informListenerForNewTransactions(block, newBlockType, filteredTxHashList, filteredTxn,
- newStoredBlock, notFirst, registration.listener, ignoredFalsePositives);
- if (newBlockType == NewBlockType.BEST_CHAIN)
- registration.listener.notifyNewBestBlock(newStoredBlock);
- } catch (VerificationException e) {
- log.error("Block chain listener threw exception: ", e);
- // Don't attempt to relay this back to the original peer thread if this was an async
- // listener invocation.
- // TODO: Make exception reporting a global feature and use it here.
- }
- }
- });
- }
- first = false;
- }
-
- trackFalsePositives(falsePositives.size());
- }
-
- private static void informListenerForNewTransactions(Block block, NewBlockType newBlockType,
- @Nullable List<Sha256Hash> filteredTxHashList,
- @Nullable Map<Sha256Hash, Transaction> filteredTxn,
- StoredBlock newStoredBlock, boolean first,
- BlockChainListener listener,
- Set<Sha256Hash> falsePositives) throws VerificationException {
- if (block.transactions != null) {
- // If this is not the first wallet, ask for the transactions to be duplicated before being given
- // to the wallet when relevant. This ensures that if we have two connected wallets and a tx that
- // is relevant to both of them, they don't end up accidentally sharing the same object (which can
- // result in temporary in-memory corruption during re-orgs). See bug 257. We only duplicate in
- // the case of multiple wallets to avoid an unnecessary efficiency hit in the common case.
- sendTransactionsToListener(newStoredBlock, newBlockType, listener, 0, block.transactions,
- !first, falsePositives);
- } else if (filteredTxHashList != null) {
- checkNotNull(filteredTxn);
- // We must send transactions to listeners in the order they appeared in the block - thus we iterate over the
- // set of hashes and call sendTransactionsToListener with individual txn when they have not already been
- // seen in loose broadcasts - otherwise notifyTransactionIsInBlock on the hash.
- int relativityOffset = 0;
- for (Sha256Hash hash : filteredTxHashList) {
- Transaction tx = filteredTxn.get(hash);
- if (tx != null) {
- sendTransactionsToListener(newStoredBlock, newBlockType, listener, relativityOffset,
- Arrays.asList(tx), !first, falsePositives);
- } else {
- if (listener.notifyTransactionIsInBlock(hash, newStoredBlock, newBlockType, relativityOffset)) {
- falsePositives.remove(hash);
- }
- }
- relativityOffset++;
- }
- }
- }
-
- /**
- * Gets the median timestamp of the last 11 blocks
- */
- private static long getMedianTimestampOfRecentBlocks(StoredBlock storedBlock,
- BlockStore store) throws BlockStoreException {
- long[] timestamps = new long[11];
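- // Slots 10..0 are filled walking backwards from storedBlock; 'unused' counts down to mark the next slot to fill,
- // so if fewer than 11 blocks exist the median below is taken over the filled slots only.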
- int unused = 9;
- timestamps[10] = storedBlock.getHeader().getTimeSeconds();
- while (unused >= 0 && (storedBlock = storedBlock.getPrev(store)) != null)
- timestamps[unused--] = storedBlock.getHeader().getTimeSeconds();
-
- Arrays.sort(timestamps, unused+1, 11);
- return timestamps[unused + (11-unused)/2];
- }
-
- /**
- * Disconnect each transaction in the block (after reading it from the block store)
- * Only called if(shouldVerifyTransactions())
- * @throws PrunedException if block does not exist as a {@link StoredUndoableBlock} in the block store.
- * @throws BlockStoreException if the block store had an underlying error or block does not exist in the block store at all.
- */
- protected abstract void disconnectTransactions(StoredBlock block) throws PrunedException, BlockStoreException;
-
- /**
- * Called as part of connecting a block when the new block results in a different chain having higher total work.
- *
- * if (shouldVerifyTransactions)
- * Either newChainHead needs to be in the block store as a FullStoredBlock, or (block != null && block.transactions != null)
- */
- private void handleNewBestChain(StoredBlock storedPrev, StoredBlock newChainHead, Block block, boolean expensiveChecks)
- throws BlockStoreException, VerificationException, PrunedException {
- checkState(lock.isHeldByCurrentThread());
- // This chain has overtaken the one we currently believe is best. Reorganize is required.
- //
- // Firstly, calculate the block at which the chain diverged. We only need to examine the
- // chain from beyond this block to find differences.
- StoredBlock head = getChainHead();
- final StoredBlock splitPoint = findSplit(newChainHead, head, blockStore);
- log.info("Re-organize after split at height {}", splitPoint.getHeight());
- log.info("Old chain head: {}", head.getHeader().getHashAsString());
- log.info("New chain head: {}", newChainHead.getHeader().getHashAsString());
- log.info("Split at block: {}", splitPoint.getHeader().getHashAsString());
- // Then build a list of all blocks in the old part of the chain and the new part.
- final LinkedList<StoredBlock> oldBlocks = getPartialChain(head, splitPoint, blockStore);
- final LinkedList<StoredBlock> newBlocks = getPartialChain(newChainHead, splitPoint, blockStore);
- // Disconnect each transaction in the previous main chain that is no longer in the new main chain
- StoredBlock storedNewHead = splitPoint;
- if (shouldVerifyTransactions()) {
- for (StoredBlock oldBlock : oldBlocks) {
- try {
- disconnectTransactions(oldBlock);
- } catch (PrunedException e) {
- // We threw away the data we need to re-org this deep! We need to go back to a peer with full
- // block contents and ask them for the relevant data, then rebuild the indexes. Or we could just
- // give up and ask the human operator to help get us unstuck (eg, rescan from the genesis block).
- // TODO: Retry adding this block when we get a block with hash e.getHash()
- throw e;
- }
- }
- StoredBlock cursor;
- // Walk in ascending chronological order.
- for (Iterator<StoredBlock> it = newBlocks.descendingIterator(); it.hasNext();) {
- cursor = it.next();
- if (expensiveChecks && cursor.getHeader().getTimeSeconds() <= getMedianTimestampOfRecentBlocks(cursor.getPrev(blockStore), blockStore))
- throw new VerificationException("Block's timestamp is too early during reorg");
- TransactionOutputChanges txOutChanges;
- if (cursor != newChainHead || block == null)
- txOutChanges = connectTransactions(cursor);
- else
- txOutChanges = connectTransactions(newChainHead.getHeight(), block);
- storedNewHead = addToBlockStore(storedNewHead, cursor.getHeader(), txOutChanges);
- }
- } else {
- // (Finally) write block to block store
- storedNewHead = addToBlockStore(storedPrev, newChainHead.getHeader());
- }
- // Now inform the listeners. This is necessary so the set of currently active transactions (that we can spend)
- // can be updated to take into account the re-organize. We might also have received new coins we didn't have
- // before and our previous spends might have been undone.
- for (final ListenerRegistration<BlockChainListener> registration : listeners) {
- if (registration.executor == Threading.SAME_THREAD) {
- // Short circuit the executor so we can propagate any exceptions.
- // TODO: Do we really need to do this or should it be irrelevant?
- registration.listener.reorganize(splitPoint, oldBlocks, newBlocks);
- } else {
- registration.executor.execute(new Runnable() {
- @Override
- public void run() {
- try {
- registration.listener.reorganize(splitPoint, oldBlocks, newBlocks);
- } catch (VerificationException e) {
- log.error("Block chain listener threw exception during reorg", e);
- }
- }
- });
- }
- }
- // Update the pointer to the best known block.
- setChainHead(storedNewHead);
- }
-
- /**
- * Returns the set of contiguous blocks between 'higher' and 'lower'. Higher is included, lower is not.
- */
- private static LinkedList<StoredBlock> getPartialChain(StoredBlock higher, StoredBlock lower, BlockStore store) throws BlockStoreException {
- checkArgument(higher.getHeight() > lower.getHeight(), "higher and lower are reversed");
- LinkedList<StoredBlock> results = new LinkedList<StoredBlock>();
- StoredBlock cursor = higher;
- while (true) {
- results.add(cursor);
- cursor = checkNotNull(cursor.getPrev(store), "Ran off the end of the chain");
- if (cursor.equals(lower)) break;
- }
- return results;
- }
-
- /**
- * Locates the point in the chain at which newStoredBlock and chainHead diverge. Returns null if no split point was
- * found (ie they are not part of the same chain). Returns newChainHead or chainHead if they don't actually diverge
- * but are part of the same chain.
- */
- private static StoredBlock findSplit(StoredBlock newChainHead, StoredBlock oldChainHead,
- BlockStore store) throws BlockStoreException {
- StoredBlock currentChainCursor = oldChainHead;
- StoredBlock newChainCursor = newChainHead;
- // Loop until we find the block both chains have in common. Example:
- //
- // A -> B -> C -> D
- // \--> E -> F -> G
- //
- // findSplit will return block B. oldChainHead = D and newChainHead = G.
- while (!currentChainCursor.equals(newChainCursor)) {
- if (currentChainCursor.getHeight() > newChainCursor.getHeight()) {
- currentChainCursor = currentChainCursor.getPrev(store);
- checkNotNull(currentChainCursor, "Attempt to follow an orphan chain");
- } else {
- newChainCursor = newChainCursor.getPrev(store);
- checkNotNull(newChainCursor, "Attempt to follow an orphan chain");
- }
- }
- return currentChainCursor;
- }
-
- /**
- * @return the height of the best known chain, convenience for getChainHead().getHeight().
- */
- public int getBestChainHeight() {
- return getChainHead().getHeight();
- }
-
- public enum NewBlockType {
- BEST_CHAIN,
- SIDE_CHAIN
- }
-
- private static void sendTransactionsToListener(StoredBlock block, NewBlockType blockType,
- BlockChainListener listener,
- int relativityOffset,
- List<Transaction> transactions,
- boolean clone,
- Set falsePositives) throws VerificationException {
- for (Transaction tx : transactions) {
- try {
- if (listener.isTransactionRelevant(tx)) {
- falsePositives.remove(tx.getHash());
- if (clone)
- tx = new Transaction(tx.params, tx.bitcoinSerialize());
- listener.receiveFromBlock(tx, block, blockType, relativityOffset++);
- }
- } catch (ScriptException e) {
- // We don't want scripts we don't understand to break the block chain so just note that this tx was
- // not scanned here and continue.
- log.warn("Failed to parse a script: " + e.toString());
- } catch (ProtocolException e) {
- // Failed to duplicate tx, should never happen.
- throw new RuntimeException(e);
- }
- }
- }
-
- protected void setChainHead(StoredBlock chainHead) throws BlockStoreException {
- doSetChainHead(chainHead);
- synchronized (chainHeadLock) {
- this.chainHead = chainHead;
- }
- }
-
- /**
- * For each block in orphanBlocks, see if we can now fit it on top of the chain and if so, do so.
- */
- private void tryConnectingOrphans() throws VerificationException, BlockStoreException, PrunedException {
- checkState(lock.isHeldByCurrentThread());
- // For each block in our orphan list, try and fit it onto the head of the chain. If we succeed remove it
- // from the list and keep going. If we changed the head of the list at the end of the round try again until
- // we can't fit anything else on the top.
- //
- // This algorithm is kind of crappy, we should do a topo-sort then just connect them in order, but for small
- // numbers of orphan blocks it does OK.
- int blocksConnectedThisRound;
- do {
- blocksConnectedThisRound = 0;
- Iterator<OrphanBlock> iter = orphanBlocks.values().iterator();
- while (iter.hasNext()) {
- OrphanBlock orphanBlock = iter.next();
- // Look up the block's previous block.
- StoredBlock prev = getStoredBlockInCurrentScope(orphanBlock.block.getPrevBlockHash());
- if (prev == null) {
- // This is still an unconnected/orphan block.
- log.debug(" but it is not connectable right now");
- continue;
- }
- // Otherwise we can connect it now.
- // False here ensures we don't recurse infinitely downwards when connecting huge chains.
- log.info("Connected orphan {}", orphanBlock.block.getHash());
- add(orphanBlock.block, false, orphanBlock.filteredTxHashes, orphanBlock.filteredTxn);
- iter.remove();
- blocksConnectedThisRound++;
- }
- if (blocksConnectedThisRound > 0) {
- log.info("Connected {} orphan blocks.", blocksConnectedThisRound);
- }
- } while (blocksConnectedThisRound > 0);
- }
-
- // February 16th 2012
- private static final Date testnetDiffDate = new Date(1329264000000L);
- private static final int testNetRetargetFix = 157500;
-
- /**
- * Throws an exception if the blocks difficulty is not correct.
- */
- private void checkDifficultyTransitions(StoredBlock storedPrev, Block nextBlock) throws BlockStoreException, VerificationException {
- checkState(lock.isHeldByCurrentThread());
- Block prev = storedPrev.getHeader();
-
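- // From params.getDiffChangeTarget() onwards, Dogecoin switches to a different retarget interval and timespan (the "new" algorithm below).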
- boolean newDiffAlgo = storedPrev.getHeight() + 1 >= params.getDiffChangeTarget();
- int retargetInterval = params.getInterval();
- int retargetTimespan = params.getTargetTimespan();
- if (newDiffAlgo)
- {
- retargetInterval = params.getNewInterval();
- retargetTimespan = params.getNewTargetTimespan();
- }
-
- if (params.getId().equals(NetworkParameters.ID_TESTNET)
- && storedPrev.getHeight() >= testNetRetargetFix
- && nextBlock.getTimeSeconds() > storedPrev.getHeader().getTimeSeconds() + retargetTimespan * 2 ) {
- checkTestnetDifficulty(storedPrev, prev, nextBlock);
- return;
- }
-
- // Is this supposed to be a difficulty transition point?
- if ((storedPrev.getHeight() + 1) % retargetInterval != 0) {
-
- // TODO: Refactor this hack after 0.5 is released and we stop supporting deserialization compatibility.
- // This should be a method of the NetworkParameters, which should in turn be using singletons and a subclass
- // for each network type. Then each network can define its own difficulty transition rules.
- if (params.getId().equals(NetworkParameters.ID_TESTNET) && nextBlock.getTime().after(testnetDiffDate)) {
- checkTestnetDifficulty(storedPrev, prev, nextBlock);
- return;
- }
-
- // No ... so check the difficulty didn't actually change.
- if (nextBlock.getDifficultyTarget() != prev.getDifficultyTarget())
- throw new VerificationException("Unexpected change in difficulty at height " + storedPrev.getHeight() +
- ": " + Long.toHexString(nextBlock.getDifficultyTarget()) + " vs " +
- Long.toHexString(prev.getDifficultyTarget()));
- return;
- }
-
- // We need to find a block far back in the chain. It's OK that this is expensive because it only occurs every
- // two weeks after the initial block chain download.
- long now = System.currentTimeMillis();
- StoredBlock cursor = blockStore.get(prev.getHash());
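- // Walk back a full retargetInterval of blocks, except at the very first transition where only interval - 1 ancestors exist.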
- int goBack = retargetInterval - 1;
- if (cursor.getHeight()+1 != retargetInterval)
- goBack = retargetInterval;
-
- for (int i = 0; i < goBack; i++) {
- if (cursor == null) {
- // This should never happen. If it does, it means we are following an incorrect or busted chain.
- throw new VerificationException(
- "Difficulty transition point but we did not find a way back to the genesis block.");
- }
- cursor = blockStore.get(cursor.getHeader().getPrevBlockHash());
- }
-
- //We used checkpoints...
- if(cursor == null)
- {
- log.debug("Difficulty transition: Hit checkpoint!");
- return;
- }
-
- long elapsed = System.currentTimeMillis() - now;
- if (elapsed > 50)
- log.info("Difficulty transition traversal took {}msec", elapsed);
-
- Block blockIntervalAgo = cursor.getHeader();
- int timespan = (int) (prev.getTimeSeconds() - blockIntervalAgo.getTimeSeconds());
- final int targetTimespan = retargetTimespan;
-
- if (newDiffAlgo)
- {
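- // The new algorithm dampens the adjustment: only 1/8 of the measured deviation is applied,
- // then the result is clamped to between -25% and +50% of the target timespan.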
- timespan = retargetTimespan + (timespan - retargetTimespan)/8;
- if (timespan < (retargetTimespan - (retargetTimespan/4)) ) timespan = (retargetTimespan - (retargetTimespan/4));
- if (timespan > (retargetTimespan + (retargetTimespan/2)) ) timespan = (retargetTimespan + (retargetTimespan/2));
- }
- // Limit the adjustment step.
- else if (storedPrev.getHeight()+1 > 10000)
- {
- if (timespan < targetTimespan / 4)
- timespan = targetTimespan / 4;
- if (timespan > targetTimespan * 4)
- timespan = targetTimespan * 4;
- }
- else if (storedPrev.getHeight()+1 > 5000)
- {
- if (timespan < targetTimespan / 8)
- timespan = targetTimespan / 8;
- if (timespan > targetTimespan * 4)
- timespan = targetTimespan * 4;
- }
- else
- {
- if (timespan < targetTimespan / 16)
- timespan = targetTimespan / 16;
- if (timespan > targetTimespan * 4)
- timespan = targetTimespan * 4;
- }
-
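- // Standard retarget calculation: newTarget = oldTarget * actualTimespan / targetTimespan (a larger target means lower difficulty).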
- BigInteger newTarget = Utils.decodeCompactBits(prev.getDifficultyTarget());
- newTarget = newTarget.multiply(BigInteger.valueOf(timespan));
- newTarget = newTarget.divide(BigInteger.valueOf(targetTimespan));
-
- if (newTarget.compareTo(params.getMaxTarget()) > 0) {
- log.info("Difficulty hit proof of work limit: {}", newTarget.toString(16));
- newTarget = params.getMaxTarget();
- }
-
- int accuracyBytes = (int) (nextBlock.getDifficultyTarget() >>> 24) - 3;
- long receivedTargetCompact = nextBlock.getDifficultyTarget();
-
- // The calculated difficulty is to a higher precision than received, so reduce here.
- BigInteger mask = BigInteger.valueOf(0xFFFFFFL).shiftLeft(accuracyBytes * 8);
- newTarget = newTarget.and(mask);
- long newTargetCompact = Utils.encodeCompactBits(newTarget);
-
- if (newTargetCompact != receivedTargetCompact)
- throw new VerificationException("Network provided difficulty bits do not match what was calculated: " +
- newTargetCompact + " vs " + receivedTargetCompact);
- }
-
- private void checkTestnetDifficulty(StoredBlock storedPrev, Block prev, Block next) throws VerificationException, BlockStoreException {
- checkState(lock.isHeldByCurrentThread());
- // After 15th February 2012 the rules on the testnet change to avoid people running up the difficulty
- // and then leaving, making it too hard to mine a block. On non-difficulty transition points, easy
- // blocks are allowed if there has been a span of 20 minutes without one.
- final long timeDelta = next.getTimeSeconds() - prev.getTimeSeconds();
- // There is an integer underflow bug in bitcoin-qt that means mindiff blocks are accepted when time
- // goes backwards.
- if (timeDelta >= 0 && timeDelta <= NetworkParameters.TARGET_SPACING * 2) {
- // Walk backwards until we find a block that doesn't have the easiest proof of work, then check
- // that difficulty is equal to that one.
- StoredBlock cursor = storedPrev;
- while (!cursor.getHeader().equals(params.getGenesisBlock()) &&
- cursor.getHeight() % params.getInterval() != 0 &&
- cursor.getHeader().getDifficultyTargetAsInteger().equals(params.getMaxTarget()))
- cursor = cursor.getPrev(blockStore);
- BigInteger cursorTarget = cursor.getHeader().getDifficultyTargetAsInteger();
- BigInteger newTarget = next.getDifficultyTargetAsInteger();
- if (!cursorTarget.equals(newTarget))
- throw new VerificationException("Testnet block transition that is not allowed: " +
- Long.toHexString(cursor.getHeader().getDifficultyTarget()) + " vs " +
- Long.toHexString(next.getDifficultyTarget()));
- }
- }
-
- /**
- * Returns true if any connected wallet considers any transaction in the block to be relevant.
- */
- private boolean containsRelevantTransactions(Block block) {
- // Does not need to be locked.
- for (Transaction tx : block.transactions) {
- try {
- for (final ListenerRegistration<BlockChainListener> registration : listeners) {
- if (registration.executor != Threading.SAME_THREAD) continue;
- if (registration.listener.isTransactionRelevant(tx)) return true;
- }
- } catch (ScriptException e) {
- // We don't want scripts we don't understand to break the block chain so just note that this tx was
- // not scanned here and continue.
- log.warn("Failed to parse a script: " + e.toString());
- }
- }
- return false;
- }
-
- /**
- * Returns the block at the head of the current best chain. This is the block which represents the greatest
- * amount of cumulative work done.
- */
- public StoredBlock getChainHead() {
- synchronized (chainHeadLock) {
- return chainHead;
- }
- }
-
- /**
- * An orphan block is one that does not connect to the chain anywhere (ie we can't find its parent, therefore
- * it's an orphan). Typically this occurs when we are downloading the chain and didn't reach the head yet, and/or
- * if a block is solved whilst we are downloading. It's possible that we see a small amount of orphan blocks which
- * chain together, this method tries walking backwards through the known orphan blocks to find the bottom-most.
- *
- * @return from or one of from's parents, or null if "from" does not identify an orphan block
- */
- @Nullable
- public Block getOrphanRoot(Sha256Hash from) {
- lock.lock();
- try {
- OrphanBlock cursor = orphanBlocks.get(from);
- if (cursor == null)
- return null;
- OrphanBlock tmp;
- while ((tmp = orphanBlocks.get(cursor.block.getPrevBlockHash())) != null) {
- cursor = tmp;
- }
- return cursor.block;
- } finally {
- lock.unlock();
- }
- }
-
- /** Returns true if the given block is currently in the orphan blocks list. */
- public boolean isOrphan(Sha256Hash block) {
- lock.lock();
- try {
- return orphanBlocks.containsKey(block);
- } finally {
- lock.unlock();
- }
- }
-
- /**
- * Returns an estimate of when the given block will be reached, assuming a perfect 10 minute average for each
- * block. This is useful for turning transaction lock times into human readable times. Note that a height in
- * the past will still be estimated, even though the time of solving is actually known (we won't scan backwards
- * through the chain to obtain the right answer).
- */
- public Date estimateBlockTime(int height) {
- synchronized (chainHeadLock) {
- long offset = height - chainHead.getHeight();
- long headTime = chainHead.getHeader().getTimeSeconds();
- long estimated = (headTime * 1000) + (1000L * 60L * 10L * offset);
- return new Date(estimated);
- }
- }
-
- /**
- * Returns a future that completes when the block chain has reached the given height. Yields the
- * {@link StoredBlock} of the block that reaches that height first. The future completes on a peer thread.
- */
- public ListenableFuture<StoredBlock> getHeightFuture(final int height) {
- final SettableFuture<StoredBlock> result = SettableFuture.create();
- addListener(new AbstractBlockChainListener() {
- @Override
- public void notifyNewBestBlock(StoredBlock block) throws VerificationException {
- if (block.getHeight() >= height) {
- removeListener(this);
- result.set(block);
- }
- }
- }, Threading.SAME_THREAD);
- return result;
- }
-
-
-
- /**
- * The false positive rate is the average over all blockchain transactions of:
- *
- * - 1.0 if the transaction was false-positive (was irrelevant to all listeners)
- * - 0.0 if the transaction was relevant or filtered out
- */
- public double getFalsePositiveRate() {
- return falsePositiveRate;
- }
-
- /*
- * We completed handling of a filtered block. Update false-positive estimate based
- * on the total number of transactions in the original block.
- *
- * count includes filtered transactions, transactions that were passed in and were relevant
- * and transactions that were false positives (i.e. includes all transactions in the block).
- */
- protected void trackFilteredTransactions(int count) {
- // Track non-false-positives in batch. Each non-false-positive counts as
- // 0.0 towards the estimate.
- //
- // This is slightly off because we are applying false positive tracking before non-FP tracking,
- // which counts FP as if they came at the beginning of the block. Assuming uniform FP
- // spread in a block, this will somewhat underestimate the FP rate (5% for 1000 tx block).
- double alphaDecay = Math.pow(1 - FP_ESTIMATOR_ALPHA, count);
-
- // new_rate = alpha_decay * new_rate
- falsePositiveRate = alphaDecay * falsePositiveRate;
-
- double betaDecay = Math.pow(1 - FP_ESTIMATOR_BETA, count);
-
- // trend = beta * count * (new_rate - old_rate) + beta_decay * trend
- falsePositiveTrend =
- FP_ESTIMATOR_BETA * count * (falsePositiveRate - previousFalsePositiveRate) +
- betaDecay * falsePositiveTrend;
-
- // new_rate += alpha_decay * trend
- falsePositiveRate += alphaDecay * falsePositiveTrend;
-
- // Stash new_rate in old_rate
- previousFalsePositiveRate = falsePositiveRate;
- }
-
- /* Irrelevant transactions were received. Update false-positive estimate. */
- void trackFalsePositives(int count) {
- // Track false positives in batch by adding alpha to the false positive estimate once per count.
- // Each false positive counts as 1.0 towards the estimate.
- falsePositiveRate += FP_ESTIMATOR_ALPHA * count;
- if (count > 0)
- log.debug("{} false positives, current rate = {} trend = {}", count, falsePositiveRate, falsePositiveTrend);
- }
-
- /** Resets estimates of false positives. Used when the filter is sent to the peer. */
- public void resetFalsePositiveEstimate() {
- falsePositiveRate = 0;
- falsePositiveTrend = 0;
- previousFalsePositiveRate = 0;
- }
-}
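Reviewer note: the chain-query helpers removed above (getChainHead(), estimateBlockTime() and getHeightFuture()) were typically combined to wait until the chain reaches a transaction's lock-time height. The sketch below is not part of the patch; it only illustrates the deleted API, assumes an already constructed BlockChain named chain, and uses the pre-move com.dogecoin.dogecoinj package names.

    import java.util.Date;
    import java.util.concurrent.ExecutionException;

    import com.dogecoin.dogecoinj.core.BlockChain;
    import com.dogecoin.dogecoinj.core.StoredBlock;
    import com.google.common.util.concurrent.ListenableFuture;

    public class LockTimeWait {
        /** Blocks the calling thread until the chain reaches lockHeight, printing a rough ETA first. */
        static StoredBlock awaitHeight(BlockChain chain, int lockHeight)
                throws InterruptedException, ExecutionException {
            // estimateBlockTime() assumes a perfect 10 minute block spacing, so this is only a rough guide.
            Date eta = chain.estimateBlockTime(lockHeight);
            System.out.println("Height " + lockHeight + " expected around " + eta);

            // The future completes on a peer thread once any best-chain block reaches the requested height.
            ListenableFuture<StoredBlock> future = chain.getHeightFuture(lockHeight);
            return future.get();
        }
    }
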
diff --git a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractBlockChainListener.java b/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractBlockChainListener.java
deleted file mode 100644
index bf7fa6a2..00000000
--- a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractBlockChainListener.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Copyright 2013 Google Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dogecoin.dogecoinj.core;
-
-import java.util.List;
-
-/**
- * Default no-op implementation of {@link BlockChainListener}.
- */
-public class AbstractBlockChainListener implements BlockChainListener {
- @Override
- public void notifyNewBestBlock(StoredBlock block) throws VerificationException {
- }
-
- @Override
- public void reorganize(StoredBlock splitPoint, List<StoredBlock> oldBlocks, List<StoredBlock> newBlocks) throws VerificationException {
- }
-
- @Override
- public boolean isTransactionRelevant(Transaction tx) throws ScriptException {
- return false;
- }
-
- @Override
- public void receiveFromBlock(Transaction tx, StoredBlock block, BlockChain.NewBlockType blockType,
- int relativityOffset) throws VerificationException {
- }
-
- @Override
- public boolean notifyTransactionIsInBlock(Sha256Hash txHash, StoredBlock block, BlockChain.NewBlockType blockType,
- int relativityOffset) throws VerificationException {
- return false;
- }
-}
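Reviewer note: AbstractBlockChainListener is a no-op base class so callers can override only the callbacks they need, exactly as the getHeightFuture() code above does. A minimal, hypothetical sketch of that pattern (the chain variable, the println and the Threading import path are assumptions, not part of the patch):

    import com.dogecoin.dogecoinj.core.AbstractBlockChainListener;
    import com.dogecoin.dogecoinj.core.BlockChain;
    import com.dogecoin.dogecoinj.core.StoredBlock;
    import com.dogecoin.dogecoinj.core.VerificationException;
    import com.dogecoin.dogecoinj.utils.Threading;

    public class NewBestBlockLogger {
        static void install(BlockChain chain) {
            // Only notifyNewBestBlock is overridden; reorganize, isTransactionRelevant,
            // receiveFromBlock and notifyTransactionIsInBlock keep their no-op defaults.
            chain.addListener(new AbstractBlockChainListener() {
                @Override
                public void notifyNewBestBlock(StoredBlock block) throws VerificationException {
                    System.out.println("New best block at height " + block.getHeight());
                }
            }, Threading.SAME_THREAD);
        }
    }
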
diff --git a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractPeerEventListener.java b/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractPeerEventListener.java
deleted file mode 100644
index 0487c75d..00000000
--- a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractPeerEventListener.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Copyright 2011 Google Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dogecoin.dogecoinj.core;
-
-import java.util.List;
-import java.util.Set;
-
-/**
- * Convenience implementation of {@link PeerEventListener}.
- */
-public class AbstractPeerEventListener implements PeerEventListener {
- @Override
- public void onPeersDiscovered(Set<PeerAddress> peerAddresses) {
- }
-
- @Override
- public void onBlocksDownloaded(Peer peer, Block block, int blocksLeft) {
- }
-
- @Override
- public void onChainDownloadStarted(Peer peer, int blocksLeft) {
- }
-
- @Override
- public void onPeerConnected(Peer peer, int peerCount) {
- }
-
- @Override
- public void onPeerDisconnected(Peer peer, int peerCount) {
- }
-
- @Override
- public Message onPreMessageReceived(Peer peer, Message m) {
- // Just pass the message right through for further processing.
- return m;
- }
-
- @Override
- public void onTransaction(Peer peer, Transaction t) {
- }
-
- @Override
- public List<Message> getData(Peer peer, GetDataMessage m) {
- return null;
- }
-}
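Reviewer note: AbstractPeerEventListener plays the same role for the eight PeerEventListener callbacks. A short, hypothetical subclass that tracks block download progress (the logging threshold is illustrative, and how the listener is registered on a peer or peer group is outside this patch):

    import com.dogecoin.dogecoinj.core.AbstractPeerEventListener;
    import com.dogecoin.dogecoinj.core.Block;
    import com.dogecoin.dogecoinj.core.Peer;

    /** Overrides only onBlocksDownloaded; the other seven callbacks keep their defaults. */
    public class DownloadProgressLogger extends AbstractPeerEventListener {
        @Override
        public void onBlocksDownloaded(Peer peer, Block block, int blocksLeft) {
            if (blocksLeft % 1000 == 0)
                System.out.println(blocksLeft + " blocks left to download from " + peer);
        }
    }
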
diff --git a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractWalletEventListener.java b/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractWalletEventListener.java
deleted file mode 100644
index e3fd8863..00000000
--- a/core/src/main/java/com/dogecoin/dogecoinj/core/AbstractWalletEventListener.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Copyright 2011 Google Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dogecoin.dogecoinj.core;
-
-import com.dogecoin.dogecoinj.script.Script;
-import com.dogecoin.dogecoinj.wallet.AbstractKeyChainEventListener;
-
-import java.util.List;
-
-/**
- * Convenience implementation of {@link WalletEventListener}.
- */
-public abstract class AbstractWalletEventListener extends AbstractKeyChainEventListener implements WalletEventListener {
- @Override
- public void onCoinsReceived(Wallet wallet, Transaction tx, Coin prevBalance, Coin newBalance) {
- onChange();
- }
-
- @Override
- public void onCoinsSent(Wallet wallet, Transaction tx, Coin prevBalance, Coin newBalance) {
- onChange();
- }
-
- @Override
- public void onReorganize(Wallet wallet) {
- onChange();
- }
-
- @Override
- public void onTransactionConfidenceChanged(Wallet wallet, Transaction tx) {
- onChange();
- }
-
- @Override
- public void onKeysAdded(List<ECKey> keys) {
- onChange();
- }
-
- @Override
- public void onScriptsChanged(Wallet wallet, List<Script>