Add BoundedOverheadBlockStore. This is suitable for use on constrained devices like Android phones that cannot afford to store the chain headers in RAM.
On a MacBook Pro with this store, the chain can be downloaded at about 2000 blocks per second. Difficulty transitions are not optimized but take around 50 msec. On a Google Nexus S the chain can be downloaded (via WiFi) at 200 blocks per second and difficulty traversals take 2 seconds. At this time the profiles on both devices are dominated by verification costs, not storage.
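For orientation, here is a minimal sketch of how an application wires the new store into a chain, mirroring the PingService change further down. The BoundedOverheadBlockStore and BlockChain constructors are taken from the diff below; NetworkParameters.prodNet() and the Wallet constructor are assumed helpers from the surrounding library of this era and may differ.

import java.io.File;

public class StoreUsageSketch {
    public static void main(String[] args) throws Exception {
        NetworkParameters params = NetworkParameters.prodNet();  // assumed: main-net parameters helper
        Wallet wallet = new Wallet(params);                       // assumed: basic wallet constructor

        // The store keeps only a small fixed cache in RAM; everything else lives in the file,
        // so memory use stays flat no matter how long the chain grows.
        BlockStore store = new BoundedOverheadBlockStore(params, new File("production.blockchain"));

        // BlockChain depends only on the BlockStore interface, so swapping DiskBlockStore for
        // BoundedOverheadBlockStore is a one-line change (see the PingService hunk below).
        BlockChain chain = new BlockChain(params, wallet, store);
    }
}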
@@ -75,7 +75,7 @@ public class BlockChain {
      * <p>
      *
      * For the store you can use a {@link MemoryBlockStore} if you don't care about saving the downloaded data, or a
-     * {@link DiskBlockStore} if you'd like to ensure fast startup the next time you run the program.
+     * {@link BoundedOverheadBlockStore} if you'd like to ensure fast startup the next time you run the program.
      */
     public BlockChain(NetworkParameters params, Wallet wallet, BlockStore blockStore) {
         try {
@@ -104,13 +104,19 @@ public class BlockChain {
         }
     }
 
+    // Stat counters.
+    private long statsLastTime = System.currentTimeMillis();
+    private long statsBlocksAdded;
+
     private synchronized boolean add(Block block, boolean tryConnecting)
             throws BlockStoreException, VerificationException, ScriptException {
-        log.info("Adding block " + block.getHashAsString() + " to the chain");
-        if (blockStore.get(block.getHash()) != null) {
-            log.info("Already have block");
-            return true;
-        }
+        if (System.currentTimeMillis() - statsLastTime > 1000) {
+            // More than a second passed since last stats logging.
+            log.info("{} blocks per second", statsBlocksAdded);
+            statsLastTime = System.currentTimeMillis();
+            statsBlocksAdded = 0;
+        }
+        // We don't check for double adds here to avoid potentially expensive block chain misses.
 
         // Prove the block is internally valid: hash is lower than target, merkle root is correct and so on.
         try {
@@ -136,8 +142,8 @@ public class BlockChain {
         //
         // Create a new StoredBlock from this block. It will throw away the transaction data so when block goes
         // out of scope we will reclaim the used memory.
-        checkDifficultyTransitions(storedPrev, block);
         StoredBlock newStoredBlock = storedPrev.build(block);
+        checkDifficultyTransitions(storedPrev, newStoredBlock);
         blockStore.put(newStoredBlock);
         // block.transactions may be null here if we received only a header and not a full block. This does not
         // happen currently but might in future if getheaders is implemented.
@@ -147,6 +153,7 @@ public class BlockChain {
         if (tryConnecting)
             tryConnectingUnconnected();
 
+        statsBlocksAdded++;
         return true;
     }
 
@@ -308,9 +315,10 @@ public class BlockChain {
     /**
      * Throws an exception if the blocks difficulty is not correct.
      */
-    private void checkDifficultyTransitions(StoredBlock storedPrev, Block next)
+    private void checkDifficultyTransitions(StoredBlock storedPrev, StoredBlock storedNext)
             throws BlockStoreException, VerificationException {
         Block prev = storedPrev.getHeader();
+        Block next = storedNext.getHeader();
         // Is this supposed to be a difficulty transition point?
         if ((storedPrev.getHeight() + 1) % params.interval != 0) {
             // No ... so check the difficulty didn't actually change.
@@ -323,6 +331,7 @@ public class BlockChain {
 
         // We need to find a block far back in the chain. It's OK that this is expensive because it only occurs every
         // two weeks after the initial block chain download.
+        long now = System.currentTimeMillis();
         StoredBlock cursor = blockStore.get(prev.getHash());
         for (int i = 0; i < params.interval - 1; i++) {
             if (cursor == null) {
@@ -332,6 +341,7 @@ public class BlockChain {
             }
             cursor = blockStore.get(cursor.getHeader().getPrevBlockHash());
         }
+        log.info("Difficulty transition traversal took {}msec", System.currentTimeMillis() - now);
 
         Block blockIntervalAgo = cursor.getHeader();
         int timespan = (int) (prev.getTime() - blockIntervalAgo.getTime());
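To put the timings from the commit message in context: the loop above walks back params.interval - 1 ancestors, one blockStore.get() per step, and with the new store each step normally costs a single fixed-size record read. A rough, illustrative cost model follows; the interval of 2016 is the standard Bitcoin value, and the 100-byte record size comes from the Record class in the new file below.

public class TraversalCostSketch {
    public static void main(String[] args) {
        int interval = 2016;            // params.interval on the production network
        int recordSize = 4 + 16 + 80;   // Record.SIZE: height + chain work + block header
        long bytesScanned = (long) (interval - 1) * recordSize;
        // Roughly 200 KB of backwards sequential reads per difficulty transition check,
        // which is why the traversal is cheap on a laptop and tolerable on a phone.
        System.out.println(bytesScanned + " bytes scanned per transition");
    }
}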
src/com/google/bitcoin/core/BoundedOverheadBlockStore.java  (new file, 284 lines)
@@ -0,0 +1,284 @@
/**
 * Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.bitcoin.core;

import java.io.*;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.*;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Stores the block chain to disk.<p>
 *
 * This implementation is designed to have constant memory usage, regardless of the size of the block chain being
 * stored. It exploits operating system level buffering and the fact that get() requests are, in normal usage,
 * localized in chain space.<p>
 *
 * Blocks are stored sequentially. Most blocks are fetched out of a small in-memory cache. The slowest part is
 * traversing difficulty transition points, which requires seeking backwards over around 2000 blocks. On a Google
 * Nexus S phone this takes a couple of seconds. On a MacBook Pro it takes around 50msec.<p>
 *
 * The store has much room for optimization. Expanding the size of the cache will likely allow us to traverse
 * difficulty transitions without using too much memory and without hitting the disk at all, for the case of initial
 * block chain download. Storing the hashes on disk would allow us to avoid deserialization and hashing which is
 * expensive on Android.
 */
public class BoundedOverheadBlockStore implements BlockStore {
    private static final Logger log = LoggerFactory.getLogger(BoundedOverheadBlockStore.class);
    private static final byte FILE_FORMAT_VERSION = 1;

    private RandomAccessFile file;
    // We keep some recently found blocks in the blockCache. It can help to optimize some cases where we are
    // looking up blocks we recently stored or requested. When the cache gets too big older entries are deleted.
    private LinkedHashMap<Sha256Hash, StoredBlock> blockCache = new LinkedHashMap<Sha256Hash, StoredBlock>() {
        @Override
        protected boolean removeEldestEntry(Map.Entry<Sha256Hash, StoredBlock> entry) {
            return size() > 100;  // This was chosen arbitrarily.
        }
    };
    // Use a separate cache to track get() misses. This is to efficiently handle the case of an unconnected block
    // during chain download. Each new block will do a get() on the unconnected block so if we haven't seen it yet we
    // must efficiently respond.
    //
    // We don't care about the value in this cache. It is always notFoundMarker. Unfortunately LinkedHashSet does not
    // provide the removeEldestEntry control.
    private StoredBlock notFoundMarker;
    private LinkedHashMap<Sha256Hash, StoredBlock> notFoundCache = new LinkedHashMap<Sha256Hash, StoredBlock>() {
        @Override
        protected boolean removeEldestEntry(Map.Entry<Sha256Hash, StoredBlock> entry) {
            return size() > 100;  // This was chosen arbitrarily.
        }
    };

    private Sha256Hash chainHead;
    private NetworkParameters params;
    private FileChannel channel;

    private class Record {
        // A BigInteger representing the total amount of work done so far on this chain. As of May 2011 it takes 8
        // bytes to represent this field, so 16 bytes should be plenty for a long time.
        private static final int CHAIN_WORK_BYTES = 16;
        private final byte[] EMPTY_BYTES = new byte[CHAIN_WORK_BYTES];

        private int height;           // 4 bytes
        private byte[] chainWork;     // 16 bytes
        private byte[] blockHeader;   // 80 bytes

        public static final int SIZE = 4 + Record.CHAIN_WORK_BYTES + Block.HEADER_SIZE;

        public Record() {
            height = 0;
            chainWork = new byte[CHAIN_WORK_BYTES];
            blockHeader = new byte[Block.HEADER_SIZE];
        }

        // This should be static but the language does not allow for it.
        public void write(FileChannel channel, StoredBlock block) throws IOException {
            ByteBuffer buf = ByteBuffer.allocate(Record.SIZE);
            buf.putInt(block.getHeight());
            byte[] chainWorkBytes = block.getChainWork().toByteArray();
            assert chainWorkBytes.length <= CHAIN_WORK_BYTES : "Ran out of space to store chain work!";
            if (chainWorkBytes.length < CHAIN_WORK_BYTES) {
                // Pad to the right size.
                buf.put(EMPTY_BYTES, 0, CHAIN_WORK_BYTES - chainWorkBytes.length);
            }
            buf.put(chainWorkBytes);
            buf.put(block.getHeader().bitcoinSerialize());
            buf.position(0);
            channel.position(channel.size());
            if (channel.write(buf) < Record.SIZE)
                throw new IOException("Failed to write record!");
            channel.position(channel.size() - Record.SIZE);
        }

        public boolean read(FileChannel channel, long position, ByteBuffer buffer) throws IOException {
            buffer.position(0);
            long bytesRead = channel.read(buffer, position);
            if (bytesRead < Record.SIZE)
                return false;
            buffer.position(0);
            height = buffer.getInt();
            buffer.get(chainWork);
            buffer.get(blockHeader);
            return true;
        }

        public BigInteger getChainWork() {
            return new BigInteger(1, chainWork);
        }

        public Block getHeader(NetworkParameters params) throws ProtocolException {
            return new Block(params, blockHeader);
        }

        public int getHeight() {
            return height;
        }

        public StoredBlock toStoredBlock(NetworkParameters params) throws ProtocolException {
            return new StoredBlock(getHeader(params), getChainWork(), getHeight());
        }
    }

    public BoundedOverheadBlockStore(NetworkParameters params, File file) throws BlockStoreException {
        this.params = params;
        notFoundMarker = new StoredBlock(null, null, -1);
        try {
            load(file);
        } catch (Exception e) {
            log.error("failed to load block store from file", e);
            createNewStore(params, file);
        }
    }

    private void createNewStore(NetworkParameters params, File file) throws BlockStoreException {
        // Create a new block store if the file wasn't found or anything went wrong whilst reading.
        blockCache.clear();
        try {
            file.delete();
            this.file = new RandomAccessFile(file, "rw");  // Create fresh.
            this.channel = this.file.getChannel();
            this.file.write(FILE_FORMAT_VERSION);
        } catch (IOException e1) {
            // We could not load a block store nor could we create a new one!
            throw new BlockStoreException(e1);
        }
        try {
            // Set up the genesis block. When we start out fresh, it is by definition the top of the chain.
            Block genesis = params.genesisBlock.cloneAsHeader();
            StoredBlock storedGenesis = new StoredBlock(genesis, genesis.getWork(), 0);
            this.chainHead = new Sha256Hash(storedGenesis.getHeader().getHash());
            this.file.write(this.chainHead.hash);
            put(storedGenesis);
        } catch (VerificationException e1) {
            throw new RuntimeException(e1);  // Cannot happen.
        } catch (IOException e) {
            throw new BlockStoreException(e);
        }
    }

    private void load(File file) throws IOException, BlockStoreException {
        log.info("Reading block store from {}", file);
        this.file = new RandomAccessFile(file, "rw");
        channel = this.file.getChannel();
        // Read a version byte.
        int version = this.file.read();
        if (version == -1) {
            // No such file or the file was empty.
            throw new FileNotFoundException(file.getName() + " does not exist or is empty");
        }
        if (version != FILE_FORMAT_VERSION) {
            throw new BlockStoreException("Bad version number: " + version);
        }
        // Chain head pointer is the first thing in the file.
        byte[] chainHeadHash = new byte[32];
        this.file.read(chainHeadHash);
        this.chainHead = new Sha256Hash(chainHeadHash);
        log.info("Read chain head from disk: {}", this.chainHead);
        channel.position(channel.size() - Record.SIZE);
    }

    // TODO: This is ugly, fixinate!
    private Record dummyRecord = new Record();

    public synchronized void put(StoredBlock block) throws BlockStoreException {
        try {
            Sha256Hash hash = new Sha256Hash(block.getHeader().getHash());
            // Append to the end of the file.
            dummyRecord.write(channel, block);
            blockCache.put(hash, block);
        } catch (IOException e) {
            throw new BlockStoreException(e);
        }
    }

    public synchronized StoredBlock get(byte[] hashBytes) throws BlockStoreException {
        // Check the memory cache first.
        Sha256Hash hash = new Sha256Hash(hashBytes);
        StoredBlock fromMem = blockCache.get(hash);
        if (fromMem != null) {
            return fromMem;
        }
        if (notFoundCache.get(hash) == notFoundMarker) {
            return null;
        }

        try {
            Record fromDisk = getRecord(hash);
            StoredBlock block = null;
            if (fromDisk == null) {
                notFoundCache.put(hash, notFoundMarker);
            } else {
                block = fromDisk.toStoredBlock(params);
                blockCache.put(hash, block);
            }
            return block;
        } catch (IOException e) {
            throw new BlockStoreException(e);
        } catch (ProtocolException e) {
            throw new BlockStoreException(e);
        }
    }

    private ByteBuffer buf = ByteBuffer.allocateDirect(Record.SIZE);
    private Record getRecord(Sha256Hash hash) throws BlockStoreException, IOException, ProtocolException {
        long startPos = channel.position();
        // Use our own file pointer within the tight loop as updating channel positions is really expensive.
        long pos = startPos;
        Record record = new Record();
        do {
            if (!record.read(channel, pos, buf))
                throw new IOException("Failed to read buffer");
            if (Arrays.equals(record.getHeader(params).getHash(), hash.hash)) {
                // Found it. Update file position for next time.
                channel.position(pos);
                return record;
            }
            // Did not find it.
            if (pos == 1 + 32) {
                // At the start so wrap around to the end.
                pos = channel.size() - Record.SIZE;
            } else {
                // Move backwards.
                pos = pos - Record.SIZE;
                assert pos >= 1 + 32 : pos;
            }
        } while (pos != startPos);
        // Was never stored.
        channel.position(pos);
        return null;
    }

    public synchronized StoredBlock getChainHead() throws BlockStoreException {
        return get(chainHead.hash);
    }

    public synchronized void setChainHead(StoredBlock chainHead) throws BlockStoreException {
        try {
            byte[] hash = chainHead.getHeader().getHash();
            this.chainHead = new Sha256Hash(hash);
            // Write out new hash to the first 32 bytes of the file past one (first byte is version number).
            channel.write(ByteBuffer.wrap(hash), 1);
        } catch (IOException e) {
            throw new BlockStoreException(e);
        }
    }
}
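For reference, the on-disk layout implied by the code above is: one version byte, the 32-byte chain head hash, then fixed 100-byte records appended in arrival order. Below is a hypothetical standalone reader for the raw bytes of record N, assuming exactly that layout; this helper is not part of the commit.

import java.io.RandomAccessFile;

public class RawRecordReader {
    private static final int FILE_HEADER_BYTES = 1 + 32;   // version byte + chain head hash
    private static final int RECORD_SIZE = 4 + 16 + 80;    // height + chain work + block header

    // Returns the raw 100 bytes of record 'index'; the caller splits them into the
    // 4-byte height, 16-byte chain work and 80-byte serialized block header.
    public static byte[] readRawRecord(RandomAccessFile file, long index) throws Exception {
        byte[] record = new byte[RECORD_SIZE];
        file.seek(FILE_HEADER_BYTES + index * RECORD_SIZE);
        file.readFully(record);
        return record;
    }
}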
@@ -50,7 +50,7 @@ public class PingService {
 
         // Load the block chain, if there is one stored locally.
         System.out.println("Reading block store from disk");
-        BlockStore blockStore = new DiskBlockStore(params, new File(filePrefix + ".blockchain"));
+        BlockStore blockStore = new BoundedOverheadBlockStore(params, new File(filePrefix + ".blockchain"));
 
         // Connect to the localhost node. One minute timeout since we won't try any other peers
         System.out.println("Connecting ...");
@@ -0,0 +1,47 @@
/**
 * Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.bitcoin.core;

import org.junit.Test;
import java.io.File;
import static org.junit.Assert.assertEquals;

public class BoundedOverheadBlockStoreTest {
    @Test
    public void testStorage() throws Exception {
        File temp = File.createTempFile("bitcoinj-test", null, null);
        System.out.println(temp.getAbsolutePath());
        temp.deleteOnExit();

        NetworkParameters params = NetworkParameters.unitTests();
        Address to = new ECKey().toAddress(params);
        BoundedOverheadBlockStore store = new BoundedOverheadBlockStore(params, temp);
        // Check the first block in a new store is the genesis block.
        StoredBlock genesis = store.getChainHead();
        assertEquals(params.genesisBlock, genesis.getHeader());

        // Build a new block.
        StoredBlock b1 = genesis.build(genesis.getHeader().createNextBlock(to).cloneAsHeader());
        store.put(b1);
        store.setChainHead(b1);
        // Check we can get it back out again if we rebuild the store object.
        store = new BoundedOverheadBlockStore(params, temp);
        StoredBlock b2 = store.get(b1.getHeader().getHash());
        assertEquals(b1, b2);
        // Check the chain head was stored correctly also.
        assertEquals(b1, store.getChainHead());
    }
}