Moved some of the less important arbitrary-transaction-related logs from INFO to DEBUG/TRACE.

These logs can be reinstated by adding the following lines to log4j2.properties:

logger.arbitrary.name = org.qortal.arbitrary
logger.arbitrary.level = debug
logger.arbitrarycontroller.name = org.qortal.controller.arbitrary
logger.arbitrarycontroller.level = debug
This commit is contained in:
CalDescent 2021-12-28 17:00:09 +00:00
parent 90ced351f4
commit 9e3847e56f
12 changed files with 56 additions and 56 deletions

View File

@ -215,7 +215,7 @@ public class ArbitraryDataBuilder {
// Loop from the second path onwards // Loop from the second path onwards
for (int i=1; i<paths.size(); i++) { for (int i=1; i<paths.size(); i++) {
String identifierPrefix = this.identifier != null ? String.format("[%s]", this.identifier) : ""; String identifierPrefix = this.identifier != null ? String.format("[%s]", this.identifier) : "";
LOGGER.info(String.format("[%s][%s]%s Applying layer %d...", this.service, this.name, identifierPrefix, i)); LOGGER.debug(String.format("[%s][%s]%s Applying layer %d...", this.service, this.name, identifierPrefix, i));
// Create an instance of ArbitraryDataCombiner // Create an instance of ArbitraryDataCombiner
Path pathAfter = this.paths.get(i); Path pathAfter = this.paths.get(i);

View File

@ -61,7 +61,7 @@ public class ArbitraryDataCombiner {
FileUtils.deleteDirectory(directory); FileUtils.deleteDirectory(directory);
} catch (IOException e) { } catch (IOException e) {
// This will eventually be cleaned up by a maintenance process, so log the error and continue // This will eventually be cleaned up by a maintenance process, so log the error and continue
LOGGER.info("Unable to cleanup directory {}", directory.toString()); LOGGER.debug("Unable to cleanup directory {}", directory.toString());
} }
} }
@ -74,7 +74,7 @@ public class ArbitraryDataCombiner {
// No need to log anything // No need to log anything
} catch (IOException e) { } catch (IOException e) {
// This will eventually be cleaned up by a maintenance process, so log the error and continue // This will eventually be cleaned up by a maintenance process, so log the error and continue
LOGGER.info("Unable to cleanup parent directory {}", parentDirectory.toString()); LOGGER.debug("Unable to cleanup parent directory {}", parentDirectory.toString());
} }
} }
} }

View File

@ -70,7 +70,7 @@ public class ArbitraryDataCreatePatch {
try { try {
FilesystemUtils.safeDeleteDirectory(this.workingPath, true); FilesystemUtils.safeDeleteDirectory(this.workingPath, true);
} catch (IOException e) { } catch (IOException e) {
LOGGER.info("Unable to cleanup working directory"); LOGGER.debug("Unable to cleanup working directory");
} }
} }
@ -78,7 +78,7 @@ public class ArbitraryDataCreatePatch {
try { try {
FilesystemUtils.safeDeleteDirectory(this.finalPath, true); FilesystemUtils.safeDeleteDirectory(this.finalPath, true);
} catch (IOException e) { } catch (IOException e) {
LOGGER.info("Unable to cleanup diff directory on failure"); LOGGER.debug("Unable to cleanup diff directory on failure");
} }
} }

View File

@ -106,7 +106,7 @@ public class ArbitraryDataDiff {
} }
private void preExecute() { private void preExecute() {
LOGGER.info("Generating diff..."); LOGGER.debug("Generating diff...");
} }
private void postExecute() { private void postExecute() {
@ -164,18 +164,18 @@ public class ArbitraryDataDiff {
boolean wasModified = false; boolean wasModified = false;
if (!Files.exists(beforePathAbsolute)) { if (!Files.exists(beforePathAbsolute)) {
LOGGER.info("File was added: {}", afterPathRelative.toString()); LOGGER.trace("File was added: {}", afterPathRelative.toString());
diff.addedPaths.add(afterPathRelative); diff.addedPaths.add(afterPathRelative);
wasAdded = true; wasAdded = true;
} }
else if (Files.size(afterPathAbsolute) != Files.size(beforePathAbsolute)) { else if (Files.size(afterPathAbsolute) != Files.size(beforePathAbsolute)) {
// Check file size first because it's quicker // Check file size first because it's quicker
LOGGER.info("File size was modified: {}", afterPathRelative.toString()); LOGGER.trace("File size was modified: {}", afterPathRelative.toString());
wasModified = true; wasModified = true;
} }
else if (!Arrays.equals(ArbitraryDataDiff.digestFromPath(afterPathAbsolute), ArbitraryDataDiff.digestFromPath(beforePathAbsolute))) { else if (!Arrays.equals(ArbitraryDataDiff.digestFromPath(afterPathAbsolute), ArbitraryDataDiff.digestFromPath(beforePathAbsolute))) {
// Check hashes as a last resort // Check hashes as a last resort
LOGGER.info("File contents were modified: {}", afterPathRelative.toString()); LOGGER.trace("File contents were modified: {}", afterPathRelative.toString());
wasModified = true; wasModified = true;
} }
@ -236,7 +236,7 @@ public class ArbitraryDataDiff {
} }
if (!Files.exists(directoryPathAfter)) { if (!Files.exists(directoryPathAfter)) {
LOGGER.info("Directory was removed: {}", directoryPathAfter.toString()); LOGGER.trace("Directory was removed: {}", directoryPathAfter.toString());
diff.removedPaths.add(directoryPathBefore); diff.removedPaths.add(directoryPathBefore);
// TODO: we might need to mark directories differently to files // TODO: we might need to mark directories differently to files
} }

View File

@ -82,20 +82,20 @@ public class ArbitraryDataMerge {
List<Path> addedPaths = this.metadata.getAddedPaths(); List<Path> addedPaths = this.metadata.getAddedPaths();
for (Path path : addedPaths) { for (Path path : addedPaths) {
LOGGER.info("File was added: {}", path.toString()); LOGGER.trace("File was added: {}", path.toString());
Path filePath = Paths.get(this.pathAfter.toString(), path.toString()); Path filePath = Paths.get(this.pathAfter.toString(), path.toString());
ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, path); ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, path);
} }
List<ModifiedPath> modifiedPaths = this.metadata.getModifiedPaths(); List<ModifiedPath> modifiedPaths = this.metadata.getModifiedPaths();
for (ModifiedPath modifiedPath : modifiedPaths) { for (ModifiedPath modifiedPath : modifiedPaths) {
LOGGER.info("File was modified: {}", modifiedPath.toString()); LOGGER.trace("File was modified: {}", modifiedPath.toString());
this.applyPatch(modifiedPath); this.applyPatch(modifiedPath);
} }
List<Path> removedPaths = this.metadata.getRemovedPaths(); List<Path> removedPaths = this.metadata.getRemovedPaths();
for (Path path : removedPaths) { for (Path path : removedPaths) {
LOGGER.info("File was removed: {}", path.toString()); LOGGER.trace("File was removed: {}", path.toString());
ArbitraryDataMerge.deletePathInBaseDir(this.mergePath, path); ArbitraryDataMerge.deletePathInBaseDir(this.mergePath, path);
} }
} }

View File

@ -230,7 +230,7 @@ public class ArbitraryDataReader {
}); });
} catch (IOException e) { } catch (IOException e) {
LOGGER.info("Unable to delete file or directory: {}", e.getMessage()); LOGGER.debug("Unable to delete file or directory: {}", e.getMessage());
} }
} }
} }
@ -481,7 +481,7 @@ public class ArbitraryDataReader {
// No need to log anything // No need to log anything
} catch (IOException e) { } catch (IOException e) {
// This will eventually be cleaned up by a maintenance process, so log the error and continue // This will eventually be cleaned up by a maintenance process, so log the error and continue
LOGGER.info("Unable to cleanup directories: {}", e.getMessage()); LOGGER.debug("Unable to cleanup directories: {}", e.getMessage());
} }
// Finally, update filePath to point to uncompressedPath // Finally, update filePath to point to uncompressedPath

View File

@ -146,7 +146,7 @@ public class ArbitraryDataRenderer {
try { try {
FileUtils.deleteDirectory(new File(unzippedPath)); FileUtils.deleteDirectory(new File(unzippedPath));
} catch (IOException ioException) { } catch (IOException ioException) {
LOGGER.info("Unable to delete directory: {}", unzippedPath, e); LOGGER.debug("Unable to delete directory: {}", unzippedPath, e);
} }
} }
} catch (IOException e) { } catch (IOException e) {

View File

@ -123,7 +123,7 @@ public class ArbitraryDataMetadataPatch extends ArbitraryDataQortalMetadata {
patch.put("modified", modifiedPaths); patch.put("modified", modifiedPaths);
this.jsonString = patch.toString(2); this.jsonString = patch.toString(2);
LOGGER.info("Patch metadata: {}", this.jsonString); LOGGER.debug("Patch metadata: {}", this.jsonString);
} }
public void setAddedPaths(List<Path> addedPaths) { public void setAddedPaths(List<Path> addedPaths) {

View File

@ -433,7 +433,7 @@ public class ArbitraryDataCleanupManager extends Thread {
// If the directory is empty, we still need to delete its parent folder // If the directory is empty, we still need to delete its parent folder
if (contentsCount == 0 && tempDir.toFile().isDirectory() && tempDir.toFile().exists()) { if (contentsCount == 0 && tempDir.toFile().isDirectory() && tempDir.toFile().exists()) {
try { try {
LOGGER.info("Parent directory {} is empty, so deleting it", tempDir); LOGGER.debug("Parent directory {} is empty, so deleting it", tempDir);
FilesystemUtils.safeDeleteDirectory(tempDir, false); FilesystemUtils.safeDeleteDirectory(tempDir, false);
} catch(IOException e){ } catch(IOException e){
LOGGER.info("Unable to delete parent directory: {}", tempDir); LOGGER.info("Unable to delete parent directory: {}", tempDir);
@ -519,7 +519,7 @@ public class ArbitraryDataCleanupManager extends Thread {
FilesystemUtils.safeDeleteDirectory(directory.toPath(), true); FilesystemUtils.safeDeleteDirectory(directory.toPath(), true);
return true; return true;
} catch (IOException e) { } catch (IOException e) {
LOGGER.info("Unable to delete directory: {}", directory); LOGGER.debug("Unable to delete directory: {}", directory);
} }
return false; return false;
} }

View File

@ -235,7 +235,7 @@ public class ArbitraryDataFileListManager {
} }
this.addToSignatureRequests(signature58, true, false); this.addToSignatureRequests(signature58, true, false);
LOGGER.info(String.format("Sending data file list request for signature %s...", Base58.encode(signature))); LOGGER.debug(String.format("Sending data file list request for signature %s...", Base58.encode(signature)));
// Build request // Build request
Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature); Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature);
@ -289,7 +289,7 @@ public class ArbitraryDataFileListManager {
} }
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message; ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
LOGGER.info("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size()); LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
// Do we have a pending request for this data? // TODO: might we want to relay all of them anyway? // Do we have a pending request for this data? // TODO: might we want to relay all of them anyway?
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId()); Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
@ -327,7 +327,7 @@ public class ArbitraryDataFileListManager {
// // Check all hashes exist // // Check all hashes exist
// for (byte[] hash : hashes) { // for (byte[] hash : hashes) {
// //LOGGER.info("Received hash {}", Base58.encode(hash)); // //LOGGER.debug("Received hash {}", Base58.encode(hash));
// if (!arbitraryDataFile.containsChunk(hash)) { // if (!arbitraryDataFile.containsChunk(hash)) {
// // Check the hash against the complete file // // Check the hash against the complete file
// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) { // if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
@ -366,7 +366,7 @@ public class ArbitraryDataFileListManager {
} }
// Forward to requesting peer // Forward to requesting peer
LOGGER.info("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer); LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
if (!requestingPeer.sendMessage(arbitraryDataFileListMessage)) { if (!requestingPeer.sendMessage(arbitraryDataFileListMessage)) {
requestingPeer.disconnect("failed to forward arbitrary data file list"); requestingPeer.disconnect("failed to forward arbitrary data file list");
} }
@ -394,7 +394,7 @@ public class ArbitraryDataFileListManager {
return; return;
} }
LOGGER.info("Received hash list request from peer {} for signature {}", peer, Base58.encode(signature)); LOGGER.debug("Received hash list request from peer {} for signature {}", peer, Base58.encode(signature));
List<byte[]> hashes = new ArrayList<>(); List<byte[]> hashes = new ArrayList<>();
ArbitraryTransactionData transactionData = null; ArbitraryTransactionData transactionData = null;
@ -424,9 +424,9 @@ public class ArbitraryDataFileListManager {
for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) { for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) {
if (chunk.exists()) { if (chunk.exists()) {
hashes.add(chunk.getHash()); hashes.add(chunk.getHash());
//LOGGER.info("Added hash {}", chunk.getHash58()); //LOGGER.trace("Added hash {}", chunk.getHash58());
} else { } else {
LOGGER.info("Couldn't add hash {} because it doesn't exist", chunk.getHash58()); LOGGER.debug("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
} }
} }
} else { } else {
@ -452,17 +452,17 @@ public class ArbitraryDataFileListManager {
ArbitraryDataFileListMessage arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes); ArbitraryDataFileListMessage arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
arbitraryDataFileListMessage.setId(message.getId()); arbitraryDataFileListMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileListMessage)) { if (!peer.sendMessage(arbitraryDataFileListMessage)) {
LOGGER.info("Couldn't send list of hashes"); LOGGER.debug("Couldn't send list of hashes");
peer.disconnect("failed to send list of hashes"); peer.disconnect("failed to send list of hashes");
} }
LOGGER.info("Sent list of hashes (count: {})", hashes.size()); LOGGER.debug("Sent list of hashes (count: {})", hashes.size());
} }
else { else {
boolean isBlocked = (transactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName())); boolean isBlocked = (transactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) { if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it // In relay mode - so ask our other peers if they have it
LOGGER.info("Rebroadcasted hash list request from peer {} for signature {} to our other peers", peer, Base58.encode(signature)); LOGGER.debug("Rebroadcasted hash list request from peer {} for signature {} to our other peers", peer, Base58.encode(signature));
Network.getInstance().broadcast( Network.getInstance().broadcast(
broadcastPeer -> broadcastPeer == peer || broadcastPeer -> broadcastPeer == peer ||
Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost())

View File

@ -118,15 +118,15 @@ public class ArbitraryDataFileManager {
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) { if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
ArbitraryDataFileMessage receivedArbitraryDataFileMessage = fetchArbitraryDataFile(peer, null, signature, hash, null); ArbitraryDataFileMessage receivedArbitraryDataFileMessage = fetchArbitraryDataFile(peer, null, signature, hash, null);
if (receivedArbitraryDataFileMessage != null) { if (receivedArbitraryDataFileMessage != null) {
LOGGER.info("Received data file {} from peer {}", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer); LOGGER.debug("Received data file {} from peer {}", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer);
receivedAtLeastOneFile = true; receivedAtLeastOneFile = true;
} }
else { else {
LOGGER.info("Peer {} didn't respond with data file {} for signature {}", peer, Base58.encode(hash), Base58.encode(signature)); LOGGER.debug("Peer {} didn't respond with data file {} for signature {}", peer, Base58.encode(hash), Base58.encode(signature));
} }
} }
else { else {
LOGGER.info("Already requesting data file {} for signature {}", arbitraryDataFile, Base58.encode(signature)); LOGGER.debug("Already requesting data file {} for signature {}", arbitraryDataFile, Base58.encode(signature));
} }
} }
} }
@ -134,7 +134,7 @@ public class ArbitraryDataFileManager {
if (receivedAtLeastOneFile) { if (receivedAtLeastOneFile) {
// Update our lookup table to indicate that this peer holds data for this signature // Update our lookup table to indicate that this peer holds data for this signature
String peerAddress = peer.getPeerData().getAddress().toString(); String peerAddress = peer.getPeerData().getAddress().toString();
LOGGER.info("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature)); LOGGER.debug("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature));
ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer); ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer);
repository.discardChanges(); repository.discardChanges();
repository.getArbitraryRepository().save(arbitraryPeerData); repository.getArbitraryRepository().save(arbitraryPeerData);
@ -171,7 +171,7 @@ public class ArbitraryDataFileManager {
// Fetch the file if it doesn't exist locally // Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) { if (!fileAlreadyExists) {
String hash58 = Base58.encode(hash); String hash58 = Base58.encode(hash);
LOGGER.info(String.format("Fetching data file %.8s from peer %s", hash58, peer)); LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
arbitraryDataFileRequests.put(hash58, NTP.getTime()); arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash); Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
@ -196,7 +196,7 @@ public class ArbitraryDataFileManager {
if (isRelayRequest) { if (isRelayRequest) {
if (!fileAlreadyExists) { if (!fileAlreadyExists) {
// File didn't exist locally before the request, and it's a forwarding request, so delete it // File didn't exist locally before the request, and it's a forwarding request, so delete it
LOGGER.info("Deleting file {} because it was needed for forwarding only", Base58.encode(hash)); LOGGER.debug("Deleting file {} because it was needed for forwarding only", Base58.encode(hash));
ArbitraryDataFile dataFile = arbitraryDataFileMessage.getArbitraryDataFile(); ArbitraryDataFile dataFile = arbitraryDataFileMessage.getArbitraryDataFile();
dataFile.delete(); dataFile.delete();
} }
@ -217,17 +217,17 @@ public class ArbitraryDataFileManager {
return; return;
} }
LOGGER.info("Received arbitrary data file - forwarding is needed"); LOGGER.debug("Received arbitrary data file - forwarding is needed");
// The ID needs to match that of the original request // The ID needs to match that of the original request
message.setId(originalMessage.getId()); message.setId(originalMessage.getId());
if (!requestingPeer.sendMessage(message)) { if (!requestingPeer.sendMessage(message)) {
LOGGER.info("Failed to forward arbitrary data file to peer {}", requestingPeer); LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
requestingPeer.disconnect("failed to forward arbitrary data file"); requestingPeer.disconnect("failed to forward arbitrary data file");
} }
else { else {
LOGGER.info("Forwarded arbitrary data file to peer {}", requestingPeer); LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer);
} }
} }
@ -243,11 +243,11 @@ public class ArbitraryDataFileManager {
List<ArbitraryPeerData> peers = repository.getArbitraryRepository().getArbitraryPeerDataForSignature(signature); List<ArbitraryPeerData> peers = repository.getArbitraryRepository().getArbitraryPeerDataForSignature(signature);
if (peers == null || peers.isEmpty()) { if (peers == null || peers.isEmpty()) {
LOGGER.info("No peers found for signature {}", signature58); LOGGER.debug("No peers found for signature {}", signature58);
return false; return false;
} }
LOGGER.info("Attempting a direct peer connection for signature {}...", signature58); LOGGER.debug("Attempting a direct peer connection for signature {}...", signature58);
// Peers found, so pick a random one and request data from it // Peers found, so pick a random one and request data from it
int index = new SecureRandom().nextInt(peers.size()); int index = new SecureRandom().nextInt(peers.size());
@ -256,7 +256,7 @@ public class ArbitraryDataFileManager {
return Network.getInstance().requestDataFromPeer(peerAddressString, signature); return Network.getInstance().requestDataFromPeer(peerAddressString, signature);
} catch (DataException e) { } catch (DataException e) {
LOGGER.info("Unable to fetch peer list from repository"); LOGGER.debug("Unable to fetch peer list from repository");
} }
return false; return false;
@ -277,43 +277,43 @@ public class ArbitraryDataFileManager {
byte[] signature = getArbitraryDataFileMessage.getSignature(); byte[] signature = getArbitraryDataFileMessage.getSignature();
Controller.getInstance().stats.getArbitraryDataFileMessageStats.requests.incrementAndGet(); Controller.getInstance().stats.getArbitraryDataFileMessageStats.requests.incrementAndGet();
LOGGER.info("Received GetArbitraryDataFileMessage from peer {} for hash {}", peer, Base58.encode(hash)); LOGGER.debug("Received GetArbitraryDataFileMessage from peer {} for hash {}", peer, Base58.encode(hash));
try { try {
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
Triple<String, Peer, Long> relayInfo = this.arbitraryRelayMap.get(hash58); Triple<String, Peer, Long> relayInfo = this.arbitraryRelayMap.get(hash58);
if (arbitraryDataFile.exists()) { if (arbitraryDataFile.exists()) {
LOGGER.info("Hash {} exists", hash58); LOGGER.trace("Hash {} exists", hash58);
// We can serve the file directly as we already have it // We can serve the file directly as we already have it
ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile); ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
arbitraryDataFileMessage.setId(message.getId()); arbitraryDataFileMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileMessage)) { if (!peer.sendMessage(arbitraryDataFileMessage)) {
LOGGER.info("Couldn't sent file"); LOGGER.debug("Couldn't sent file");
peer.disconnect("failed to send file"); peer.disconnect("failed to send file");
} }
LOGGER.info("Sent file {}", arbitraryDataFile); LOGGER.debug("Sent file {}", arbitraryDataFile);
} }
else if (relayInfo != null) { else if (relayInfo != null) {
LOGGER.info("We have relay info for hash {}", Base58.encode(hash)); LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
// We need to ask this peer for the file // We need to ask this peer for the file
Peer peerToAsk = relayInfo.getB(); Peer peerToAsk = relayInfo.getB();
if (peerToAsk != null) { if (peerToAsk != null) {
// Forward the message to this peer // Forward the message to this peer
LOGGER.info("Asking peer {} for hash {}", peerToAsk, hash58); LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58);
this.fetchArbitraryDataFile(peerToAsk, peer, signature, hash, message); this.fetchArbitraryDataFile(peerToAsk, peer, signature, hash, message);
// Remove from the map regardless of outcome, as the relay attempt is now considered complete // Remove from the map regardless of outcome, as the relay attempt is now considered complete
arbitraryRelayMap.remove(hash58); arbitraryRelayMap.remove(hash58);
} }
else { else {
LOGGER.info("Peer {} not found in relay info", peer); LOGGER.debug("Peer {} not found in relay info", peer);
} }
} }
else { else {
LOGGER.info("Hash {} doesn't exist and we don't have relay info", hash58); LOGGER.debug("Hash {} doesn't exist and we don't have relay info", hash58);
// We don't have this file // We don't have this file
Controller.getInstance().stats.getArbitraryDataFileMessageStats.unknownFiles.getAndIncrement(); Controller.getInstance().stats.getArbitraryDataFileMessageStats.unknownFiles.getAndIncrement();
@ -326,16 +326,16 @@ public class ArbitraryDataFileManager {
Message fileUnknownMessage = new BlockSummariesMessage(Collections.emptyList()); Message fileUnknownMessage = new BlockSummariesMessage(Collections.emptyList());
fileUnknownMessage.setId(message.getId()); fileUnknownMessage.setId(message.getId());
if (!peer.sendMessage(fileUnknownMessage)) { if (!peer.sendMessage(fileUnknownMessage)) {
LOGGER.info("Couldn't sent file-unknown response"); LOGGER.debug("Couldn't sent file-unknown response");
peer.disconnect("failed to send file-unknown response"); peer.disconnect("failed to send file-unknown response");
} }
else { else {
LOGGER.info("Sent file-unknown response for file {}", arbitraryDataFile); LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile);
} }
} }
} }
catch (DataException e) { catch (DataException e) {
LOGGER.info("Unable to handle request for arbitrary data file: {}", hash58); LOGGER.debug("Unable to handle request for arbitrary data file: {}", hash58);
} }
} }

View File

@ -152,7 +152,7 @@ public class ArbitraryDataManager extends Thread {
// Any arbitrary transactions we want to fetch data for? // Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) { try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, name, null, ConfirmationStatus.BOTH, limit, offset, true); List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, name, null, ConfirmationStatus.BOTH, limit, offset, true);
// LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit); // LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
if (signatures == null || signatures.isEmpty()) { if (signatures == null || signatures.isEmpty()) {
offset = 0; offset = 0;
break; break;
@ -367,7 +367,7 @@ public class ArbitraryDataManager extends Thread {
return; return;
} }
LOGGER.info("Received arbitrary signature list from peer {}", peer); LOGGER.debug("Received arbitrary signature list from peer {}", peer);
ArbitrarySignaturesMessage arbitrarySignaturesMessage = (ArbitrarySignaturesMessage) message; ArbitrarySignaturesMessage arbitrarySignaturesMessage = (ArbitrarySignaturesMessage) message;
List<byte[]> signatures = arbitrarySignaturesMessage.getSignatures(); List<byte[]> signatures = arbitrarySignaturesMessage.getSignatures();
@ -392,7 +392,7 @@ public class ArbitraryDataManager extends Thread {
if (existingEntry == null) { if (existingEntry == null) {
// We haven't got a record of this mapping yet, so add it // We haven't got a record of this mapping yet, so add it
LOGGER.info("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature)); LOGGER.debug("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature));
ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer); ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer);
repository.getArbitraryRepository().save(arbitraryPeerData); repository.getArbitraryRepository().save(arbitraryPeerData);
repository.saveChanges(); repository.saveChanges();
@ -405,7 +405,7 @@ public class ArbitraryDataManager extends Thread {
// If at least one signature in this batch was new to us, we should rebroadcast the message to the // If at least one signature in this batch was new to us, we should rebroadcast the message to the
// network in case some peers haven't received it yet // network in case some peers haven't received it yet
if (containsNewEntry) { if (containsNewEntry) {
LOGGER.info("Rebroadcasting arbitrary signature list for peer {}", peerAddress); LOGGER.debug("Rebroadcasting arbitrary signature list for peer {}", peerAddress);
Network.getInstance().broadcast(broadcastPeer -> broadcastPeer == peer ? null : arbitrarySignaturesMessage); Network.getInstance().broadcast(broadcastPeer -> broadcastPeer == peer ? null : arbitrarySignaturesMessage);
} else { } else {
// Don't rebroadcast as otherwise we could get into a loop // Don't rebroadcast as otherwise we could get into a loop