From e02ef51463fba1c01964e95dda85529713d988b2 Mon Sep 17 00:00:00 2001
From: jasperpotts
Date: Thu, 19 Dec 2024 16:05:27 -0800
Subject: [PATCH] Spotless fixes

Signed-off-by: jasperpotts

---
 .../record2blocks/Record2BlockCommand.java    | 73 ++++++++++---------
 .../record2blocks/gcp/MainNetBucket.java      |  2 +-
 .../mirrornode/FetchBlockQuery.java           |  2 +-
 .../model/ParsedSignatureFile.java            | 31 +++++---
 .../record2blocks/model/RecordFileInfo.java   | 60 +++++++--------
 5 files changed, 91 insertions(+), 77 deletions(-)

diff --git a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/Record2BlockCommand.java b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/Record2BlockCommand.java
index 7b464828d..e88bcd256 100644
--- a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/Record2BlockCommand.java
+++ b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/Record2BlockCommand.java
@@ -134,8 +134,8 @@ public Record2BlockCommand() {}
     @Override
     public void run() {
         // create executor service
-        try(final ExecutorService executorService = Executors.newVirtualThreadPerTaskExecutor();
-            final ExecutorService singleThreadWritingExecutor = Executors.newSingleThreadExecutor()) {
+        try (final ExecutorService executorService = Executors.newVirtualThreadPerTaskExecutor();
+                final ExecutorService singleThreadWritingExecutor = Executors.newSingleThreadExecutor()) {
             blocksDir = dataDir.resolve("blocks");
             blocksJsonDir = dataDir.resolve("blocks-json");
             // enable cache, disable if doing large batches
@@ -172,16 +172,15 @@ public void run() {
                 // get the time of the record file for this block, from converted mirror node data
                 final long blockTime = blockTimes.getBlockTime(blockNumber);
                 final Instant blockTimeInstant = blockTimeLongToInstant(blockTime);
-                System.out.printf(Ansi.AUTO.string("@|bold,green,underline Processing block|@ %d"
-                        + " @|green at blockTime|@ %s"
-                        + " @|cyan Progress = block %d of %d" +
-                        " = %.2f%% |@\n"),
+                System.out.printf(
+                        Ansi.AUTO.string("@|bold,green,underline Processing block|@ %d"
+                                + " @|green at blockTime|@ %s"
+                                + " @|cyan Progress = block %d of %d" + " = %.2f%% |@\n"),
                         blockNumber,
                         blockTimeInstant,
-                        blockNumber-startBlock+1,
-                        endBlock-startBlock+1,
-                        ((double)(blockNumber-startBlock)/(double)(endBlock-startBlock))*100d
-                );
+                        blockNumber - startBlock + 1,
+                        endBlock - startBlock + 1,
+                        ((double) (blockNumber - startBlock) / (double) (endBlock - startBlock)) * 100d);
                 // round instant to nearest hour
                 Instant blockTimeHour = blockTimeInstant.truncatedTo(ChronoUnit.HOURS);
                 // check if we are the same hour as last block, if not load the new hour
@@ -204,8 +203,8 @@ public void run() {
 
                 // The next 3 steps we do in background threads as they all download files from GCP which can be slow
                 // now we need to download the most common record file & parse version information out of record file
-                final Future<RecordFileInfo> recordFileInfoFuture = executorService.submit(() ->
-                        RecordFileInfo.parse(blockInfo.mostCommonRecordFile().chainFile().download(mainNetBucket)));
+                final Future<RecordFileInfo> recordFileInfoFuture = executorService.submit(() -> RecordFileInfo.parse(
+                        blockInfo.mostCommonRecordFile().chainFile().download(mainNetBucket)));
 
                 // download and parse all signature files then convert signature files to list of RecordFileSignatures
                 final List<Future<RecordFileSignature>> recordFileSignatureFutures = blockInfo.signatureFiles().stream()
@@ -216,19 +215,19 @@ public void run() {
                         .toList();
 
                 // download most common sidecar files, one for each numbered sidecar
-                final List<Future<SidecarFile>> sideCarsFutures =
-                        blockInfo.sidecarFiles().values().stream()
-                                .map(sidecarFile ->
-                                        executorService.submit(() -> {
-                                            byte[] sidecarFileBytes = sidecarFile.mostCommonSidecarFile().chainFile()
-                                                    .download(mainNetBucket);
-                                            try {
-                                                return SidecarFile.PROTOBUF.parse(Bytes.wrap(sidecarFileBytes));
-                                            } catch (ParseException e) {
-                                                throw new RuntimeException(e);
-                                            }
-                                        })
-                        ).toList();
+                final List<Future<SidecarFile>> sideCarsFutures = blockInfo.sidecarFiles().values().stream()
+                        .map(sidecarFile -> executorService.submit(() -> {
+                            byte[] sidecarFileBytes = sidecarFile
+                                    .mostCommonSidecarFile()
+                                    .chainFile()
+                                    .download(mainNetBucket);
+                            try {
+                                return SidecarFile.PROTOBUF.parse(Bytes.wrap(sidecarFileBytes));
+                            } catch (ParseException e) {
+                                throw new RuntimeException(e);
+                            }
+                        }))
+                        .toList();
 
                 // collect all background computed data from futures
                 final RecordFileInfo recordFileVersionInfo = recordFileInfoFuture.get();
@@ -246,8 +245,8 @@ public void run() {
                 final RecordFileItem recordFileItem = new RecordFileItem(
                         new Timestamp(blockTimeInstant.getEpochSecond(), blockTimeInstant.getNano()),
                         Bytes.wrap(recordFileVersionInfo.recordFileContents()),
-                        sideCars,recordFileSignatures
-                );
+                        sideCars,
+                        recordFileSignatures);
                 final Block block = new Block(List.of(
                         new BlockItem(new OneOf<>(ItemOneOfType.BLOCK_HEADER, blockHeader)),
                         new BlockItem(new OneOf<>(ItemOneOfType.RECORD_FILE, recordFileItem))));
@@ -266,16 +265,18 @@ public void run() {
                                 blockJsonPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE))) {
                             Block.JSON.write(block, out);
                         }
-                        System.out.println(Ansi.AUTO.string("@|bold,yellow Wrote block [|@"+finalBlockNumber+
-                                "@|bold,yellow ]to|@ " + blockPath.dirPath()
-                                + "/" + blockPath.zipFileName() + "@|bold,cyan :|@"
-                                + blockPath.blockFileName() +
-                                "@|bold,yellow ] and json to|@ " + blockJsonPath));
+                        System.out.println(Ansi.AUTO.string(
+                                "@|bold,yellow Wrote block [|@" + finalBlockNumber + "@|bold,yellow ]to|@ "
+                                        + blockPath.dirPath()
+                                        + "/" + blockPath.zipFileName() + "@|bold,cyan :|@"
+                                        + blockPath.blockFileName() + "@|bold,yellow ] and json to|@ "
+                                        + blockJsonPath));
                     } else {
-                        System.out.println(Ansi.AUTO.string("@|bold,yellow Wrote block [|@"+finalBlockNumber+
-                                "@|bold,yellow ]to|@ " + blockPath.dirPath()
-                                + "/" + blockPath.zipFileName() + "@|bold,cyan :|@"
-                                + blockPath.blockFileName()));
+                        System.out.println(Ansi.AUTO.string(
+                                "@|bold,yellow Wrote block [|@" + finalBlockNumber + "@|bold,yellow ]to|@ "
+                                        + blockPath.dirPath()
+                                        + "/" + blockPath.zipFileName() + "@|bold,cyan :|@"
+                                        + blockPath.blockFileName()));
                     }
                 } catch (IOException e) {
                     e.printStackTrace();
diff --git a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/gcp/MainNetBucket.java b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/gcp/MainNetBucket.java
index 0c995bf35..d4731905d 100644
--- a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/gcp/MainNetBucket.java
+++ b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/gcp/MainNetBucket.java
@@ -101,7 +101,7 @@ public byte[] download(String path) {
         final Path cachedFilePath = cacheDir.resolve(path);
         byte[] rawBytes;
         if (cacheEnabled && Files.exists(cachedFilePath)) {
-           rawBytes = Files.readAllBytes(cachedFilePath);
+            rawBytes = Files.readAllBytes(cachedFilePath);
         } else {
             rawBytes = STREAMS_BUCKET.get(path).getContent();
             if (cacheEnabled) {
diff --git a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/mirrornode/FetchBlockQuery.java b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/mirrornode/FetchBlockQuery.java
index 4b20453a3..5111a13b2 100644
--- a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/mirrornode/FetchBlockQuery.java
+++ b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/mirrornode/FetchBlockQuery.java
@@ -51,7 +51,7 @@ public static String getRecordFileNameForBlock(long blockNumber) {
     public static Bytes getPreviousHashForBlock(long blockNumber) {
         final String url = "https://mainnet-public.mirrornode.hedera.com/api/v1/blocks/" + blockNumber;
         final JsonObject json = readUrl(url);
-        final String hashStr = json.get("previous_hash").getAsString();
+        final String hashStr = json.get("previous_hash").getAsString();
         return Bytes.wrap(HexFormat.of().parseHex(hashStr.substring(2))); // remove 0x prefix and parse
     }
 
diff --git a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/ParsedSignatureFile.java b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/ParsedSignatureFile.java
index f289a71fc..b2df6bb57 100644
--- a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/ParsedSignatureFile.java
+++ b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/ParsedSignatureFile.java
@@ -187,6 +187,7 @@ public record ParsedSignatureFile(int nodeId, byte[] fileHash, byte[] signature)
      * The marker for the file hash in a V3 signature file. This is the first byte so also acts like a version number.
      */
     public static final byte V2_FILE_HASH_MARKER = 4;
+    public static final byte FILE_VERSION_5 = 5;
     public static final byte FILE_VERSION_6 = 6;
     public static final byte V3_SIGNATURE_MARKER = 3;
 
@@ -199,9 +200,8 @@ public record ParsedSignatureFile(int nodeId, byte[] fileHash, byte[] signature)
     @Override
     public String toString() {
         final HexFormat hexFormat = HexFormat.of();
-        return "SignatureFile[" +
-                "nodeId=" + nodeId + ", " +
-                "fileHash="
+        return "SignatureFile[" + "nodeId="
+                + nodeId + ", " + "fileHash="
                 + hexFormat.formatHex(fileHash)
                 + ", signature=" + hexFormat.formatHex(signature) + ']';
     }
@@ -215,13 +215,14 @@ public String toString() {
      */
     public static ParsedSignatureFile downloadAndParse(ChainFile signatureChainFile, MainNetBucket mainNetBucket) {
         // first download
-        try(DataInputStream in = new DataInputStream(signatureChainFile.downloadStreaming(mainNetBucket))) {
-            // extract node ID from file path. This depends on the fixed relationship between node account ids and node ids.
+        try (DataInputStream in = new DataInputStream(signatureChainFile.downloadStreaming(mainNetBucket))) {
+            // extract node ID from file path. This depends on the fixed relationship between node account ids and node
+            // ids.
             final int nodeId = signatureChainFile.nodeAccountId() - 3;
             // now parse
             final int firstByte = in.read();
             // the first byte is either the file hash marker or a version number in V6 record stream
-            switch(firstByte) {
+            switch (firstByte) {
                 case V2_FILE_HASH_MARKER:
                     final byte[] fileHash = new byte[48];
                     in.readFully(fileHash);
@@ -266,16 +267,26 @@ public static ParsedSignatureFile downloadAndParse(ChainFile signatureChainFile,
                     // everything from here on is protobuf encoded
                     try {
                         SignatureFile signatureFile = SignatureFile.PROTOBUF.parse(new ReadableStreamingData(in));
+                        if (signatureFile.fileSignature() == null) {
+                            throw new IllegalArgumentException("Invalid signature file, missing file signature");
+                        }
+                        if (signatureFile.fileSignature().hashObject() == null) {
+                            throw new IllegalArgumentException("Invalid signature file, missing hash object");
+                        }
                         return new ParsedSignatureFile(
                                 nodeId,
-                                signatureFile.fileSignature().hashObject().hash().toByteArray(),
+                                signatureFile
+                                        .fileSignature()
+                                        .hashObject()
+                                        .hash()
+                                        .toByteArray(),
                                 signatureFile.fileSignature().signature().toByteArray());
                     } catch (ParseException e) {
                         throw new RuntimeException("Error protobuf parsing V6 signature file", e);
                     }
                 default:
-                    throw new IllegalArgumentException("Invalid first byte [" + firstByte + "] expected " +
-                            V2_FILE_HASH_MARKER + " or " + FILE_VERSION_6);
+                    throw new IllegalArgumentException("Invalid first byte [" + firstByte + "] expected "
+                            + V2_FILE_HASH_MARKER + " or " + FILE_VERSION_6);
             }
         } catch (IOException e) {
             throw new RuntimeException("Error downloading or parsing signature file", e);
@@ -298,7 +309,7 @@ public static byte[] readHashObject(DataInputStream in) throws IOException {
             throw new IllegalArgumentException("Invalid hash class ID");
         }
         // read hash class version
-        if(in.readInt() != 1) {
+        if (in.readInt() != 1) {
             throw new IllegalArgumentException("Invalid hash class version");
         }
         // read hash object, starting with digest type SHA384
diff --git a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/RecordFileInfo.java b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/RecordFileInfo.java
index fb1baff2e..c0d447fc5 100644
--- a/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/RecordFileInfo.java
+++ b/tools/src/main/java/com/hedera/block/tools/commands/record2blocks/model/RecordFileInfo.java
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package com.hedera.block.tools.commands.record2blocks.model;
 
 import static com.hedera.block.tools.commands.record2blocks.model.ParsedSignatureFile.HASH_OBJECT_SIZE_BYTES;
@@ -24,13 +40,9 @@
  * @param hapiProtoVersion the HAPI protocol version
  * @param blockHash the block hash
  */
-public record RecordFileInfo(
-        SemanticVersion hapiProtoVersion,
-        Bytes blockHash,
-        byte[] recordFileContents
-) {
+public record RecordFileInfo(SemanticVersion hapiProtoVersion, Bytes blockHash, byte[] recordFileContents) {
     /* The length of the header in a v2 record file */
-    private static final int V2_HEADER_LENGTH = Integer.BYTES + Integer.BYTES + 1 + 48;
+    private static final int V2_HEADER_LENGTH = Integer.BYTES + Integer.BYTES + 1 + 48;
 
     /**
      * Parses the record file to extract the HAPI protocol version and the block hash.
@@ -39,14 +51,13 @@
      * @return the record file version info
      */
     public static RecordFileInfo parse(byte[] recordFile) {
-        try(DataInputStream in = new DataInputStream(new ByteArrayInputStream(recordFile))) {
+        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(recordFile))) {
             final int recordFormatVersion = in.readInt();
             // This is a minimal parser for all record file formats only extracting the necessary information
             return switch (recordFormatVersion) {
-                case 2 -> {
+                case 2 -> {
                     final int hapiMajorVersion = in.readInt();
-                    final SemanticVersion hapiProtoVersion = new SemanticVersion(
-                            hapiMajorVersion, 0, 0, null, null);
+                    final SemanticVersion hapiProtoVersion = new SemanticVersion(hapiMajorVersion, 0, 0, null, null);
                     // The hash for v2 files is the hash(header, hash(content)) this is different to other versions
                     // the block hash is not available in the file so we have to calculate it
                     MessageDigest digest = MessageDigest.getInstance("SHA-384");
@@ -54,33 +65,25 @@
                     final byte[] contentHash = digest.digest();
                     digest.update(recordFile, 0, V2_HEADER_LENGTH);
                     digest.update(contentHash);
-                    yield new RecordFileInfo(
-                            hapiProtoVersion,
-                            Bytes.wrap(digest.digest()),
-                            recordFile
-                    );
+                    yield new RecordFileInfo(hapiProtoVersion, Bytes.wrap(digest.digest()), recordFile);
                 }
                 case 5 -> {
                     final int hapiMajorVersion = in.readInt();
                     final int hapiMinorVersion = in.readInt();
                     final int hapiPatchVersion = in.readInt();
-                    final SemanticVersion hapiProtoVersion = new SemanticVersion(
-                            hapiMajorVersion, hapiMinorVersion, hapiPatchVersion, null, null);
+                    final SemanticVersion hapiProtoVersion =
+                            new SemanticVersion(hapiMajorVersion, hapiMinorVersion, hapiPatchVersion, null, null);
                     // skip to last hash object. This trick allows us to not have to understand the format for record
                     // file items and their contents which is much more complicated. For v5 and v6 the block hash is the
                     // end running hash which is written as a special item at the end of the file.
                     in.skipBytes(in.available() - HASH_OBJECT_SIZE_BYTES);
                     final byte[] endHashObject = readHashObject(in);
-                    yield new RecordFileInfo(
-                            hapiProtoVersion,
-                            Bytes.wrap(endHashObject),
-                            recordFile
-                    );
+                    yield new RecordFileInfo(hapiProtoVersion, Bytes.wrap(endHashObject), recordFile);
                 }
                 case 6 -> {
                     // V6 is nice and easy as it is all protobuf encoded after the first version integer
-                    final RecordStreamFile recordStreamFile = RecordStreamFile.PROTOBUF.parse(new ReadableStreamingData(
-                            in));
+                    final RecordStreamFile recordStreamFile =
+                            RecordStreamFile.PROTOBUF.parse(new ReadableStreamingData(in));
                     // For v6 the block hash is the end running hash which is accessed via endObjectRunningHash()
                     if (recordStreamFile.endObjectRunningHash() == null) {
                         throw new IllegalStateException("No end object running hash in record file");
                     }
@@ -88,14 +91,13 @@
                     yield new RecordFileInfo(
                             recordStreamFile.hapiProtoVersion(),
                             recordStreamFile.endObjectRunningHash().hash(),
-                            recordFile
-                    );
+                            recordFile);
                 }
-                default ->
-                    throw new UnsupportedOperationException("Unsupported record format version: " + recordFormatVersion);
+                default -> throw new UnsupportedOperationException(
+                        "Unsupported record format version: " + recordFormatVersion);
             };
         } catch (Exception e) {
            throw new RuntimeException(e);
         }
     }
-}
\ No newline at end of file
+}
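
Note, not part of the patch: the v2 branch of RecordFileInfo.parse reformatted above derives the block hash as SHA-384(header || SHA-384(content)), because v2 record files do not store their own hash. Below is a minimal standalone sketch of that scheme, assuming only java.security; the class and method names are illustrative, while V2_HEADER_LENGTH mirrors the constant shown in the diff.

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Illustrative sketch only; mirrors the v2 case of RecordFileInfo.parse, not shipped code.
    public final class V2BlockHashSketch {

        // header = format version int + HAPI major version int + 1 marker byte + 48-byte previous-hash field,
        // matching V2_HEADER_LENGTH in the patch
        private static final int V2_HEADER_LENGTH = Integer.BYTES + Integer.BYTES + 1 + 48;

        // blockHash = SHA-384(header || SHA-384(content))
        public static byte[] v2BlockHash(final byte[] recordFile) throws NoSuchAlgorithmException {
            final MessageDigest digest = MessageDigest.getInstance("SHA-384");
            // first pass: hash everything after the fixed-size header
            digest.update(recordFile, V2_HEADER_LENGTH, recordFile.length - V2_HEADER_LENGTH);
            final byte[] contentHash = digest.digest(); // digest() also resets the instance
            // second pass: hash the header followed by the content hash
            digest.update(recordFile, 0, V2_HEADER_LENGTH);
            digest.update(contentHash);
            return digest.digest();
        }
    }

Reusing a single MessageDigest works because digest() completes the current hash and resets the engine, which is the same property the code in the diff relies on.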