diff --git a/cmd-rpc.go b/cmd-rpc.go
index 6e46bb54..15d3ecca 100644
--- a/cmd-rpc.go
+++ b/cmd-rpc.go
@@ -153,6 +153,12 @@ func newCmd_rpc() *cli.Command {
                 EpochSearchConcurrency: epochSearchConcurrency,
             })
 
+            defer func() {
+                if err := multi.Close(); err != nil {
+                    klog.Errorf("error closing multi-epoch: %s", err.Error())
+                }
+            }()
+
             for _, epoch := range epochs {
                 if err := multi.AddEpoch(epoch.Epoch(), epoch); err != nil {
                     return cli.Exit(fmt.Sprintf("failed to add epoch %d: %s", epoch.Epoch(), err.Error()), 1)
diff --git a/cmd-x-index-all.go b/cmd-x-index-all.go
index db06ffdf..1eae0589 100644
--- a/cmd-x-index-all.go
+++ b/cmd-x-index-all.go
@@ -3,6 +3,7 @@ package main
 import (
     "bytes"
     "context"
+    "errors"
     "fmt"
     "io"
     "math/rand"
@@ -211,7 +212,7 @@ func createAllIndexes(
     for {
         _cid, sectionLength, block, err := rd.NextNode()
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return nil, err
@@ -611,7 +612,7 @@ func verifyAllIndexes(
     for {
         _cid, sectionLength, block, err := rd.NextNode()
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return err
diff --git a/config.go b/config.go
index a7699c4a..5451dcd9 100644
--- a/config.go
+++ b/config.go
@@ -125,6 +125,9 @@ type Config struct {
             URI URI `json:"uri" yaml:"uri"`
         } `json:"sig_exists" yaml:"sig_exists"`
     } `json:"indexes" yaml:"indexes"`
+    Genesis struct {
+        URI URI `json:"uri" yaml:"uri"`
+    } `json:"genesis" yaml:"genesis"`
 }
 
 func (c *Config) ConfigFilepath() string {
@@ -296,5 +299,20 @@ func (c *Config) Validate() error {
             }
         }
     }
+    {
+        // if epoch is 0, then the genesis URI must be set:
+        if *c.Epoch == 0 {
+            if c.Genesis.URI.IsZero() {
+                return fmt.Errorf("epoch is 0, but genesis.uri is not set")
+            }
+            if !c.Genesis.URI.IsValid() {
+                return fmt.Errorf("genesis.uri is invalid")
+            }
+            // only support local genesis files for now:
+            if !c.Genesis.URI.IsLocal() {
+                return fmt.Errorf("genesis.uri must be a local file")
+            }
+        }
+    }
     return nil
 }
diff --git a/epoch.go b/epoch.go
index 73eed471..f83165e1 100644
--- a/epoch.go
+++ b/epoch.go
@@ -2,6 +2,7 @@ package main
 
 import (
     "bufio"
+    "bytes"
     "context"
     "crypto/rand"
     "encoding/binary"
@@ -12,6 +13,7 @@ import (
 
     "github.com/gagliardetto/solana-go"
     "github.com/ipfs/go-cid"
+    carv1 "github.com/ipld/go-car"
     "github.com/ipld/go-car/util"
     carv2 "github.com/ipld/go-car/v2"
     "github.com/libp2p/go-libp2p/core/peer"
@@ -22,6 +24,7 @@ import (
     "github.com/rpcpool/yellowstone-faithful/gsfa"
     "github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode"
     "github.com/rpcpool/yellowstone-faithful/iplddecoders"
+    "github.com/rpcpool/yellowstone-faithful/radiance/genesis"
     "github.com/urfave/cli/v2"
     "k8s.io/klog/v2"
 )
@@ -30,20 +33,22 @@ type Epoch struct {
     epoch          uint64
     isFilecoinMode bool // true if the epoch is in Filecoin mode (i.e. Lassie mode)
     config         *Config
+    // genesis:
+    genesis *GenesisContainer
     // contains indexes and block data for the epoch
-    lassieFetcher       *lassieWrapper
-    localCarReader      *carv2.Reader
-    remoteCarReader     ReaderAtCloser
-    remoteCarHeaderSize uint64
-    cidToOffsetIndex    *compactindex.DB
-    slotToCidIndex      *compactindex36.DB
-    sigToCidIndex       *compactindex36.DB
-    sigExists           *bucketteer.Reader
-    gsfaReader          *gsfa.GsfaReader
-    cidToNodeCache      *cache.Cache // TODO: prevent OOM
-    onClose             []func() error
-    slotToCidCache      *cache.Cache
-    cidToOffsetCache    *cache.Cache
+    lassieFetcher    *lassieWrapper
+    localCarReader   *carv2.Reader
+    remoteCarReader  ReaderAtCloser
+    carHeaderSize    uint64
+    cidToOffsetIndex *compactindex.DB
+    slotToCidIndex   *compactindex36.DB
+    sigToCidIndex    *compactindex36.DB
+    sigExists        *bucketteer.Reader
+    gsfaReader       *gsfa.GsfaReader
+    cidToNodeCache   *cache.Cache // TODO: prevent OOM
+    onClose          []func() error
+    slotToCidCache   *cache.Cache
+    cidToOffsetCache *cache.Cache
 }
 
 func (r *Epoch) getSlotToCidFromCache(slot uint64) (cid.Cid, error, bool) {
@@ -92,6 +97,10 @@ func (e *Epoch) Close() error {
     return errors.Join(multiErr...)
 }
 
+func (e *Epoch) GetGenesis() *GenesisContainer {
+    return e.genesis
+}
+
 func NewEpochFromConfig(config *Config, c *cli.Context) (*Epoch, error) {
     if config == nil {
         return nil, fmt.Errorf("config must not be nil")
@@ -105,6 +114,19 @@ func NewEpochFromConfig(config *Config, c *cli.Context) (*Epoch, error) {
         config:  config,
         onClose: make([]func() error, 0),
     }
+    {
+        // if epoch is 0, then try loading the genesis from the config:
+        if *config.Epoch == 0 {
+            genesisConfig, ha, err := genesis.ReadGenesisFromFile(string(config.Genesis.URI))
+            if err != nil {
+                return nil, fmt.Errorf("failed to read genesis: %w", err)
+            }
+            ep.genesis = &GenesisContainer{
+                Hash:   solana.HashFromBytes(ha[:]),
+                Config: genesisConfig,
+            }
+        }
+    }
     if isCarMode {
         // The CAR-mode requires a cid-to-offset index.
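For reference, the epoch-0 wiring above reduces to a small helper like the following sketch. The helper name and the path argument are illustrative only; the calls are the ones used in NewEpochFromConfig, and the returned hash is what getGenesisHash and getBlock(0) later rely on.

package main

import (
    "fmt"

    "github.com/gagliardetto/solana-go"
    "github.com/rpcpool/yellowstone-faithful/radiance/genesis"
)

// loadGenesis is a sketch of the epoch-0 genesis loading above: read the local
// genesis archive named by config.Genesis.URI and derive its hash.
func loadGenesis(path string) (*genesis.Genesis, solana.Hash, error) {
    cfg, ha, err := genesis.ReadGenesisFromFile(path)
    if err != nil {
        return nil, solana.Hash{}, fmt.Errorf("failed to read genesis: %w", err)
    }
    return cfg, solana.HashFromBytes(ha[:]), nil
}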
@@ -207,7 +229,7 @@ func NewEpochFromConfig(config *Config, c *cli.Context) (*Epoch, error) {
         ep.localCarReader = localCarReader
         ep.remoteCarReader = remoteCarReader
         if remoteCarReader != nil {
-            // read 10 bytes from the CAR file to get the header size
+            // determine the header size so that we know where the data starts:
             headerSizeBuf, err := readSectionFromReaderAt(remoteCarReader, 0, 10)
             if err != nil {
                 return nil, fmt.Errorf("failed to read CAR header: %w", err)
@@ -217,7 +239,24 @@ func NewEpochFromConfig(config *Config, c *cli.Context) (*Epoch, error) {
             if n <= 0 {
                 return nil, fmt.Errorf("failed to decode CAR header size")
             }
-            ep.remoteCarHeaderSize = uint64(n) + headerSize
+            ep.carHeaderSize = uint64(n) + headerSize
+        }
+        if localCarReader != nil {
+            // determine the header size so that we know where the data starts:
+            dr, err := localCarReader.DataReader()
+            if err != nil {
+                return nil, fmt.Errorf("failed to get local CAR data reader: %w", err)
+            }
+            header, err := readHeader(dr)
+            if err != nil {
+                return nil, fmt.Errorf("failed to read local CAR header: %w", err)
+            }
+            var buf bytes.Buffer
+            if err = carv1.WriteHeader(header, &buf); err != nil {
+                return nil, fmt.Errorf("failed to encode local CAR header: %w", err)
+            }
+            headerSize := uint64(buf.Len())
+            ep.carHeaderSize = headerSize
         }
     }
     {
diff --git a/genesis.go b/genesis.go
new file mode 100644
index 00000000..ad91be58
--- /dev/null
+++ b/genesis.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+    "github.com/gagliardetto/solana-go"
+    "github.com/rpcpool/yellowstone-faithful/radiance/genesis"
+)
+
+type GenesisContainer struct {
+    Hash solana.Hash
+    // The genesis config.
+    Config *genesis.Genesis
+}
diff --git a/gsfa/manifest/manifest.go b/gsfa/manifest/manifest.go
index 1cc71516..4f6a7799 100644
--- a/gsfa/manifest/manifest.go
+++ b/gsfa/manifest/manifest.go
@@ -2,6 +2,7 @@ package manifest
 
 import (
     "encoding/binary"
+    "errors"
     "fmt"
     "io"
     "os"
@@ -213,7 +214,7 @@ func (m *Manifest) readAllContent() (Values, error) {
     values := make([][2]uint64, 0, currentContentSize/16)
     for {
         _, err := io.ReadFull(sectionReader, buf)
-        if err == io.EOF {
+        if errors.Is(err, io.EOF) {
             break
         }
         if err != nil {
diff --git a/index-cid-to-offset.go b/index-cid-to-offset.go
index ceae6a0e..2907f1db 100644
--- a/index-cid-to-offset.go
+++ b/index-cid-to-offset.go
@@ -89,7 +89,7 @@ func CreateIndex_cid2offset(ctx context.Context, tmpDir string, carPath string,
     for {
         c, sectionLength, err := rd.NextInfo()
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return "", err
diff --git a/multiepoch-getBlock.go b/multiepoch-getBlock.go
index d408a055..9d397b98 100644
--- a/multiepoch-getBlock.go
+++ b/multiepoch-getBlock.go
@@ -70,8 +70,14 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
     {
         prefetcherFromCar := func() error {
             parentIsInPreviousEpoch := CalcEpochForSlot(uint64(block.Meta.Parent_slot)) != CalcEpochForSlot(slot)
+            if slot == 0 {
+                parentIsInPreviousEpoch = true
+            }
+            if slot > 1 && block.Meta.Parent_slot == 0 {
+                parentIsInPreviousEpoch = true
+            }
 
-            var blockCid, parentCid cid.Cid
+            var blockCid, parentBlockCid cid.Cid
             wg := new(errgroup.Group)
             wg.Go(func() (err error) {
                 blockCid, err = epochHandler.FindCidFromSlot(ctx, slot)
@@ -84,7 +90,7 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
                 if parentIsInPreviousEpoch {
                     return nil
                 }
-                parentCid, err = epochHandler.FindCidFromSlot(ctx, uint64(block.Meta.Parent_slot))
+                parentBlockCid, err = epochHandler.FindCidFromSlot(ctx, uint64(block.Meta.Parent_slot))
                 if err != nil {
                     return err
                 }
@@ -94,7 +100,17 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
             if err != nil {
                 return err
             }
-            klog.Infof("%s -> %s", parentCid, blockCid)
+            if slot == 0 {
+                klog.Infof("car start to slot(0)::%s", blockCid)
+            } else {
+                klog.Infof(
+                    "slot(%d)::%s to slot(%d)::%s",
+                    uint64(block.Meta.Parent_slot),
+                    parentBlockCid,
+                    slot,
+                    blockCid,
+                )
+            }
             {
                 var blockOffset, parentOffset uint64
                 wg := new(errgroup.Group)
@@ -108,13 +124,12 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
                 wg.Go(func() (err error) {
                     if parentIsInPreviousEpoch {
                         // get car file header size
-                        parentOffset = epochHandler.remoteCarHeaderSize
+                        parentOffset = epochHandler.carHeaderSize
                         return nil
                     }
-                    parentOffset, err = epochHandler.FindOffsetFromCid(ctx, parentCid)
+                    parentOffset, err = epochHandler.FindOffsetFromCid(ctx, parentBlockCid)
                     if err != nil {
-                        // If the parent is not found, it (probably) means that it's outside of the car file.
-                        parentOffset = epochHandler.remoteCarHeaderSize
+                        return err
                     }
                     return nil
                 })
@@ -125,23 +140,12 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
                 length := blockOffset - parentOffset
                 MiB := uint64(1024 * 1024)
-                maxSize := MiB * 100
-                if length > maxSize {
-                    length = maxSize
+                maxPrefetchSize := MiB * 10 // let's cap prefetching size
+                if length > maxPrefetchSize {
+                    length = maxPrefetchSize
                 }
-                idealEntrySize := uint64(36190)
-                var start uint64
-                if parentIsInPreviousEpoch {
-                    start = parentOffset
-                } else {
-                    if parentOffset > idealEntrySize {
-                        start = parentOffset - idealEntrySize
-                    } else {
-                        start = parentOffset
-                    }
-                    length += idealEntrySize
-                }
+                start := parentOffset
 
                 klog.Infof("prefetching CAR: start=%d length=%d (parent_offset=%d)", start, length, parentOffset)
                 carSection, err := epochHandler.ReadAtFromCar(ctx, start, length)
@@ -149,17 +153,14 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
                     return err
                 }
                 dr := bytes.NewReader(carSection)
-                if !parentIsInPreviousEpoch {
-                    dr.Seek(int64(idealEntrySize), io.SeekStart)
-                }
                 br := bufio.NewReader(dr)
                 gotCid, data, err := util.ReadNode(br)
                 if err != nil {
                     return fmt.Errorf("failed to read first node: %w", err)
                 }
-                if !parentIsInPreviousEpoch && !gotCid.Equals(parentCid) {
-                    return fmt.Errorf("CID mismatch: expected %s, got %s", parentCid, gotCid)
+                if !parentIsInPreviousEpoch && !gotCid.Equals(parentBlockCid) {
+                    return fmt.Errorf("CID mismatch: expected %s, got %s", parentBlockCid, gotCid)
                 }
                 epochHandler.putNodeInCache(gotCid, data)
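The prefetch hunks above simplify the window to "read from the parent block's offset up to the requested block, capped at 10 MiB". A minimal sketch of that calculation; the function and parameter names are illustrative, not the handler's actual signature.

package main

// prefetchWindow mirrors the capped prefetch range computed above: the window
// starts at the parent block's CAR offset and never exceeds 10 MiB.
func prefetchWindow(parentOffset, blockOffset uint64) (start, length uint64) {
    const maxPrefetchSize = 10 * 1024 * 1024 // 10 MiB cap
    length = blockOffset - parentOffset
    if length > maxPrefetchSize {
        length = maxPrefetchSize
    }
    return parentOffset, length
}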
@@ -225,8 +226,6 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
                     klog.Errorf("failed to decode Transaction %s: %v", tcid, err)
                     return nil
                 }
-                // NOTE: this messes up the order of transactions,
-                // but we sort them later anyway.
                 mu.Lock()
                 allTransactionNodes[entryIndex][txI] = txNode
                 mu.Unlock()
@@ -388,6 +387,7 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
             allTransactions = append(allTransactions, txResp)
         }
     }
+
     sort.Slice(allTransactions, func(i, j int) bool {
         return allTransactions[i].Position < allTransactions[j].Position
     })
@@ -399,6 +399,21 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
     blockResp.ParentSlot = uint64(block.Meta.Parent_slot)
     blockResp.Rewards = rewards
 
+    if slot == 0 {
+        genesis := epochHandler.GetGenesis()
+        if genesis != nil {
+            blockZeroBlocktime := uint64(genesis.Config.CreationTime.Unix())
+            blockResp.BlockTime = &blockZeroBlocktime
+        }
+        blockResp.ParentSlot = uint64(0)
+
+        zeroBlockHeight := uint64(0)
+        blockResp.BlockHeight = &zeroBlockHeight
+
+        blockZeroBlockHash := lastEntryHash.String()
+        blockResp.PreviousBlockhash = &blockZeroBlockHash // NOTE: this is what solana RPC does. Should it be nil instead? Or should it be the genesis hash?
+    }
+
     {
         blockHeight, ok := block.GetBlockHeight()
         if ok {
@@ -408,7 +423,7 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
     {
         // get parent slot
         parentSlot := uint64(block.Meta.Parent_slot)
-        if parentSlot != 0 && CalcEpochForSlot(parentSlot) == epochNumber {
+        if (parentSlot != 0 || slot == 1) && CalcEpochForSlot(parentSlot) == epochNumber {
             // NOTE: if the parent is in the same epoch, we can get it from the same epoch handler as the block;
             // otherwise, we need to get it from the previous epoch (TODO: implement this)
             parentBlock, err := epochHandler.GetBlock(WithSubrapghPrefetch(ctx, false), parentSlot)
@@ -432,11 +447,22 @@ func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContex
                 blockResp.PreviousBlockhash = &parentEntryHash
             }
         } else {
-            klog.Infof("parent slot is in a different epoch, not implemented yet (can't get previousBlockhash)")
+            if slot != 0 {
+                klog.Infof("parent slot is in a different epoch, not implemented yet (can't get previousBlockhash)")
+            }
         }
     }
     tim.time("get parent block")
+    {
+        if len(blockResp.Transactions) == 0 {
+            blockResp.Transactions = make([]GetTransactionResponse, 0)
+        }
+        if blockResp.Rewards == nil || len(blockResp.Rewards.([]any)) == 0 {
+            blockResp.Rewards = make([]any, 0)
+        }
+    }
+
     err = conn.Reply(
         ctx,
         req.ID,
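Since slot 0 has no parent block, the slot-0 branch above takes blockTime from the genesis creation time. A quick self-contained check using the mainnet value pinned by the genesis test later in this diff; this assumes the mainnet genesis archive is the one being served.

package main

import (
    "fmt"
    "time"
)

// With the mainnet genesis (creation time 2020-03-16 14:29:00 UTC),
// getBlock(0) would report this blockTime.
func main() {
    creation := time.Date(2020, time.March, 16, 14, 29, 0, 0, time.UTC)
    fmt.Println(uint64(creation.Unix())) // 1584368940
}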
diff --git a/multiepoch-getGenesisHash.go b/multiepoch-getGenesisHash.go
new file mode 100644
index 00000000..381c519d
--- /dev/null
+++ b/multiepoch-getGenesisHash.go
@@ -0,0 +1,41 @@
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/sourcegraph/jsonrpc2"
+)
+
+func (multi *MultiEpoch) handleGetGenesisHash(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) (*jsonrpc2.Error, error) {
+    // Epoch 0 contains the genesis config.
+    epochNumber := uint64(0)
+    epochHandler, err := multi.GetEpoch(epochNumber)
+    if err != nil {
+        // If epoch 0 is not available, then the genesis config is not available.
+        return &jsonrpc2.Error{
+            Code:    CodeNotFound,
+            Message: fmt.Sprintf("Epoch %d is not available", epochNumber),
+        }, fmt.Errorf("failed to get epoch %d: %w", epochNumber, err)
+    }
+
+    genesis := epochHandler.GetGenesis()
+    if genesis == nil {
+        return &jsonrpc2.Error{
+            Code:    CodeNotFound,
+            Message: "Genesis is not available",
+        }, fmt.Errorf("genesis is nil")
+    }
+
+    genesisHash := genesis.Hash
+
+    err = conn.ReplyRaw(
+        ctx,
+        req.ID,
+        genesisHash.String(),
+    )
+    if err != nil {
+        return nil, fmt.Errorf("failed to reply: %w", err)
+    }
+    return nil, nil
+}
diff --git a/multiepoch.go b/multiepoch.go
index 79628ae4..b2763c29 100644
--- a/multiepoch.go
+++ b/multiepoch.go
@@ -155,6 +155,16 @@ func (m *MultiEpoch) GetFirstAvailableEpochNumber() (uint64, error) {
     return 0, fmt.Errorf("no epochs available")
 }
 
+func (m *MultiEpoch) Close() error {
+    m.mu.Lock()
+    defer m.mu.Unlock()
+    klog.Info("Closing all epochs...")
+    for _, ep := range m.epochs {
+        ep.Close()
+    }
+    return nil
+}
+
 type ListenerConfig struct {
     ProxyConfig *ProxyConfig
 }
@@ -401,7 +411,7 @@ func sanitizeMethod(method string) string {
 
 func isValidLocalMethod(method string) bool {
     switch method {
-    case "getBlock", "getTransaction", "getSignaturesForAddress", "getBlockTime":
+    case "getBlock", "getTransaction", "getSignaturesForAddress", "getBlockTime", "getGenesisHash":
         return true
     default:
         return false
@@ -419,6 +429,8 @@ func (ser *MultiEpoch) handleRequest(ctx context.Context, conn *requestContext,
         return ser.handleGetSignaturesForAddress(ctx, conn, req)
     case "getBlockTime":
         return ser.handleGetBlockTime(ctx, conn, req)
+    case "getGenesisHash":
+        return ser.handleGetGenesisHash(ctx, conn, req)
     default:
         return &jsonrpc2.Error{
             Code: jsonrpc2.CodeMethodNotFound,
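With getGenesisHash registered in isValidLocalMethod and routed in handleRequest, it can be called like any other JSON-RPC method. A minimal client-side sketch; the endpoint URL is a placeholder for wherever the faithful RPC server is listening, and the commented response shape assumes ReplyRaw returns the base58 hash string as the JSON-RPC result.

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

func main() {
    body := []byte(`{"jsonrpc":"2.0","id":1,"method":"getGenesisHash","params":[]}`)
    resp, err := http.Post("http://localhost:8899", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    out, _ := io.ReadAll(resp.Body)
    // Expected shape: {"jsonrpc":"2.0","id":1,"result":"<base58 genesis hash>"}
    fmt.Println(string(out))
}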
diff --git a/radiance/LICENSE b/radiance/LICENSE
new file mode 100644
index 00000000..c817b165
--- /dev/null
+++ b/radiance/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2022 Firedancer Contributors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/radiance/archiveutil/archiveutil.go b/radiance/archiveutil/archiveutil.go
new file mode 100644
index 00000000..201395ba
--- /dev/null
+++ b/radiance/archiveutil/archiveutil.go
@@ -0,0 +1,56 @@
+// Package archiveutil helps dealing with common archive file formats.
+package archiveutil
+
+import (
+    "archive/tar"
+    "bufio"
+    "bytes"
+    "compress/bzip2"
+    "compress/gzip"
+    "fmt"
+    "io"
+)
+
+// TODO: zstd support
+// TODO: xz support
+
+// OpenTar opens a `.tar`, `.tar.gz`, or `.tar.bz2` file.
+//
+// Peeks the first few bytes in the given reader and auto-detects the file format.
+// Returns a tar reader spliced together with a decompressor if necessary.
+func OpenTar(rawRd io.Reader) (*tar.Reader, error) {
+    rd := bufio.NewReader(rawRd)
+    magicBytes, err := rd.Peek(6)
+    if err != nil {
+        return nil, fmt.Errorf("failed to detect magic: %w", err)
+    }
+    uncompressedRd := io.Reader(rd)
+
+    // Check first few bytes for known compression magics.
+    if bytes.Equal(magicBytes[:2], []byte("BZ")) {
+        uncompressedRd = bzip2.NewReader(rd)
+    } else if bytes.Equal(magicBytes[:3], []byte{0x1f, 0x8b, 0x08}) {
+        uncompressedRd, err = gzip.NewReader(rd)
+        if err != nil {
+            return nil, fmt.Errorf("invalid .tar.gz: %w", err)
+        }
+    } else if bytes.Equal(magicBytes[:6], []byte{0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00}) {
+        return nil, fmt.Errorf(".tar.xz not supported yet")
+    } else if bytes.Equal(magicBytes[1:4], []byte{0xb5, 0x2f, 0xfd}) {
+        return nil, fmt.Errorf(".tar.zst not supported yet")
+    } else {
+        // Presumed uncompressed case.
+        // Peek and see if we can find a valid tar header.
+        peek, err := rd.Peek(1024)
+        if err != nil {
+            return nil, err
+        }
+        peekTar := tar.NewReader(bytes.NewReader(peek))
+        if _, err = peekTar.Next(); err != nil {
+            // Doesn't seem to be a valid tar header, bail.
+            return nil, fmt.Errorf("unknown archive format")
+        }
+    }
+
+    return tar.NewReader(uncompressedRd), nil
+}
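A small usage sketch for OpenTar: iterate the entries of a (possibly compressed) tar archive. The file name is a placeholder; only the archiveutil call and the tar iteration pattern are taken from this diff.

package main

import (
    "errors"
    "fmt"
    "io"
    "os"

    "github.com/rpcpool/yellowstone-faithful/radiance/archiveutil"
)

func main() {
    f, err := os.Open("genesis.tar.bz2")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    files, err := archiveutil.OpenTar(f)
    if err != nil {
        panic(err)
    }
    for {
        hdr, err := files.Next()
        if errors.Is(err, io.EOF) {
            break
        }
        if err != nil {
            panic(err)
        }
        fmt.Println(hdr.Name) // list each entry in the archive
    }
}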
diff --git a/radiance/genesis/file.go b/radiance/genesis/file.go
new file mode 100644
index 00000000..2c4d411f
--- /dev/null
+++ b/radiance/genesis/file.go
@@ -0,0 +1,72 @@
+package genesis
+
+import (
+    "archive/tar"
+    "crypto/sha256"
+    "fmt"
+    "io"
+    "os"
+
+    bin "github.com/gagliardetto/binary"
+    "github.com/rpcpool/yellowstone-faithful/radiance/archiveutil"
+)
+
+// ReadGenesisFromFile is a convenience wrapper for ReadGenesisFromArchive.
+func ReadGenesisFromFile(fpath string) (genesis *Genesis, hash *[32]byte, err error) {
+    f, err := os.Open(fpath)
+    if err != nil {
+        return nil, nil, err
+    }
+    defer f.Close()
+    return ReadGenesisFromArchive(f)
+}
+
+// ReadGenesisFromArchive reads a `genesis.tar.bz2` file.
+func ReadGenesisFromArchive(archive io.Reader) (genesis *Genesis, hash *[32]byte, err error) {
+    var files *tar.Reader
+    var hdr *tar.Header
+    files, err = archiveutil.OpenTar(archive)
+    if err != nil {
+        return
+    }
+    hdr, err = files.Next()
+    if err != nil {
+        return
+    }
+    if hdr.Name != "genesis.bin" {
+        err = fmt.Errorf("first file is not genesis.bin")
+        return
+    }
+
+    // Read and hash first file
+    const maxSize = 10_000_001
+    var rd io.Reader
+    rd = files
+    hasher := sha256.New()
+    rd = io.TeeReader(rd, hasher)
+    rd = io.LimitReader(rd, maxSize)
+
+    // Decode content
+    var genesisBytes []byte
+    genesisBytes, err = io.ReadAll(rd)
+    if err != nil {
+        return
+    }
+    if len(genesisBytes) >= maxSize {
+        err = fmt.Errorf("genesis.bin too large")
+        return
+    }
+    genesis = new(Genesis)
+    dec := bin.NewBinDecoder(genesisBytes)
+    err = dec.Decode(genesis)
+    if err == nil {
+        if dec.HasRemaining() {
+            err = fmt.Errorf("not all of genesis.bin was read (%d bytes remaining)", dec.Remaining())
+        } else {
+            hash = new([32]byte)
+            hasher.Sum(hash[:0])
+        }
+    }
+
+    return
+}
diff --git a/radiance/genesis/file_test.go b/radiance/genesis/file_test.go
new file mode 100644
index 00000000..3a9f618c
--- /dev/null
+++ b/radiance/genesis/file_test.go
@@ -0,0 +1,67 @@
+package genesis
+
+import (
+    "os"
+    "path/filepath"
+    "testing"
+    "time"
+
+    "github.com/gagliardetto/solana-go"
+    "github.com/rpcpool/yellowstone-faithful/radiance/runtime"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestReadGenesisFromArchive(t *testing.T) {
+    f, err := os.Open(filepath.Join("testdata", "mainnet", "genesis.tar.bz2"))
+    require.NoError(t, err)
+    defer f.Close()
+    genesis, _, err := ReadGenesisFromArchive(f)
+    require.NoError(t, err)
+
+    assert.Equal(t, time.Date(2020, time.March, 16, 14, 29, 0, 0, time.UTC), genesis.CreationTime)
+    assert.Equal(t, int64(1584368940), genesis.CreationTime.Unix())
+    assert.Equal(t, []BuiltinProgram{
+        {
+            Key:    "solana_config_program",
+            Pubkey: solana.MustPublicKeyFromBase58("Config1111111111111111111111111111111111111"),
+        },
+        {
+            Key:    "solana_stake_program",
+            Pubkey: solana.MustPublicKeyFromBase58("Stake11111111111111111111111111111111111111"),
+        },
+        {
+            Key:    "solana_system_program",
+            Pubkey: solana.MustPublicKeyFromBase58("11111111111111111111111111111111"),
+        },
+        {
+            Key:    "solana_vote_program",
+            Pubkey: solana.MustPublicKeyFromBase58("Vote111111111111111111111111111111111111111"),
+        },
+    }, genesis.Builtins)
+    assert.Equal(t, uint64(0x40), genesis.TicksPerSlot)
+    assert.Equal(t, runtime.PohParams{
+        TickDuration:     6250000,
+        HasHashesPerTick: true,
+        HashesPerTick:    12500,
+        HasTickCount:     false,
+    }, genesis.PohParams)
+    assert.Equal(t, runtime.FeeParams{
+        TargetLamportsPerSig: 10000,
+        TargetSigsPerSlot:    20000,
+        MinLamportsPerSig:    5000,
+        MaxLamportsPerSig:    100000,
+        BurnPercent:          100,
+    }, genesis.Fees)
+    assert.Equal(t, runtime.RentParams{
+        LamportsPerByteYear: 3480,
+        ExemptionThreshold:  2,
+        BurnPercent:         100,
+    }, genesis.Rent)
+    assert.Equal(t, runtime.InflationParams{ /* empty */ }, genesis.Inflation)
+    assert.Equal(t, runtime.EpochSchedule{
+        SlotPerEpoch:             432000,
+        LeaderScheduleSlotOffset: 432000,
+    }, genesis.EpochSchedule)
+    assert.Equal(t, uint32(1), genesis.ClusterID)
+}
diff --git a/radiance/genesis/genesis.go b/radiance/genesis/genesis.go
new file mode 100644
index 00000000..a74bac67
--- /dev/null
+++ b/radiance/genesis/genesis.go
@@ -0,0 +1,38 @@
+package genesis
+
+import (
+    "time"
+
+    "github.com/rpcpool/yellowstone-faithful/radiance/runtime"
+)
+
+// Genesis contains the genesis state of a Solana ledger.
+type Genesis struct {
+    CreationTime  time.Time
+    Accounts      []AccountEntry
+    Builtins      []BuiltinProgram
+    RewardPools   []AccountEntry
+    TicksPerSlot  uint64
+    PohParams     runtime.PohParams
+    Fees          runtime.FeeParams
+    Rent          runtime.RentParams
+    Inflation     runtime.InflationParams
+    EpochSchedule runtime.EpochSchedule
+    ClusterID     uint32
+}
+
+type AccountEntry struct {
+    Pubkey [32]byte
+    runtime.Account
+}
+
+type BuiltinProgram struct {
+    Key    string
+    Pubkey [32]byte
+}
+
+func (g *Genesis) FillAccounts(state runtime.Accounts) {
+    for _, acc := range g.Accounts {
+        state.SetAccount(&acc.Pubkey, &acc.Account)
+    }
+}
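A sketch of how the genesis accounts can be materialized via FillAccounts into the in-memory Accounts implementation added in the runtime package below. The file path is a placeholder.

package main

import (
    "fmt"

    "github.com/rpcpool/yellowstone-faithful/radiance/genesis"
    "github.com/rpcpool/yellowstone-faithful/radiance/runtime"
)

func main() {
    g, _, err := genesis.ReadGenesisFromFile("genesis.tar.bz2")
    if err != nil {
        panic(err)
    }
    accounts := runtime.NewMemAccounts()
    g.FillAccounts(accounts) // copy every genesis account into the in-memory map
    fmt.Printf("loaded %d accounts from genesis\n", len(accounts.Map))
}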
diff --git a/radiance/genesis/serde.go b/radiance/genesis/serde.go
new file mode 100644
index 00000000..b28808e9
--- /dev/null
+++ b/radiance/genesis/serde.go
@@ -0,0 +1,90 @@
+package genesis
+
+import (
+    "io"
+    "time"
+
+    bin "github.com/gagliardetto/binary"
+    "github.com/rpcpool/yellowstone-faithful/radiance/runtime"
+)
+
+// Dumping ground for handwritten serialization boilerplate.
+// To be removed when switching over to serde-generate.
+
+func (g *Genesis) UnmarshalWithDecoder(decoder *bin.Decoder) (err error) {
+    var raw struct {
+        CreationTime   int64
+        NumAccounts    uint64 `bin:"sizeof=Accounts"`
+        Accounts       []AccountEntry
+        NumBuiltins    uint64 `bin:"sizeof=Builtins"`
+        Builtins       []BuiltinProgram
+        NumRewardPools uint64 `bin:"sizeof=RewardPools"`
+        RewardPools    []AccountEntry
+        TicksPerSlot   uint64
+        Padding00      uint64
+        PohParams      runtime.PohParams
+        Padding01      uint64
+        Fees           runtime.FeeParams
+        Rent           runtime.RentParams
+        Inflation      runtime.InflationParams
+        EpochSchedule  runtime.EpochSchedule
+        ClusterID      uint32
+    }
+    if err = decoder.Decode(&raw); err != nil {
+        return err
+    }
+    *g = Genesis{
+        CreationTime:  time.Unix(raw.CreationTime, 0).UTC(),
+        Accounts:      raw.Accounts,
+        Builtins:      raw.Builtins,
+        RewardPools:   raw.RewardPools,
+        TicksPerSlot:  raw.TicksPerSlot,
+        PohParams:     raw.PohParams,
+        Fees:          raw.Fees,
+        Rent:          raw.Rent,
+        Inflation:     raw.Inflation,
+        EpochSchedule: raw.EpochSchedule,
+        ClusterID:     raw.ClusterID,
+    }
+    return nil
+}
+
+func (g *Genesis) MarshalWithEncoder(_ *bin.Encoder) (err error) {
+    // TODO not implemented
+    panic("not implemented")
+}
+
+func (a *AccountEntry) UnmarshalWithDecoder(decoder *bin.Decoder) (err error) {
+    if err = decoder.Decode(&a.Pubkey); err != nil {
+        return err
+    }
+    return a.Account.UnmarshalWithDecoder(decoder)
+}
+
+func (a *AccountEntry) MarshalWihEncoder(encoder *bin.Encoder) (err error) {
+    if err = encoder.WriteBytes(a.Pubkey[:], false); err != nil {
+        return err
+    }
+    return a.Account.MarshalWihEncoder(encoder)
+}
+
+func (b *BuiltinProgram) UnmarshalWithDecoder(decoder *bin.Decoder) (err error) {
+    var strLen uint64
+    if strLen, err = decoder.ReadUint64(bin.LE); err != nil {
+        return err
+    }
+    if strLen > uint64(decoder.Remaining()) {
+        return io.ErrUnexpectedEOF
+    }
+    var strBytes []byte
+    if strBytes, err = decoder.ReadNBytes(int(strLen)); err != nil {
+        return err
+    }
+    b.Key = string(strBytes)
+    return decoder.Decode(&b.Pubkey)
+}
+
+func (*BuiltinProgram) MarshalWihEncoder(_ *bin.Encoder) (err error) {
+    // TODO not implemented
+    panic("not implemented")
+}
diff --git a/radiance/genesis/testdata/mainnet/genesis.tar.bz2 b/radiance/genesis/testdata/mainnet/genesis.tar.bz2
new file mode 100644
index 00000000..a8ae5c67
Binary files /dev/null and b/radiance/genesis/testdata/mainnet/genesis.tar.bz2 differ
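The handwritten decoders above assume Rust bincode conventions: strings and vectors are a little-endian u64 length prefix followed by the raw elements. A small illustration using the same decoder calls as BuiltinProgram.UnmarshalWithDecoder; the byte slice is a hand-built example, not real genesis data.

package main

import (
    "fmt"

    bin "github.com/gagliardetto/binary"
)

func main() {
    // u64 LE length prefix (21), followed by the raw string bytes.
    data := append(
        []byte{0x15, 0, 0, 0, 0, 0, 0, 0},
        []byte("solana_config_program")...,
    )
    dec := bin.NewBinDecoder(data)
    strLen, err := dec.ReadUint64(bin.LE)
    if err != nil {
        panic(err)
    }
    strBytes, err := dec.ReadNBytes(int(strLen))
    if err != nil {
        panic(err)
    }
    fmt.Println(string(strBytes)) // solana_config_program
}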
diff --git a/radiance/runtime/runtime.go b/radiance/runtime/runtime.go
new file mode 100644
index 00000000..b944faa0
--- /dev/null
+++ b/radiance/runtime/runtime.go
@@ -0,0 +1,75 @@
+// Package runtime provides low-level components of the Solana Execution Layer.
+package runtime
+
+import "time"
+
+type Account struct {
+    Lamports   uint64
+    Data       []byte
+    Owner      [32]byte
+    Executable bool
+    RentEpoch  uint64
+}
+
+type PohParams struct {
+    TickDuration     time.Duration
+    HasTickCount     bool
+    TickCount        uint64
+    HasHashesPerTick bool
+    HashesPerTick    uint64
+}
+
+type InflationParams struct {
+    Initial        float64
+    Terminal       float64
+    Taper          float64
+    Foundation     float64
+    FoundationTerm float64
+    Padding00      [8]byte
+}
+
+type EpochSchedule struct {
+    SlotPerEpoch             uint64
+    LeaderScheduleSlotOffset uint64
+    Warmup                   bool
+    FirstNormalEpoch         uint64
+    FirstNormalSlot          uint64
+}
+
+type FeeParams struct {
+    TargetLamportsPerSig uint64
+    TargetSigsPerSlot    uint64
+    MinLamportsPerSig    uint64
+    MaxLamportsPerSig    uint64
+    BurnPercent          uint8
+}
+
+type RentParams struct {
+    LamportsPerByteYear uint64
+    ExemptionThreshold  float64
+    BurnPercent         uint8
+}
+
+type Accounts interface {
+    GetAccount(pubkey *[32]byte) (*Account, error)
+    SetAccount(pubkey *[32]byte, acc *Account) error
+}
+
+type MemAccounts struct {
+    Map map[[32]byte]*Account
+}
+
+func NewMemAccounts() MemAccounts {
+    return MemAccounts{
+        Map: make(map[[32]byte]*Account),
+    }
+}
+
+func (m MemAccounts) GetAccount(pubkey *[32]byte) (*Account, error) {
+    return m.Map[*pubkey], nil
+}
+
+func (m MemAccounts) SetAccount(pubkey *[32]byte, acc *Account) error {
+    m.Map[*pubkey] = acc
+    return nil
+}
diff --git a/radiance/runtime/serde.go b/radiance/runtime/serde.go
new file mode 100644
index 00000000..855c3b83
--- /dev/null
+++ b/radiance/runtime/serde.go
@@ -0,0 +1,118 @@
+package runtime
+
+import (
+    "fmt"
+    "io"
+    "math"
+    "time"
+
+    bin "github.com/gagliardetto/binary"
+)
+
+// Dumping ground for handwritten serialization boilerplate.
+// To be removed when switching over to serde-generate.
+
+func (a *Account) UnmarshalWithDecoder(decoder *bin.Decoder) (err error) {
+    a.Lamports, err = decoder.ReadUint64(bin.LE)
+    if err != nil {
+        return err
+    }
+    var dataLen uint64
+    dataLen, err = decoder.ReadUint64(bin.LE)
+    if err != nil {
+        return err
+    }
+    if dataLen > uint64(decoder.Remaining()) {
+        return io.ErrUnexpectedEOF
+    }
+    a.Data, err = decoder.ReadNBytes(int(dataLen))
+    if err != nil {
+        return err
+    }
+    if err = decoder.Decode(&a.Owner); err != nil {
+        return err
+    }
+    a.Executable, err = decoder.ReadBool()
+    if err != nil {
+        return err
+    }
+    a.RentEpoch, err = decoder.ReadUint64(bin.LE)
+    return
+}
+
+func (a *Account) MarshalWihEncoder(encoder *bin.Encoder) error {
+    _ = encoder.WriteUint64(a.Lamports, bin.LE)
+    _ = encoder.WriteUint64(uint64(len(a.Data)), bin.LE)
+    _ = encoder.WriteBytes(a.Data, false)
+    _ = encoder.WriteBytes(a.Owner[:], false)
+    _ = encoder.WriteBool(a.Executable)
+    return encoder.WriteUint64(a.RentEpoch, bin.LE)
+}
+
+func (a *PohParams) UnmarshalWithDecoder(decoder *bin.Decoder) (err error) {
+    var tickDuration serdeDuration
+    if err = decoder.Decode(&tickDuration); err != nil {
+        return err
+    }
+    if a.TickDuration, err = tickDuration.Duration(); err != nil {
+        return err
+    }
+    if a.HasTickCount, err = decoder.ReadBool(); err != nil {
+        return err
+    }
+    if a.HasTickCount {
+        if a.TickCount, err = decoder.ReadUint64(bin.LE); err != nil {
+            return err
+        }
+    }
+    if a.HasHashesPerTick, err = decoder.ReadBool(); err != nil {
+        return err
+    }
+    if a.HasHashesPerTick {
+        if a.HashesPerTick, err = decoder.ReadUint64(bin.LE); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (a *PohParams) MarshalWithDecoder(encoder *bin.Encoder) (err error) {
+    tickDuration := newSerdeDuration(a.TickDuration)
+    _ = encoder.Encode(&tickDuration)
+    _ = encoder.WriteBool(a.HasTickCount)
+    if a.HasTickCount {
+        _ = encoder.WriteUint64(a.TickCount, bin.LE)
+    }
+    _ = encoder.WriteBool(a.HasHashesPerTick)
+    if a.HasHashesPerTick {
+        _ = encoder.WriteUint64(a.HashesPerTick, bin.LE)
+    }
+    return nil
+}
+
+// serdeDuration implements the bincode serialization of std::time::Duration.
+type serdeDuration struct {
+    Secs  uint64
+    Nanos uint32
+}
+
+func newSerdeDuration(d time.Duration) serdeDuration {
+    if d < 0 {
+        panic("negative duration")
+    }
+    return serdeDuration{
+        Secs:  uint64(d / time.Second),
+        Nanos: uint32(d % time.Second),
+    }
+}
+
+func (s serdeDuration) Duration() (time.Duration, error) {
+    if time.Duration(s.Nanos) > time.Second {
+        return 0, fmt.Errorf("malformed serde duration")
+    }
+    if s.Secs > uint64(time.Duration(math.MaxInt64)/time.Second) {
+        return 0, fmt.Errorf("malformed serde duration")
+    }
+    d := time.Duration(s.Nanos) + (time.Duration(s.Secs) * time.Second)
+    return d, nil
+}
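serdeDuration mirrors bincode's (secs u64, nanos u32) encoding of Rust's std::time::Duration. For the mainnet PoH tick duration asserted in the genesis test earlier (6,250,000 ns), the split looks like this small check:

package main

import (
    "fmt"
    "time"
)

func main() {
    d := 6250000 * time.Nanosecond // 6.25 ms tick duration
    secs := uint64(d / time.Second)
    nanos := uint32(d % time.Second)
    fmt.Println(secs, nanos) // 0 6250000
}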
diff --git a/readahead/readahead.go b/readahead/readahead.go
index a804bb71..124514b0 100644
--- a/readahead/readahead.go
+++ b/readahead/readahead.go
@@ -2,6 +2,7 @@ package readahead
 
 import (
     "bytes"
+    "errors"
     "fmt"
     "io"
     "os"
@@ -78,7 +79,7 @@ func (cr *CachingReader) Read(p []byte) (int, error) {
         if n > 0 {
             cr.buffer.Write(tmp[:n])
         }
-        if err == io.EOF && cr.buffer.Len() == 0 {
+        if errors.Is(err, io.EOF) && cr.buffer.Len() == 0 {
             // If EOF is reached and buffer is empty, return EOF
             return 0, io.EOF
         }
diff --git a/readers.go b/readers.go
index 5fb53e1e..f5770033 100644
--- a/readers.go
+++ b/readers.go
@@ -172,7 +172,7 @@ func isDirEmpty(dir string) (bool, error) {
     defer file.Close()
 
     _, err = file.Readdir(1)
-    if err == io.EOF {
+    if errors.Is(err, io.EOF) {
         return true, nil
     }
     return false, err
@@ -202,7 +202,7 @@ func carCountItems(carPath string) (uint64, error) {
     for {
         _, _, err := rd.NextInfo()
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return 0, err
@@ -230,7 +230,7 @@ func carCountItemsByFirstByte(carPath string) (map[byte]uint64, error) {
     for {
         _, _, block, err := rd.NextNode()
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return nil, err
diff --git a/store/index/gc.go b/store/index/gc.go
index d9e6c8b2..ebfadde5 100644
--- a/store/index/gc.go
+++ b/store/index/gc.go
@@ -8,6 +8,7 @@ package index
 import (
     "context"
     "encoding/binary"
+    "errors"
     "fmt"
     "io"
     "os"
@@ -308,7 +309,7 @@ func (index *Index) reapIndexRecords(ctx context.Context, fileNum uint32, indexP
             return false, ctx.Err()
         }
         if _, err = file.ReadAt(sizeBuf, pos); err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 // Finished reading entire index.
                 break
             }
@@ -348,7 +349,7 @@ func (index *Index) reapIndexRecords(ctx context.Context, fileNum uint32, indexP
         }
         data := scratch[:size]
         if _, err = file.ReadAt(data, pos+sizePrefixSize); err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 // The data has not been written yet, or the file is corrupt.
                 // Take the data we are able to use and move on.
                 break
diff --git a/store/index/index.go b/store/index/index.go
index 83a68779..66015daa 100644
--- a/store/index/index.go
+++ b/store/index/index.go
@@ -10,6 +10,7 @@ import (
     "bytes"
     "context"
     "encoding/binary"
+    "errors"
     "fmt"
     "io"
     "os"
@@ -362,7 +363,7 @@ func scanIndexFile(ctx context.Context, basePath string, fileNum uint32, buckets
     var i int
     for {
         if _, err = file.ReadAt(sizeBuffer, pos); err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 // Finished reading entire index.
                 break
             }
@@ -392,7 +393,7 @@ func scanIndexFile(ctx context.Context, basePath string, fileNum uint32, buckets
         }
         data := scratch[:size]
         if _, err = file.ReadAt(data, pos); err != nil {
-            if err == io.ErrUnexpectedEOF || err == io.EOF {
+            if err == io.ErrUnexpectedEOF || errors.Is(err, io.EOF) {
                 // The file is corrupt since the expected data could not be
                 // read. Take the usable data and move on.
                 log.Errorw("Unexpected EOF scanning index record", "file", indexPath)
@@ -1059,7 +1060,7 @@ func (iter *RawIterator) Next() ([]byte, types.Position, bool, error) {
     _, err := iter.file.ReadAt(sizeBuf, iter.pos)
     if err != nil {
         iter.file.Close()
-        if err == io.EOF {
+        if errors.Is(err, io.EOF) {
             iter.file = nil
             iter.fileNum++
             return iter.Next()
@@ -1488,7 +1489,7 @@ func MoveFiles(indexPath, newDir string) error {
     for {
         fileName, err := fileIter.next()
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return err
diff --git a/store/index/upgrade.go b/store/index/upgrade.go
index 5d6ce441..09a3b2ab 100644
--- a/store/index/upgrade.go
+++ b/store/index/upgrade.go
@@ -9,6 +9,7 @@ import (
     "bufio"
     "context"
     "encoding/binary"
+    "errors"
     "fmt"
     "io"
     "os"
@@ -91,7 +92,7 @@ func chunkOldIndex(ctx context.Context, file *os.File, name string, fileSizeLimi
     for {
         _, err = io.ReadFull(reader, sizeBuffer)
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return 0, err
diff --git a/store/index/upgrade_test.go b/store/index/upgrade_test.go
index b00baf36..30846c9d 100644
--- a/store/index/upgrade_test.go
+++ b/store/index/upgrade_test.go
@@ -104,7 +104,7 @@ func testScanIndexFile(file *os.File, fileNum uint32, buckets Buckets, prevSize
     for {
         _, err := io.ReadFull(buffered, sizeBuffer)
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return err
@@ -119,7 +119,7 @@ func testScanIndexFile(file *os.File, fileNum uint32, buckets Buckets, prevSize
         }
         data := scratch[:size]
         _, err = io.ReadFull(buffered, data)
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 return errors.New("unexpected EOF")
             }
             return err
diff --git a/store/primary/gsfaprimary/gsfaprimary.go b/store/primary/gsfaprimary/gsfaprimary.go
index 5779d404..d0463422 100644
--- a/store/primary/gsfaprimary/gsfaprimary.go
+++ b/store/primary/gsfaprimary/gsfaprimary.go
@@ -496,7 +496,7 @@ func (iter *Iterator) Next() ([]byte, []byte, error) {
     _, err := iter.file.ReadAt(data, pos)
     if err != nil {
         iter.file.Close()
-        // if err == io.EOF {
+        // if errors.Is(err, io.EOF) {
         // 	err = io.ErrUnexpectedEOF
         // }
         return nil, nil, err
diff --git a/store/primary/gsfaprimary/upgrade_test.go b/store/primary/gsfaprimary/upgrade_test.go
index 0b99202b..f8626cbc 100644
--- a/store/primary/gsfaprimary/upgrade_test.go
+++ b/store/primary/gsfaprimary/upgrade_test.go
@@ -130,7 +130,7 @@ func testScanPrimaryFile(file *os.File) ([][]byte, error) {
     for {
         _, err := io.ReadFull(buffered, sizeBuffer)
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return nil, err
@@ -143,7 +143,7 @@ func testScanPrimaryFile(file *os.File) ([][]byte, error) {
         }
         data := scratch[:size]
         _, err = io.ReadFull(buffered, data)
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 return nil, errors.New("unexpected EOF")
             }
             return nil, err
diff --git a/store/primary/sig2epochprimary/upgrade_test.go b/store/primary/sig2epochprimary/upgrade_test.go
index 4ea0bdfe..ad33b55c 100644
--- a/store/primary/sig2epochprimary/upgrade_test.go
+++ b/store/primary/sig2epochprimary/upgrade_test.go
@@ -130,7 +130,7 @@ func testScanPrimaryFile(file *os.File) ([][]byte, error) {
     for {
         _, err := io.ReadFull(buffered, sizeBuffer)
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 break
             }
             return nil, err
@@ -143,7 +143,7 @@ func testScanPrimaryFile(file *os.File) ([][]byte, error) {
         }
         data := scratch[:size]
         _, err = io.ReadFull(buffered, data)
         if err != nil {
-            if err == io.EOF {
+            if errors.Is(err, io.EOF) {
                 return nil, errors.New("unexpected EOF")
             }
             return nil, err
diff --git a/store/store_test.go b/store/store_test.go
index faba3c2d..9591150b 100644
--- a/store/store_test.go
+++ b/store/store_test.go
@@ -2,6 +2,7 @@ package store_test
 
 import (
     "context"
+    "errors"
     "io"
     "os"
     "path/filepath"
@@ -89,7 +90,7 @@ func TestUpdate(t *testing.T) {
     var count int
     for {
         key, val, err := storeIter.Next()
-        if err == io.EOF {
+        if errors.Is(err, io.EOF) {
             break
         }
         require.Zero(t, count)
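Most of this diff mechanically switches io.EOF comparisons from == to errors.Is. The reason is that a wrapped EOF no longer compares equal with ==, while errors.Is still matches, as this small check shows:

package main

import (
    "errors"
    "fmt"
    "io"
)

func main() {
    err := fmt.Errorf("read failed: %w", io.EOF) // EOF wrapped with %w
    fmt.Println(err == io.EOF)          // false
    fmt.Println(errors.Is(err, io.EOF)) // true
}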