diff --git a/adapters.go b/adapters.go
new file mode 100644
index 00000000..9378e802
--- /dev/null
+++ b/adapters.go
@@ -0,0 +1,248 @@
+package main
+
+import (
+	"encoding/base64"
+
+	"github.com/mr-tron/base58"
+)
+
+func ptrToUint64(v uint64) *uint64 {
+	return &v
+}
+
+// byteSliceAsIntegerSlice converts a byte slice to an integer slice.
+func byteSliceAsIntegerSlice(b []byte) []uint64 {
+	var ret []uint64
+	for i := 0; i < len(b); i++ {
+		ret = append(ret, uint64(b[i]))
+	}
+	return ret
+}
+
+// adaptTransactionMetaToExpectedOutput adapts the transaction meta to the expected output
+// as per what the solana RPC server returns.
+func adaptTransactionMetaToExpectedOutput(m map[string]any) map[string]any {
+	meta, ok := m["meta"].(map[string]any)
+	if !ok {
+		return m
+	}
+	{
+		if _, ok := meta["err"]; ok {
+			meta["err"], _ = parseTransactionError(meta["err"])
+		} else {
+			meta["err"] = nil
+		}
+	}
+	{
+		if _, ok := meta["loadedAddresses"]; !ok {
+			meta["loadedAddresses"] = map[string]any{
+				"readonly": []any{},
+				"writable": []any{},
+			}
+		}
+		{
+			// if loadedReadonlyAddresses is present as a []any of base64 strings, then use it for loadedAddresses.readonly
+			if loadedReadonlyAddresses, ok := meta["loadedReadonlyAddresses"].([]any); ok {
+				// the address list is base64 encoded; decode and re-encode to base58
+				for i, addr := range loadedReadonlyAddresses {
+					addrStr, ok := addr.(string)
+					if ok {
+						decoded, err := base64.StdEncoding.DecodeString(addrStr)
+						if err == nil {
+							loadedReadonlyAddresses[i] = base58.Encode(decoded)
+						}
+					}
+				}
+				meta["loadedAddresses"].(map[string]any)["readonly"] = loadedReadonlyAddresses
+				delete(meta, "loadedReadonlyAddresses")
+			}
+			// if loadedWritableAddresses is present as a []any of base64 strings, then use it for loadedAddresses.writable
+			if loadedWritableAddresses, ok := meta["loadedWritableAddresses"].([]any); ok {
+				// the address list is base64 encoded; decode and re-encode to base58
+				for i, addr := range loadedWritableAddresses {
+					addrStr, ok := addr.(string)
+					if ok {
+						decoded, err := base64.StdEncoding.DecodeString(addrStr)
+						if err == nil {
+							loadedWritableAddresses[i] = base58.Encode(decoded)
+						}
+					}
+				}
+				meta["loadedAddresses"].(map[string]any)["writable"] = loadedWritableAddresses
+				delete(meta, "loadedWritableAddresses")
+			}
+		}
+		if preTokenBalances, ok := meta["preTokenBalances"]; !ok {
+			meta["preTokenBalances"] = []any{}
+		} else {
+			// in preTokenBalances.[].uiTokenAmount, fill in defaults for missing fields
+			preTokenBalances, ok := preTokenBalances.([]any)
+			if ok {
+				for _, preTokenBalanceAny := range preTokenBalances {
+					preTokenBalance, ok := preTokenBalanceAny.(map[string]any)
+					if ok {
+						uiTokenAmountAny, ok := preTokenBalance["uiTokenAmount"]
+						if ok {
+							uiTokenAmount, ok := uiTokenAmountAny.(map[string]any)
+							if ok {
+								_, ok := uiTokenAmount["decimals"]
+								if !ok {
+									uiTokenAmount["decimals"] = 0
+								}
+								_, ok = uiTokenAmount["uiAmount"]
+								if !ok {
+									uiTokenAmount["uiAmount"] = nil
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+		if postTokenBalances, ok := meta["postTokenBalances"]; !ok {
+			meta["postTokenBalances"] = []any{}
+		} else {
+			// in postTokenBalances.[].uiTokenAmount, fill in defaults for missing fields
+			postTokenBalances, ok := postTokenBalances.([]any)
+			if ok {
+				for _, postTokenBalanceAny := range postTokenBalances {
+					postTokenBalance, ok := postTokenBalanceAny.(map[string]any)
+					if ok {
+						uiTokenAmountAny, ok := postTokenBalance["uiTokenAmount"]
+						if ok {
+							uiTokenAmount, ok :=
uiTokenAmountAny.(map[string]any) + if ok { + _, ok := uiTokenAmount["decimals"] + if !ok { + uiTokenAmount["decimals"] = 0 + } + _, ok = uiTokenAmount["uiAmount"] + if !ok { + uiTokenAmount["uiAmount"] = nil + } + _, ok = uiTokenAmount["amount"] + if !ok { + uiTokenAmount["amount"] = "0" + } + _, ok = uiTokenAmount["uiAmountString"] + if !ok { + uiTokenAmount["uiAmountString"] = "0" + } + } + } + } + } + } + } + + delete(meta, "returnDataNone") + + if _, ok := meta["rewards"]; !ok { + meta["rewards"] = []any{} + } + if _, ok := meta["status"]; !ok { + eee, ok := meta["err"] + if ok { + if eee == nil { + meta["status"] = map[string]any{ + "Ok": nil, + } + } else { + meta["status"] = map[string]any{ + "Err": eee, + } + } + } + } + { + // TODO: is this correct? + // if doesn't have err, but has status and it is empty, then set status to Ok + if _, ok := meta["err"]; !ok || meta["err"] == nil { + if status, ok := meta["status"].(map[string]any); ok { + if len(status) == 0 { + meta["status"] = map[string]any{ + "Ok": nil, + } + } + } + } + } + } + { + if returnData, ok := meta["returnData"].(map[string]any); ok { + if data, ok := returnData["data"].(string); ok { + returnData["data"] = []any{data, "base64"} + } + + if programId, ok := returnData["programId"].(string); ok { + decoded, err := base64.StdEncoding.DecodeString(programId) + if err == nil { + returnData["programId"] = base58.Encode(decoded) + } + } + } + } + { + innerInstructionsAny, ok := meta["innerInstructions"] + if !ok { + meta["innerInstructions"] = []any{} + return m + } + innerInstructions, ok := innerInstructionsAny.([]any) + if !ok { + return m + } + for i, innerInstructionAny := range innerInstructions { + innerInstruction, ok := innerInstructionAny.(map[string]any) + if !ok { + continue + } + // If doesn't have `index`, then set it to 0 + if _, ok := innerInstruction["index"]; !ok { + innerInstruction["index"] = 0 + } + instructionsAny, ok := innerInstruction["instructions"] + if !ok { + continue + } + instructions, ok := instructionsAny.([]any) + if !ok { + continue + } + for _, instructionAny := range instructions { + instruction, ok := instructionAny.(map[string]any) + if !ok { + continue + } + { + if accounts, ok := instruction["accounts"]; ok { + // as string + accountsStr, ok := accounts.(string) + if ok { + decoded, err := base64.StdEncoding.DecodeString(accountsStr) + if err == nil { + instruction["accounts"] = byteSliceAsIntegerSlice(decoded) + } + } + } else { + instruction["accounts"] = []any{} + } + if data, ok := instruction["data"]; ok { + // as string + dataStr, ok := data.(string) + if ok { + decoded, err := base64.StdEncoding.DecodeString(dataStr) + if err == nil { + // TODO: the data in the `innerInstructions` is always base58 encoded (even if the transaction is base64 encoded) + instruction["data"] = base58.Encode(decoded) + } + } + } + } + } + meta["innerInstructions"].([]any)[i] = innerInstruction + } + } + return m +} diff --git a/bucketteer/bucketteer.go b/bucketteer/bucketteer.go new file mode 100644 index 00000000..43bf18ca --- /dev/null +++ b/bucketteer/bucketteer.go @@ -0,0 +1,38 @@ +package bucketteer + +import ( + "sort" + + "github.com/cespare/xxhash/v2" +) + +var _Magic = [8]byte{'b', 'u', 'c', 'k', 'e', 't', 't', 'e'} + +func Magic() [8]byte { + return _Magic +} + +const Version = uint64(1) + +func sortWithCompare[T any](a []T, compare func(i, j int) int) { + sort.Slice(a, func(i, j int) bool { + return compare(i, j) < 0 + }) + sorted := make([]T, len(a)) + eytzinger(a, sorted, 0, 1) + 
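+	// eytzinger filled `sorted` with the elements of `a` in breadth-first (Eytzinger)
+	// order; copy that layout back into the input slice so the implicit-tree search
+	// in read.go can binary-search it without computing midpoints.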
copy(a, sorted)
+}
+
+func eytzinger[T any](in, out []T, i, k int) int {
+	if k <= len(in) {
+		i = eytzinger(in, out, i, 2*k)
+		out[k-1] = in[i]
+		i++
+		i = eytzinger(in, out, i, 2*k+1)
+	}
+	return i
+}
+
+func Hash(sig [64]byte) uint64 {
+	return xxhash.Sum64(sig[:])
+}
diff --git a/bucketteer/bucketteer_test.go b/bucketteer/bucketteer_test.go
new file mode 100644
index 00000000..99e4e2a5
--- /dev/null
+++ b/bucketteer/bucketteer_test.go
@@ -0,0 +1,188 @@
+package bucketteer
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	bin "github.com/gagliardetto/binary"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/mmap"
+)
+
+func TestBucketteer(t *testing.T) {
+	path := filepath.Join(t.TempDir(), "test-bucketteer")
+	wr, err := NewWriter(path)
+	require.NoError(t, err)
+	firstSig := [64]byte{1, 2, 3, 4}
+	wr.Put(firstSig)
+
+	if !wr.Has(firstSig) {
+		t.Fatal("expected to have firstSig")
+	}
+	{
+		sig := [64]byte{1, 2, 3, 5}
+		require.False(t, wr.Has(sig))
+		wr.Put(sig)
+		require.True(t, wr.Has(sig))
+	}
+	{
+		sig := [64]byte{1, 2, 3, 6}
+		require.False(t, wr.Has(sig))
+		wr.Put(sig)
+		require.True(t, wr.Has(sig))
+	}
+	{
+		sig := [64]byte{22, 2, 3, 6}
+		require.False(t, wr.Has(sig))
+		wr.Put(sig)
+		require.True(t, wr.Has(sig))
+	}
+	{
+		sig := [64]byte{99, 2, 3, 6}
+		require.False(t, wr.Has(sig))
+		wr.Put(sig)
+		require.True(t, wr.Has(sig))
+	}
+	require.Equal(t, 3, len(wr.prefixToHashes))
+	{
+		gotSize, err := wr.Seal(map[string]string{
+			"epoch": "test",
+		})
+		require.NoError(t, err)
+		require.NoError(t, wr.Close())
+		realSize, err := getFileSize(path)
+		require.NoError(t, err)
+		require.Equal(t, realSize, gotSize)
+
+		fileContent, err := os.ReadFile(path)
+		require.NoError(t, err)
+
+		reader := bin.NewBorshDecoder(fileContent)
+
+		// read header size:
+		headerSize, err := reader.ReadUint32(bin.LE)
+		require.NoError(t, err)
+		require.Equal(t, uint32(8+8+8+(8+(4+5)+(4+4))+(3*(2+8))), headerSize)
+
+		// magic:
+		{
+			magicBuf := [8]byte{}
+			_, err := reader.Read(magicBuf[:])
+			require.NoError(t, err)
+			require.Equal(t, _Magic, magicBuf)
+		}
+		// version:
+		{
+			got, err := reader.ReadUint64(bin.LE)
+			require.NoError(t, err)
+			require.Equal(t, Version, got)
+		}
+		{
+			// read meta:
+			numMeta, err := reader.ReadUint64(bin.LE)
+			require.NoError(t, err)
+			require.Equal(t, uint64(1), numMeta)
+
+			key, err := reader.ReadString()
+			require.NoError(t, err)
+			require.Equal(t, "epoch", key)
+
+			value, err := reader.ReadString()
+			require.NoError(t, err)
+			require.Equal(t, "test", value)
+		}
+		// numPrefixes:
+		numPrefixes, err := reader.ReadUint64(bin.LE)
+		require.NoError(t, err)
+		require.Equal(t, uint64(3), numPrefixes)
+		// prefix -> offset:
+		prefixToOffset := make(map[[2]byte]uint64)
+		{
+			for i := 0; i < int(numPrefixes); i++ {
+				var prefix [2]byte
+				_, err := reader.Read(prefix[:])
+				require.NoError(t, err)
+				offset, err := reader.ReadUint64(bin.LE)
+				require.NoError(t, err)
+				prefixToOffset[prefix] = offset
+			}
+		}
+		{
+			require.Equal(t,
+				map[[2]uint8]uint64{
+					{0x1, 0x2}:  0x0,
+					{0x16, 0x2}: 0x1c,
+					{0x63, 0x2}: 0x28,
+				}, prefixToOffset)
+		}
+		contentBuf, err := reader.ReadNBytes(reader.Remaining())
+		require.NoError(t, err)
+		require.Equal(t,
+			[]byte{
+				0x3, 0x0, 0x0, 0x0, // num entries
+				0x49, 0xd7, 0xaf, 0x9e, 0x94, 0x4d, 0x9a, 0x6f,
+				0x2f, 0x12, 0xdb, 0x5b, 0x1, 0x62, 0xae, 0x1a,
+				0x3b, 0xb6, 0x71, 0x5f, 0x4, 0x4f, 0x36, 0xf2,
+				0x1, 0x0, 0x0, 0x0, // num entries
+				0x58, 0xe1, 0x9d, 0xde, 0x7c, 0xfb, 0xeb, 0x5a,
+				0x1, 0x0, 0x0, 0x0, // num entries
+				0x4c, 0xbd, 0xa3, 0xed, 0xd3, 0x8b, 0xa8, 0x44,
+			},
+			contentBuf,
+		)
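+		// The content section is a sequence of buckets: for each 2-byte prefix, a
+		// little-endian uint32 hash count followed by that many 8-byte xxhash64 values
+		// (laid out in Eytzinger order). Decode it back and verify each bucket: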
+		contentReader := bin.NewBorshDecoder(contentBuf)
+		{
+			for prefix, offset := range prefixToOffset {
+				// Now read the bucket:
+				{
+					err := contentReader.SetPosition(uint(offset))
+					require.NoError(t, err)
+					numHashes, err := contentReader.ReadUint32(bin.LE)
+					require.NoError(t, err)
+					switch prefix {
+					case [2]byte{1, 2}:
+						require.Equal(t, uint32(3), numHashes)
+					case [2]byte{22, 2}:
+						require.Equal(t, uint32(1), numHashes)
+					case [2]byte{99, 2}:
+						require.Equal(t, uint32(1), numHashes)
+					}
+
+					for i := 0; i < int(numHashes); i++ {
+						hash, err := contentReader.ReadUint64(bin.LE)
+						require.NoError(t, err)
+						found := false
+						for _, h := range wr.prefixToHashes[prefix] {
+							if h == hash {
+								found = true
+								break
+							}
+						}
+						require.True(t, found)
+					}
+				}
+			}
+		}
+		{
+			// re-open the sealed file (via mmap) and check that the reader finds the signatures:
+			mmr, err := mmap.Open(path)
+			require.NoError(t, err)
+			defer mmr.Close()
+			reader, err := NewReader(mmr)
+			require.NoError(t, err)
+			ok, err := reader.Has(firstSig)
+			require.NoError(t, err)
+			require.True(t, ok)
+		}
+	}
+}
+
+func getFileSize(path string) (int64, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	return info.Size(), nil
+}
diff --git a/bucketteer/example/main.go b/bucketteer/example/main.go
new file mode 100644
index 00000000..f6afffcf
--- /dev/null
+++ b/bucketteer/example/main.go
@@ -0,0 +1,146 @@
+package main
+
+import (
+	"crypto/rand"
+	"flag"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/dustin/go-humanize"
+	"github.com/rpcpool/yellowstone-faithful/bucketteer"
+	"golang.org/x/exp/mmap"
+)
+
+func main() {
+	startedAt := time.Now()
+	defer func() {
+		fmt.Printf("took: %v\n", time.Since(startedAt))
+	}()
+	var numItemsToInsert int
+	flag.IntVar(&numItemsToInsert, "num", 1_000_000, "num")
+	flag.Parse()
+
+	file := flag.Arg(0) // "bucketteer.bin"
+	if file == "" {
+		panic("no file specified")
+	}
+
+	samples := make([][64]byte, 0)
+	if !fileExistsAndIsNotEmpty(file) {
+		fmt.Println("File does not exist or is empty, creating it...")
+		fmt.Println("Items to insert:", humanize.Comma(int64(numItemsToInsert)))
+		totalWriteStartedAt := time.Now()
+		buWr, err := bucketteer.NewWriter(file)
+		if err != nil {
+			panic(err)
+		}
+		defer buWr.Close()
+		tookBatch := time.Duration(0)
+		for i := 1; i <= numItemsToInsert; i++ {
+			sig := newRandomSignature()
+			startedSet := time.Now()
+			buWr.Put(sig)
+			tookBatch += time.Since(startedSet)
+			if i%100_000 == 0 {
+				fmt.Print(".")
+				samples = append(samples, sig)
+			}
+			if i%1_000_000 == 0 {
+				fmt.Print(humanize.Comma(int64(i)))
+				fmt.Printf(
+					" · took: %v (%s per item)\n",
+					tookBatch,
+					tookBatch/time.Duration(1_000_000),
+				)
+				tookBatch = 0
+			}
+		}
+
+		fmt.Println("writing to file...")
+		writeStartedAt := time.Now()
+		_, err = buWr.Seal(nil)
+		if err != nil {
+			panic(err)
+		}
+		fmt.Println("writing to file took:", time.Since(writeStartedAt))
+		fmt.Println("total write took:", time.Since(totalWriteStartedAt))
+	}
+	mmr, err := mmap.Open(file)
+	if err != nil {
+		panic(err)
+	}
+	defer mmr.Close()
+	buRd, err := bucketteer.NewReader(mmr)
+	if err != nil {
+		panic(err)
+	}
+	spew.Dump(buRd.Meta())
+	if len(samples) > 0 {
+		fmt.Println("testing search with samples from the inserted signatures...")
+		tookBatch := time.Duration(0)
+		for _, sig := range samples {
+			startedSearch := time.Now()
+			found, err := buRd.Has(sig)
+			if err != nil {
+				panic(err)
+			}
+			if !found {
+				panic("not found")
+			}
+			tookBatch +=
time.Since(startedSearch) + } + fmt.Println("\n"+" num samples:", len(samples)) + fmt.Println(" search took:", tookBatch) + fmt.Println("avg search took:", tookBatch/time.Duration(len(samples))) + } + if true { + // now search for random signatures that are not in the Bucketteer: + numSearches := 100_000_000 + fmt.Println( + "testing search for random signatures that are not in the Bucketteer (numSearches:", + humanize.Comma(int64(numSearches)), + ")...", + ) + tookBatch := time.Duration(0) + for i := 1; i <= numSearches; i++ { + sig := newRandomSignature() + startedSearch := time.Now() + found, err := buRd.Has(sig) + if err != nil { + panic(err) + } + if found { + panic("found") + } + tookBatch += time.Since(startedSearch) + if i%100_000 == 0 { + fmt.Print(".") + } + } + fmt.Println("\n"+" num candidates:", humanize.Comma(int64(numSearches))) + fmt.Println(" search took:", tookBatch) + fmt.Println("avg search took:", tookBatch/time.Duration(numSearches)) + } +} + +func newRandomSignature() [64]byte { + var sig [64]byte + rand.Read(sig[:]) + return sig +} + +func fileExistsAndIsNotEmpty(path string) bool { + info, err := os.Stat(path) + if os.IsNotExist(err) { + return false + } + if err != nil { + panic(err) + } + if info.Size() == 0 { + return false + } + return true +} diff --git a/bucketteer/read.go b/bucketteer/read.go new file mode 100644 index 00000000..7c7d2c95 --- /dev/null +++ b/bucketteer/read.go @@ -0,0 +1,203 @@ +package bucketteer + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + + bin "github.com/gagliardetto/binary" + "golang.org/x/exp/mmap" +) + +type Reader struct { + contentReader io.ReaderAt + meta map[string]string + prefixToOffset map[[2]byte]uint64 +} + +// Open opens a Bucketteer file in read-only mode, +// using memory-mapped IO. +func Open(path string) (*Reader, error) { + file, err := mmap.Open(path) + if err != nil { + return nil, err + } + return NewReader(file) +} + +func NewReader(reader io.ReaderAt) (*Reader, error) { + r := &Reader{ + prefixToOffset: make(map[[2]byte]uint64), + } + prefixToOffset, meta, headerTotalSize, err := readHeader(reader) + if err != nil { + return nil, err + } + r.meta = meta + r.prefixToOffset = prefixToOffset + r.contentReader = io.NewSectionReader(reader, headerTotalSize, 1<<63-1) + return r, nil +} + +func (r *Reader) Close() error { + if closer, ok := r.contentReader.(io.Closer); ok { + return closer.Close() + } + return nil +} + +func (r *Reader) Meta() map[string]string { + return r.meta +} + +// GetMeta returns the value of the given key. +// Returns an empty string if the key does not exist. 
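+// For example, after sealing a file with Seal(map[string]string{"epoch": "test"}),
+// GetMeta("epoch") returns "test".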
+func (r *Reader) GetMeta(key string) string {
+	return r.meta[key]
+}
+
+func readHeaderSize(reader io.ReaderAt) (int64, error) {
+	// read header size:
+	headerSizeBuf := make([]byte, 4)
+	if _, err := reader.ReadAt(headerSizeBuf, 0); err != nil {
+		return 0, err
+	}
+	headerSize := int64(binary.LittleEndian.Uint32(headerSizeBuf))
+	return headerSize, nil
+}
+
+func readHeader(reader io.ReaderAt) (map[[2]byte]uint64, map[string]string, int64, error) {
+	// read header size:
+	headerSize, err := readHeaderSize(reader)
+	if err != nil {
+		return nil, nil, 0, err
+	}
+	// read header bytes:
+	headerBuf := make([]byte, headerSize)
+	if _, err := reader.ReadAt(headerBuf, 4); err != nil {
+		return nil, nil, 0, err
+	}
+	// decode header:
+	decoder := bin.NewBorshDecoder(headerBuf)
+
+	// magic:
+	{
+		magicBuf := make([]byte, len(_Magic[:]))
+		_, err := decoder.Read(magicBuf)
+		if err != nil {
+			return nil, nil, 0, err
+		}
+		if !bytes.Equal(magicBuf, _Magic[:]) {
+			return nil, nil, 0, fmt.Errorf("invalid magic: %x", string(magicBuf))
+		}
+	}
+	// version:
+	{
+		got, err := decoder.ReadUint64(bin.LE)
+		if err != nil {
+			return nil, nil, 0, err
+		}
+		if got != Version {
+			return nil, nil, 0, fmt.Errorf("expected version %d, got %d", Version, got)
+		}
+	}
+	// read meta (declared outside the block so it can actually be returned):
+	var meta map[string]string
+	{
+		numMeta, err := decoder.ReadUint64(bin.LE)
+		if err != nil {
+			return nil, nil, 0, err
+		}
+		meta = make(map[string]string, numMeta)
+		for i := uint64(0); i < numMeta; i++ {
+			key, err := decoder.ReadString()
+			if err != nil {
+				return nil, nil, 0, err
+			}
+			value, err := decoder.ReadString()
+			if err != nil {
+				return nil, nil, 0, err
+			}
+			meta[key] = value
+		}
+	}
+	// numPrefixes:
+	numPrefixes, err := decoder.ReadUint64(bin.LE)
+	if err != nil {
+		return nil, nil, 0, err
+	}
+	// prefix -> offset:
+	prefixToOffset := make(map[[2]byte]uint64, numPrefixes)
+	for i := uint64(0); i < numPrefixes; i++ {
+		var prefix [2]byte
+		_, err := decoder.Read(prefix[:])
+		if err != nil {
+			return nil, nil, 0, err
+		}
+		offset, err := decoder.ReadUint64(bin.LE)
+		if err != nil {
+			return nil, nil, 0, err
+		}
+		prefixToOffset[prefix] = offset
+	}
+	return prefixToOffset, meta, headerSize + 4, nil
+}
+
+func (r *Reader) Has(sig [64]byte) (bool, error) {
+	prefix := [2]byte{sig[0], sig[1]}
+	offset, ok := r.prefixToOffset[prefix]
+	if !ok {
+		return false, nil
+	}
+	// numHashes:
+	numHashesBuf := make([]byte, 4)
+	_, err := r.contentReader.ReadAt(numHashesBuf, int64(offset))
+	if err != nil {
+		return false, err
+	}
+	numHashes := binary.LittleEndian.Uint32(numHashesBuf)
+	bucketReader := io.NewSectionReader(r.contentReader, int64(offset)+4, int64(numHashes)*8)
+
+	// hashes:
+	wantedHash := Hash(sig)
+	got, err := searchEytzinger(0, int(numHashes), wantedHash, func(index int) (uint64, error) {
+		pos := int64(index * 8)
+		return readUint64Le(bucketReader, pos)
+	})
+	if err != nil {
+		if err == ErrNotFound {
+			return false, nil
+		}
+		return false, err
+	}
+	return got == wantedHash, nil
+}
+
+// searchEytzinger does a binary search over values laid out in Eytzinger
+// (breadth-first) order, where the children of index i live at 2i+1 and 2i+2.
+func searchEytzinger(min int, max int, x uint64, getter func(int) (uint64, error)) (uint64, error) {
+	var index int
+	for index < max {
+		k, err := getter(index)
+		if err != nil {
+			return 0, err
+		}
+		if k == x {
+			return k, nil
+		}
+		// descend to the left child, or to the right child if k < x:
+		index = index<<1 | 1
+		if k < x {
+			index++
+		}
+	}
+	return 0, ErrNotFound
+}
+
+var ErrNotFound = fmt.Errorf("not found")
+
+func readUint64Le(reader io.ReaderAt, pos int64) (uint64, error) {
+	buf := make([]byte, 8)
+	_, err := reader.ReadAt(buf, pos)
+	if err != nil {
+		return 0, err
+	}
+	return
binary.LittleEndian.Uint64(buf), nil +} diff --git a/bucketteer/write.go b/bucketteer/write.go new file mode 100644 index 00000000..5837f1be --- /dev/null +++ b/bucketteer/write.go @@ -0,0 +1,292 @@ +package bucketteer + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "os" + "sort" + + bin "github.com/gagliardetto/binary" +) + +type Writer struct { + destination *os.File + writer *bufio.Writer + prefixToHashes map[[2]byte][]uint64 // prefix -> hashes +} + +const ( + _MiB = 1024 * 1024 + writeBufSize = _MiB * 10 +) + +func NewWriter(path string) (*Writer, error) { + if ok, err := isDir(path); err != nil { + return nil, err + } else if ok { + return nil, fmt.Errorf("path is a directory") + } + if ok, err := fileIsBlank(path); err != nil { + return nil, err + } else if !ok { + return nil, fmt.Errorf("file is not blank") + } + file, err := os.Create(path) + if err != nil { + return nil, err + } + return &Writer{ + writer: bufio.NewWriterSize(file, writeBufSize), + destination: file, + prefixToHashes: make(map[[2]byte][]uint64), + }, nil +} + +// Put adds the given signature to the Bucketteer. +// Cannot be called concurrently. +func (b *Writer) Put(sig [64]byte) { + var prefix [2]byte + copy(prefix[:], sig[:2]) + b.prefixToHashes[prefix] = append(b.prefixToHashes[prefix], Hash(sig)) +} + +// Has returns true if the Bucketteer has seen the given signature. +func (b *Writer) Has(sig [64]byte) bool { + var prefix [2]byte + copy(prefix[:], sig[:2]) + hash := Hash(sig) + for _, h := range b.prefixToHashes[prefix] { + if h == hash { + return true + } + } + return false +} + +func (b *Writer) Close() error { + return b.destination.Close() +} + +// Seal writes the Bucketteer's state to the given writer. +func (b *Writer) Seal(meta map[string]string) (int64, error) { + // truncate file and seek to beginning: + if err := b.destination.Truncate(0); err != nil { + return 0, err + } + if _, err := b.destination.Seek(0, 0); err != nil { + return 0, err + } + newHeader, size, err := seal(b.writer, b.prefixToHashes, meta) + if err != nil { + return 0, err + } + return size, overwriteFileContentAt(b.destination, 0, newHeader) +} + +func createHeader( + magic [8]byte, + version uint64, + headerSizeIn uint32, + meta map[string]string, + prefixToOffset map[[2]byte]uint64, +) ([]byte, error) { + tmpHeaderBuf := new(bytes.Buffer) + headerWriter := bin.NewBorshEncoder(tmpHeaderBuf) + + // write header size: + if err := headerWriter.WriteUint32(headerSizeIn, binary.LittleEndian); err != nil { + return nil, err + } + // write magic: + if n, err := headerWriter.Write(magic[:]); err != nil { + return nil, err + } else { + if n != 8 { + return nil, fmt.Errorf("invalid number of bytes written for magic: %d", n) + } + } + // write version uint64 + if err := headerWriter.WriteUint64(version, binary.LittleEndian); err != nil { + return nil, err + } + // write meta + { + // write num meta entries + if err := headerWriter.WriteUint64(uint64(len(meta)), binary.LittleEndian); err != nil { + return nil, err + } + // write meta entries + for k, v := range meta { + if err := headerWriter.WriteString(k); err != nil { + return nil, err + } + if err := headerWriter.WriteString(v); err != nil { + return nil, err + } + } + } + // write num buckets + if err := headerWriter.WriteUint64(uint64(len(prefixToOffset)), binary.LittleEndian); err != nil { + return nil, err + } + + prefixes := getSortedPrefixes(prefixToOffset) + // write prefix+offset pairs + for _, prefix := range prefixes { + if _, err := 
headerWriter.Write(prefix[:]); err != nil {
+			return nil, err
+		}
+		offset := prefixToOffset[prefix]
+		if err := headerWriter.WriteUint64(offset, binary.LittleEndian); err != nil {
+			return nil, err
+		}
+	}
+	return tmpHeaderBuf.Bytes(), nil
+}
+
+func overwriteFileContentAt(
+	file *os.File,
+	offset int64,
+	data []byte,
+) error {
+	wrote, err := file.WriteAt(data, offset)
+	if err != nil {
+		return err
+	}
+	if wrote != len(data) {
+		return fmt.Errorf("wrote %d bytes, expected to write %d bytes", wrote, len(data))
+	}
+	return err
+}
+
+func getSortedPrefixes[K any](prefixToHashes map[[2]byte]K) [][2]byte {
+	prefixes := make([][2]byte, 0, len(prefixToHashes))
+	for prefix := range prefixToHashes {
+		prefixes = append(prefixes, prefix)
+	}
+	sort.Slice(prefixes, func(i, j int) bool {
+		return bytes.Compare(prefixes[i][:], prefixes[j][:]) < 0
+	})
+	return prefixes
+}
+
+func seal(
+	out *bufio.Writer,
+	prefixToHashes map[[2]byte][]uint64,
+	meta map[string]string,
+) ([]byte, int64, error) {
+	prefixes := getSortedPrefixes(prefixToHashes)
+	prefixToOffset := make(map[[2]byte]uint64, len(prefixes))
+	for _, prefix := range prefixes {
+		// initialize all offsets to 0:
+		prefixToOffset[prefix] = 0
+	}
+
+	totalWritten := int64(0)
+	// create and write draft header:
+	header, err := createHeader(
+		_Magic,
+		Version,
+		0, // header size (not yet known)
+		meta,
+		prefixToOffset,
+	)
+	if err != nil {
+		return nil, 0, err
+	}
+	headerSize, err := out.Write(header)
+	if err != nil {
+		return nil, 0, err
+	}
+	totalWritten += int64(headerSize)
+
+	previousOffset := uint64(0)
+	for _, prefix := range prefixes {
+		entries := getCleanSet(prefixToHashes[prefix])
+		if len(entries) != len(prefixToHashes[prefix]) {
+			panic(fmt.Sprintf("duplicate hashes for prefix %v", prefix))
+		}
+		sortWithCompare(entries, func(i, j int) int {
+			if entries[i] < entries[j] {
+				return -1
+			} else if entries[i] > entries[j] {
+				return 1
+			}
+			return 0
+		})
+
+		thisSize := 4 + len(entries)*8
+		// write the clean set to the buckets buffer
+		if err := binary.Write(out, binary.LittleEndian, uint32(len(entries))); err != nil {
+			return nil, 0, err
+		}
+		for _, h := range entries {
+			if err := binary.Write(out, binary.LittleEndian, h); err != nil {
+				return nil, 0, err
+			}
+		}
+
+		prefixToOffset[prefix] = previousOffset
+		previousOffset = previousOffset + uint64(thisSize)
+		totalWritten += int64(thisSize)
+	}
+
+	// flush the buckets buffer:
+	if err := out.Flush(); err != nil {
+		return nil, 0, err
+	}
+
+	// write final header by overwriting the draft header:
+	updatedHeader, err := createHeader(
+		_Magic,
+		Version,
+		uint32(headerSize-4), // -4 because we don't count the header size prefix itself
+		meta,
+		prefixToOffset,
+	)
+	if err != nil {
+		return nil, 0, err
+	}
+	return updatedHeader, totalWritten, err
+}
+
+// getCleanSet returns a sorted, deduplicated copy of entries
+// (the input slice is sorted in place as a side effect).
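+// For example, []uint64{3, 1, 3, 2} yields []uint64{1, 2, 3}.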
+func getCleanSet(entries []uint64) []uint64 { + // sort: + sort.Slice(entries, func(i, j int) bool { + return entries[i] < entries[j] + }) + // dedup: + out := make([]uint64, 0, len(entries)) + for i := 0; i < len(entries); i++ { + if i > 0 && entries[i] == entries[i-1] { + continue + } + out = append(out, entries[i]) + } + return out +} + +func fileIsBlank(path string) (bool, error) { + info, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return true, nil + } + return false, err + } + return info.Size() == 0, nil +} + +func isDir(path string) (bool, error) { + info, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return info.IsDir(), nil +} diff --git a/car-dag-traverser.go b/car-dag-traverser.go index f038abc5..788b2ed6 100644 --- a/car-dag-traverser.go +++ b/car-dag-traverser.go @@ -316,8 +316,11 @@ func FindSubsets( } for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - break + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err } { if block.RawData()[1] != byte(iplddecoders.KindSubset) { @@ -365,8 +368,11 @@ func FindBlocks( } for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - break + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err } { if block.RawData()[1] != byte(iplddecoders.KindBlock) { @@ -411,8 +417,11 @@ func FindEntries( } for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - break + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err } { if block.RawData()[1] != byte(iplddecoders.KindEntry) { @@ -454,8 +463,11 @@ func FindTransactions( } for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - break + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err } { if block.RawData()[1] != byte(iplddecoders.KindTransaction) { @@ -497,8 +509,11 @@ func FindRewards( } for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - break + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err } { if block.RawData()[1] != byte(iplddecoders.KindRewards) { @@ -540,8 +555,11 @@ func FindDataFrames( } for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - break + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err } { if block.RawData()[1] != byte(iplddecoders.KindDataFrame) { @@ -571,8 +589,11 @@ func FindAny( } for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - break + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err } { decoded, err := iplddecoders.DecodeAny(block.RawData()) diff --git a/cmd-dump-car.go b/cmd-dump-car.go index eab132e4..699c0bb0 100644 --- a/cmd-dump-car.go +++ b/cmd-dump-car.go @@ -7,6 +7,7 @@ import ( "io/fs" "os" "strconv" + "strings" "time" "github.com/klauspost/compress/zstd" @@ -17,12 +18,49 @@ import ( "github.com/ipld/go-car" "github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode" "github.com/rpcpool/yellowstone-faithful/iplddecoders" + "github.com/rpcpool/yellowstone-faithful/readahead" solanablockrewards "github.com/rpcpool/yellowstone-faithful/solana-block-rewards" solanatxmetaparsers "github.com/rpcpool/yellowstone-faithful/solana-tx-meta-parsers" "github.com/urfave/cli/v2" "k8s.io/klog/v2" ) +func isNumeric(s string) bool { + _, err := strconv.ParseInt(s, 10, 64) + return err == nil +} + +func shortToKind(s string) (iplddecoders.Kind, error) { + if isNumeric(s) { + parsed, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + if parsed < 0 || parsed > 
int64(iplddecoders.KindDataFrame) { + return 0, fmt.Errorf("unknown kind: %d", parsed) + } + return iplddecoders.Kind(parsed), nil + } + switch s { + case "tx", "transaction": + return iplddecoders.KindTransaction, nil + case "entry": + return iplddecoders.KindEntry, nil + case "block": + return iplddecoders.KindBlock, nil + case "subset": + return iplddecoders.KindSubset, nil + case "epoch": + return iplddecoders.KindEpoch, nil + case "rewards": + return iplddecoders.KindRewards, nil + case "dataframe": + return iplddecoders.KindDataFrame, nil + default: + return 0, fmt.Errorf("unknown kind: %s", s) + } +} + func newCmd_DumpCar() *cli.Command { var flagPrintFilter string var printID bool @@ -39,7 +77,7 @@ func newCmd_DumpCar() *cli.Command { &cli.StringFlag{ Name: "filter", Aliases: []string{"f", "print"}, - Usage: "print only nodes of these kinds (comma-separated); example: --filter 0,1,2", + Usage: "print only nodes of these kinds (comma-separated); example: --filter epoch,block", Destination: &flagPrintFilter, }, @@ -64,16 +102,15 @@ func newCmd_DumpCar() *cli.Command { Action: func(c *cli.Context) error { filter := make(intSlice, 0) if flagPrintFilter != "" { - for _, v := range flagPrintFilter { - if v == ',' { + for _, v := range strings.Split(flagPrintFilter, ",") { + v = strings.TrimSpace(v) + v = strings.ToLower(v) + if v == "" { continue } - parsed, err := strconv.ParseInt(string(v), 10, 64) + parsed, err := shortToKind(string(v)) if err != nil { - panic(err) - } - if parsed < 0 || parsed > int64(iplddecoders.KindEpoch) { - return fmt.Errorf("invalid filter value: %d", parsed) + return fmt.Errorf("error parsing filter: %w", err) } filter = append(filter, int(parsed)) } @@ -92,7 +129,12 @@ func newCmd_DumpCar() *cli.Command { defer file.Close() } - rd, err := car.NewCarReader(file) + cachingReader, err := readahead.NewCachingReaderFromReader(file, readahead.DefaultChunkSize) + if err != nil { + klog.Exitf("Failed to create caching reader: %s", err) + } + + rd, err := car.NewCarReader(cachingReader) if err != nil { klog.Exitf("Failed to open CAR: %s", err) } @@ -119,7 +161,7 @@ func newCmd_DumpCar() *cli.Command { dotEvery := 100_000 klog.Infof("A dot is printed every %d nodes", dotEvery) if filter.empty() { - klog.Info("Will print all nodes") + klog.Info("Will print all nodes of all kinds") } else { klog.Info("Will print only nodes of these kinds: ") for _, v := range filter { @@ -131,10 +173,16 @@ func newCmd_DumpCar() *cli.Command { } for { + if c.Context.Err() != nil { + return c.Context.Err() + } block, err := rd.Next() - if errors.Is(err, io.EOF) { - fmt.Println("EOF") - break + if err != nil { + if errors.Is(err, io.EOF) { + fmt.Println("EOF") + break + } + panic(err) } numNodesSeen++ if numNodesSeen%dotEvery == 0 { diff --git a/cmd-fetch.go b/cmd-fetch.go index 70465b9d..d206c696 100644 --- a/cmd-fetch.go +++ b/cmd-fetch.go @@ -44,7 +44,7 @@ import ( "k8s.io/klog/v2" ) -var fetchProviderAddrInfos []peer.AddrInfo +var globalFetchProviderAddrInfos []peer.AddrInfo var lassieFetchFlags = []cli.Flag{ &cli.StringFlag{ @@ -99,7 +99,7 @@ var lassieFetchFlags = []cli.Flag{ } var err error - fetchProviderAddrInfos, err = types.ParseProviderStrings(v) + globalFetchProviderAddrInfos, err = types.ParseProviderStrings(v) return err }, }, @@ -161,8 +161,8 @@ func Fetch(cctx *cli.Context) error { hostOpt := lassie.WithHost(host) lassieOpts := []lassie.LassieOption{providerTimeoutOpt, hostOpt} - if len(fetchProviderAddrInfos) > 0 { - finderOpt := 
lassie.WithFinder(retriever.NewDirectCandidateFinder(host, fetchProviderAddrInfos)) + if len(globalFetchProviderAddrInfos) > 0 { + finderOpt := lassie.WithFinder(retriever.NewDirectCandidateFinder(host, globalFetchProviderAddrInfos)) if cctx.IsSet("ipni-endpoint") { klog.Warning("Ignoring ipni-endpoint flag since direct provider is specified") } @@ -213,10 +213,10 @@ func Fetch(cctx *cli.Context) error { // create and subscribe an event recorder API if configured setupLassieEventRecorder(ctx, eventRecorderURL, authToken, instanceID, lassie) - if len(fetchProviderAddrInfos) == 0 { + if len(globalFetchProviderAddrInfos) == 0 { fmt.Fprintf(msgWriter, "Fetching %s", rootCid.String()+path) } else { - fmt.Fprintf(msgWriter, "Fetching %s from %v", rootCid.String()+path, fetchProviderAddrInfos) + fmt.Fprintf(msgWriter, "Fetching %s from %v", rootCid.String()+path, globalFetchProviderAddrInfos) } if progress { fmt.Fprintln(msgWriter) @@ -333,7 +333,7 @@ func (pp *progressPrinter) subscriber(event types.RetrievalEvent) { } else if pp.candidatesFound == 1 { num = "it" } - if len(fetchProviderAddrInfos) > 0 { + if len(globalFetchProviderAddrInfos) > 0 { fmt.Fprintf(pp.writer, "Found %d storage providers candidates from the indexer, querying %s:\n", pp.candidatesFound, num) } else { fmt.Fprintf(pp.writer, "Using the explicitly specified storage provider(s), querying %s:\n", num) diff --git a/cmd-rpc-server-car-getBlock.go b/cmd-rpc-server-car-getBlock.go index 9ed9627b..751c7111 100644 --- a/cmd-rpc-server-car-getBlock.go +++ b/cmd-rpc-server-car-getBlock.go @@ -67,7 +67,7 @@ func (t *timer) time(name string) { t.prev = time.Now() } -func (ser *rpcServer) handleGetBlock(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { +func (ser *deprecatedRPCServer) handleGetBlock(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { tim := newTimer() params, err := parseGetBlockRequest(req.Params) if err != nil { @@ -357,7 +357,7 @@ func (ser *rpcServer) handleGetBlock(ctx context.Context, conn *requestContext, // if it's a float, convert to int and use rentTypeToString if asFloat, ok := rewardAsMap["rewardType"].(float64); ok { - rewardAsMap["rewardType"] = rentTypeToString(int(asFloat)) + rewardAsMap["rewardType"] = rewardTypeToString(int(asFloat)) } } } @@ -470,8 +470,8 @@ func (ser *rpcServer) handleGetBlock(ctx context.Context, conn *requestContext, }) return } - parentEntryHash := solana.HashFromBytes(parentEntryNode.Hash) - blockResp.PreviousBlockhash = parentEntryHash.String() + parentEntryHash := solana.HashFromBytes(parentEntryNode.Hash).String() + blockResp.PreviousBlockhash = &parentEntryHash } } } @@ -509,7 +509,7 @@ func (ser *rpcServer) handleGetBlock(ctx context.Context, conn *requestContext, // Staking, // Voting, // } -func rentTypeToString(typ int) string { +func rewardTypeToString(typ int) string { switch typ { case 1: return "Fee" @@ -524,4 +524,19 @@ func rentTypeToString(typ int) string { } } +func rewardTypeStringToInt(typ string) int { + switch typ { + case "Fee": + return 1 + case "Rent": + return 2 + case "Staking": + return 3 + case "Voting": + return 4 + default: + return 0 + } +} + const CodeNotFound = -32009 diff --git a/cmd-rpc-server-car-getSignaturesForAddress.go b/cmd-rpc-server-car-getSignaturesForAddress.go index d7f09d2b..0d1f2e74 100644 --- a/cmd-rpc-server-car-getSignaturesForAddress.go +++ b/cmd-rpc-server-car-getSignaturesForAddress.go @@ -30,20 +30,20 @@ type GetSignaturesForAddressParams struct { func parseGetSignaturesForAddressParams(raw 
*json.RawMessage) (*GetSignaturesForAddressParams, error) { var params []any if err := json.Unmarshal(*raw, ¶ms); err != nil { - klog.Errorf("failed to unmarshal params: %v", err) - return nil, err + return nil, fmt.Errorf("failed to unmarshal params: %w", err) + } + if len(params) < 1 { + return nil, fmt.Errorf("expected at least 1 param") } sigRaw, ok := params[0].(string) if !ok { - klog.Errorf("first argument must be a string") - return nil, nil + return nil, fmt.Errorf("first argument must be a string") } out := &GetSignaturesForAddressParams{} pk, err := solana.PublicKeyFromBase58(sigRaw) if err != nil { - klog.Errorf("failed to parse pubkey from base58: %v", err) - return nil, err + return nil, fmt.Errorf("failed to parse pubkey from base58: %w", err) } out.Address = pk @@ -60,8 +60,7 @@ func parseGetSignaturesForAddressParams(raw *json.RawMessage) (*GetSignaturesFor if before, ok := before.(string); ok { sig, err := solana.SignatureFromBase58(before) if err != nil { - klog.Errorf("failed to parse signature from base58: %v", err) - return nil, err + return nil, fmt.Errorf("failed to parse signature from base58: %w", err) } out.Before = &sig } @@ -70,8 +69,7 @@ func parseGetSignaturesForAddressParams(raw *json.RawMessage) (*GetSignaturesFor if after, ok := after.(string); ok { sig, err := solana.SignatureFromBase58(after) if err != nil { - klog.Errorf("failed to parse signature from base58: %v", err) - return nil, err + return nil, fmt.Errorf("failed to parse signature from base58: %w", err) } out.Until = &sig } @@ -85,7 +83,7 @@ func parseGetSignaturesForAddressParams(raw *json.RawMessage) (*GetSignaturesFor return out, nil } -func (ser *rpcServer) handleGetSignaturesForAddress(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { +func (ser *deprecatedRPCServer) handleGetSignaturesForAddress(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { if ser.gsfaReader == nil { klog.Errorf("gsfaReader is nil") conn.ReplyWithError( @@ -225,7 +223,7 @@ func (ser *rpcServer) handleGetSignaturesForAddress(ctx context.Context, conn *r } // reply with the data - err = conn.ReplyNoMod( + err = conn.ReplyRaw( ctx, req.ID, response, diff --git a/cmd-rpc-server-car-getTransaction.go b/cmd-rpc-server-car-getTransaction.go index 0d9559f2..36f65953 100644 --- a/cmd-rpc-server-car-getTransaction.go +++ b/cmd-rpc-server-car-getTransaction.go @@ -2,20 +2,14 @@ package main import ( "context" - "encoding/base64" "errors" - "github.com/mr-tron/base58" "github.com/rpcpool/yellowstone-faithful/compactindex36" "github.com/sourcegraph/jsonrpc2" "k8s.io/klog/v2" ) -func ptrToUint64(v uint64) *uint64 { - return &v -} - -func (ser *rpcServer) handleGetTransaction(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { +func (ser *deprecatedRPCServer) handleGetTransaction(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { params, err := parseGetTransactionRequest(req.Params) if err != nil { klog.Errorf("failed to parse params: %v", err) @@ -34,7 +28,7 @@ func (ser *rpcServer) handleGetTransaction(ctx context.Context, conn *requestCon transactionNode, err := ser.GetTransaction(WithSubrapghPrefetch(ctx, true), sig) if err != nil { if errors.Is(err, compactindex36.ErrNotFound) { - conn.ReplyNoMod( + conn.ReplyRaw( ctx, req.ID, nil, // NOTE: solana just returns null here in case of transaction not found @@ -128,115 +122,3 @@ func (ser *rpcServer) handleGetTransaction(ctx context.Context, conn *requestCon klog.Errorf("failed to reply: %v", err) } } - -// 
byteSliceAsIntegerSlice converts a byte slice to an integer slice. -func byteSliceAsIntegerSlice(b []byte) []uint64 { - var ret []uint64 - for i := 0; i < len(b); i++ { - ret = append(ret, uint64(b[i])) - } - return ret -} - -// adaptTransactionMetaToExpectedOutput adapts the transaction meta to the expected output -// as per what solana RPC server returns. -func adaptTransactionMetaToExpectedOutput(m map[string]any) map[string]any { - meta, ok := m["meta"].(map[string]any) - if !ok { - return m - } - { - if _, ok := meta["err"]; ok { - meta["err"], _ = parseTransactionError(meta["err"]) - } else { - meta["err"] = nil - } - } - { - if _, ok := meta["loadedAddresses"]; !ok { - meta["loadedAddresses"] = map[string]any{ - "readonly": []any{}, - "writable": []any{}, - } - } - if _, ok := meta["postTokenBalances"]; !ok { - meta["postTokenBalances"] = []any{} - } - if _, ok := meta["preTokenBalances"]; !ok { - meta["preTokenBalances"] = []any{} - } - if _, ok := meta["rewards"]; !ok { - meta["rewards"] = []any{} - } - if _, ok := meta["status"]; !ok { - eee, ok := meta["err"] - if ok { - if eee == nil { - meta["status"] = map[string]any{ - "Ok": nil, - } - } else { - meta["status"] = map[string]any{ - "Err": eee, - } - } - } - } - } - { - innerInstructionsAny, ok := meta["innerInstructions"] - if !ok { - meta["innerInstructions"] = []any{} - return m - } - innerInstructions, ok := innerInstructionsAny.([]any) - if !ok { - return m - } - for i, innerInstructionAny := range innerInstructions { - innerInstruction, ok := innerInstructionAny.(map[string]any) - if !ok { - continue - } - instructionsAny, ok := innerInstruction["instructions"] - if !ok { - continue - } - instructions, ok := instructionsAny.([]any) - if !ok { - continue - } - for _, instructionAny := range instructions { - instruction, ok := instructionAny.(map[string]any) - if !ok { - continue - } - { - if accounts, ok := instruction["accounts"]; ok { - // as string - accountsStr, ok := accounts.(string) - if ok { - decoded, err := base64.StdEncoding.DecodeString(accountsStr) - if err == nil { - instruction["accounts"] = byteSliceAsIntegerSlice(decoded) - } - } - } - if data, ok := instruction["data"]; ok { - // as string - dataStr, ok := data.(string) - if ok { - decoded, err := base64.StdEncoding.DecodeString(dataStr) - if err == nil { - // TODO: the data in the `innerInstructions` is always base58 encoded (even if the transaction is base64 encoded) - instruction["data"] = base58.Encode(decoded) - } - } - } - } - } - meta["innerInstructions"].([]any)[i] = innerInstruction - } - } - return m -} diff --git a/cmd-rpc-server-car.go b/cmd-rpc-server-car.go index b3b600cd..4bf3ff83 100644 --- a/cmd-rpc-server-car.go +++ b/cmd-rpc-server-car.go @@ -2,32 +2,21 @@ package main import ( "bufio" - "bytes" "context" - "encoding/binary" - "encoding/json" - "errors" "fmt" "io" - "net/http" - "os" - "strings" "time" - bin "github.com/gagliardetto/binary" "github.com/gagliardetto/solana-go" "github.com/ipfs/go-cid" "github.com/ipld/go-car/util" carv2 "github.com/ipld/go-car/v2" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - jsoniter "github.com/json-iterator/go" "github.com/patrickmn/go-cache" "github.com/rpcpool/yellowstone-faithful/compactindex" "github.com/rpcpool/yellowstone-faithful/compactindex36" "github.com/rpcpool/yellowstone-faithful/gsfa" "github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode" "github.com/rpcpool/yellowstone-faithful/iplddecoders" - solanatxmetaparsers 
"github.com/rpcpool/yellowstone-faithful/solana-tx-meta-parsers" "github.com/sourcegraph/jsonrpc2" "github.com/urfave/cli/v2" "github.com/valyala/fasthttp" @@ -76,7 +65,11 @@ func newCmd_rpcServerCar() *cli.Command { return cli.Exit("Must provide a signature-to-CID index filepath/url", 1) } - cidToOffsetIndexFile, err := openIndexStorage(cidToOffsetIndexFilepath) + cidToOffsetIndexFile, err := openIndexStorage( + c.Context, + cidToOffsetIndexFilepath, + DebugMode, + ) if err != nil { return fmt.Errorf("failed to open index file: %w", err) } @@ -87,7 +80,11 @@ func newCmd_rpcServerCar() *cli.Command { return fmt.Errorf("failed to open index: %w", err) } - slotToCidIndexFile, err := openIndexStorage(slotToCidIndexFilepath) + slotToCidIndexFile, err := openIndexStorage( + c.Context, + slotToCidIndexFilepath, + DebugMode, + ) if err != nil { return fmt.Errorf("failed to open index file: %w", err) } @@ -98,7 +95,11 @@ func newCmd_rpcServerCar() *cli.Command { return fmt.Errorf("failed to open index: %w", err) } - sigToCidIndexFile, err := openIndexStorage(sigToCidIndexFilepath) + sigToCidIndexFile, err := openIndexStorage( + c.Context, + sigToCidIndexFilepath, + DebugMode, + ) if err != nil { return fmt.Errorf("failed to open index file: %w", err) } @@ -109,7 +110,7 @@ func newCmd_rpcServerCar() *cli.Command { return fmt.Errorf("failed to open index: %w", err) } - localCarReader, remoteCarReader, err := openCarStorage(carFilepath) + localCarReader, remoteCarReader, err := openCarStorage(c.Context, carFilepath) if err != nil { return fmt.Errorf("failed to open CAR file: %w", err) } @@ -143,162 +144,6 @@ func newCmd_rpcServerCar() *cli.Command { } } -// openIndexStorage open a compactindex from a local file, or from a remote URL. -// Supported protocols are: -// - http:// -// - https:// -func openIndexStorage(where string) (ReaderAtCloser, error) { - where = strings.TrimSpace(where) - if strings.HasPrefix(where, "http://") || strings.HasPrefix(where, "https://") { - return remoteHTTPFileAsIoReaderAt(where) - } - // TODO: add support for IPFS gateways. - // TODO: add support for Filecoin gateways. - return os.Open(where) -} - -func openCarStorage(where string) (*carv2.Reader, ReaderAtCloser, error) { - where = strings.TrimSpace(where) - if strings.HasPrefix(where, "http://") || strings.HasPrefix(where, "https://") { - rem, err := remoteHTTPFileAsIoReaderAt(where) - return nil, rem, err - } - // TODO: add support for IPFS gateways. - // TODO: add support for Filecoin gateways. - - carReader, err := carv2.OpenReader(where) - if err != nil { - return nil, nil, fmt.Errorf("failed to open CAR file: %w", err) - } - return carReader, nil, nil -} - -type ReaderAtCloser interface { - io.ReaderAt - io.Closer -} - -// remoteHTTPFileAsIoReaderAt returns a ReaderAtCloser for a remote file. -// The returned ReaderAtCloser is backed by a http.Client. 
-func remoteHTTPFileAsIoReaderAt(url string) (ReaderAtCloser, error) { - // send a request to the server to get the file size: - resp, err := http.Head(url) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) - } - contentLength := resp.ContentLength - - // Create a cache with a default expiration time of 5 minutes, and which - // purges expired items every 10 minutes - ca := cache.New(5*time.Minute, 10*time.Minute) - - return &HTTPSingleFileRemoteReaderAt{ - url: url, - contentLength: contentLength, - client: newHTTPClient(), - ca: ca, - }, nil -} - -type HTTPSingleFileRemoteReaderAt struct { - url string - contentLength int64 - client *http.Client - ca *cache.Cache -} - -func getHttpCacheKey(off int64, p []byte) string { - return fmt.Sprintf("%d-%d", off, len(p)) -} - -func (r *HTTPSingleFileRemoteReaderAt) getFromCache(off int64, p []byte) (n int, err error, has bool) { - key := getHttpCacheKey(off, p) - if v, ok := r.ca.Get(key); ok { - return copy(p, v.([]byte)), nil, true - } - return 0, nil, false -} - -func (r *HTTPSingleFileRemoteReaderAt) putInCache(off int64, p []byte) { - key := getHttpCacheKey(off, p) - r.ca.Set(key, p, cache.DefaultExpiration) -} - -// Close implements io.Closer. -func (r *HTTPSingleFileRemoteReaderAt) Close() error { - r.client.CloseIdleConnections() - return nil -} - -func retryExpotentialBackoff( - ctx context.Context, - startDuration time.Duration, - maxRetries int, - fn func() error, -) error { - var err error - for i := 0; i < maxRetries; i++ { - err = fn() - if err == nil { - return nil - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(startDuration): - startDuration *= 2 - } - } - return fmt.Errorf("failed after %d retries; last error: %w", maxRetries, err) -} - -func (r *HTTPSingleFileRemoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) { - if off >= r.contentLength { - return 0, io.EOF - } - fmt.Print(".") - if n, err, has := r.getFromCache(off, p); has { - return n, err - } - req, err := http.NewRequest("GET", r.url, nil) - if err != nil { - return 0, err - } - { - req.Header.Set("Connection", "keep-alive") - req.Header.Set("Keep-Alive", "timeout=600") - } - - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p)))) - - var resp *http.Response - err = retryExpotentialBackoff( - context.Background(), - 100*time.Millisecond, - 3, - func() error { - resp, err = r.client.Do(req) - return err - }) - if err != nil { - return 0, err - } - defer resp.Body.Close() - { - n, err := io.ReadFull(resp.Body, p) - if err != nil { - return 0, err - } - copyForCache := make([]byte, len(p)) - copy(copyForCache, p) - r.putInCache(off, copyForCache) - return n, nil - } -} - // createAndStartRPCServer_withCar creates and starts a JSON RPC server. // Data: // - Nodes: the node data is read from a CAR file (which can be a local file or a remote URL). 
@@ -323,7 +168,7 @@ func createAndStartRPCServer_withCar( } listenOn := options.ListenOn ca := cache.New(30*time.Second, 1*time.Minute) - handler := &rpcServer{ + handler := &deprecatedRPCServer{ localCarReader: carReader, remoteCarReader: remoteCarReader, cidToOffsetIndex: cidToOffsetIndex, @@ -354,7 +199,7 @@ func createAndStartRPCServer_lassie( } listenOn := options.ListenOn ca := cache.New(30*time.Second, 1*time.Minute) - handler := &rpcServer{ + handler := &deprecatedRPCServer{ lassieFetcher: lassieWr, slotToCidIndex: slotToCidIndex, sigToCidIndex: sigToCidIndex, @@ -375,7 +220,7 @@ type RpcServerOptions struct { GsfaOnlySignatures bool } -type rpcServer struct { +type deprecatedRPCServer struct { lassieFetcher *lassieWrapper localCarReader *carv2.Reader remoteCarReader ReaderAtCloser @@ -391,128 +236,18 @@ func getCidCacheKey(off int64, p []byte) string { return fmt.Sprintf("%d-%d", off, len(p)) } -func (r *rpcServer) getNodeFromCache(c cid.Cid) (v []byte, err error, has bool) { +func (r *deprecatedRPCServer) getNodeFromCache(c cid.Cid) (v []byte, err error, has bool) { if v, ok := r.cidToBlockCache.Get(c.String()); ok { return v.([]byte), nil, true } return nil, nil, false } -func (r *rpcServer) putNodeInCache(c cid.Cid, data []byte) { +func (r *deprecatedRPCServer) putNodeInCache(c cid.Cid, data []byte) { r.cidToBlockCache.Set(c.String(), data, cache.DefaultExpiration) } -type requestContext struct { - ctx *fasthttp.RequestCtx -} - -// ReplyWithError(ctx context.Context, id ID, respErr *Error) error { -func (c *requestContext) ReplyWithError(ctx context.Context, id jsonrpc2.ID, respErr *jsonrpc2.Error) error { - resp := &jsonrpc2.Response{ - ID: id, - Error: respErr, - } - replyJSON(c.ctx, http.StatusOK, resp) - return nil -} - -func toMapAny(v any) (map[string]any, error) { - b, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(v) - if err != nil { - return nil, err - } - var m map[string]any - if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -// MapToCamelCase converts a map[string]interface{} to a map[string]interface{} with camelCase keys -func MapToCamelCase(m map[string]any) map[string]any { - newMap := make(map[string]any) - for k, v := range m { - newMap[toLowerCamelCase(k)] = MapToCamelCaseAny(v) - } - return newMap -} - -func MapToCamelCaseAny(m any) any { - if m == nil { - return nil - } - if m, ok := m.(map[string]any); ok { - return MapToCamelCase(m) - } - // if array, convert each element - if m, ok := m.([]any); ok { - for i, v := range m { - m[i] = MapToCamelCaseAny(v) - } - } - return m -} - -func toLowerCamelCase(v string) string { - pascal := bin.ToPascalCase(v) - if len(pascal) == 0 { - return "" - } - if len(pascal) == 1 { - return strings.ToLower(pascal) - } - return strings.ToLower(pascal[:1]) + pascal[1:] -} - -// Reply(ctx context.Context, id ID, result interface{}) error { -func (c *requestContext) Reply( - ctx context.Context, - id jsonrpc2.ID, - result interface{}, - remapCallback func(map[string]any) map[string]any, -) error { - mm, err := toMapAny(result) - if err != nil { - return err - } - result = MapToCamelCaseAny(mm) - if remapCallback != nil { - if mp, ok := result.(map[string]any); ok { - result = remapCallback(mp) - } - } - resRaw, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(result) - if err != nil { - return err - } - raw := json.RawMessage(resRaw) - resp := &jsonrpc2.Response{ - ID: id, - Result: &raw, - } - replyJSON(c.ctx, 
http.StatusOK, resp) - return err -} - -func (c *requestContext) ReplyNoMod( - ctx context.Context, - id jsonrpc2.ID, - result interface{}, -) error { - resRaw, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(result) - if err != nil { - return err - } - raw := json.RawMessage(resRaw) - resp := &jsonrpc2.Response{ - ID: id, - Result: &raw, - } - replyJSON(c.ctx, http.StatusOK, resp) - return err -} - -func (s *rpcServer) prefetchSubgraph(ctx context.Context, wantedCid cid.Cid) error { +func (s *deprecatedRPCServer) prefetchSubgraph(ctx context.Context, wantedCid cid.Cid) error { if s.lassieFetcher != nil { // Fetch the subgraph from lassie sub, err := s.lassieFetcher.GetSubgraph(ctx, wantedCid) @@ -529,7 +264,7 @@ func (s *rpcServer) prefetchSubgraph(ctx context.Context, wantedCid cid.Cid) err return nil } -func (s *rpcServer) GetNodeByCid(ctx context.Context, wantedCid cid.Cid) ([]byte, error) { +func (s *deprecatedRPCServer) GetNodeByCid(ctx context.Context, wantedCid cid.Cid) ([]byte, error) { { // try from cache data, err, has := s.getNodeFromCache(wantedCid) @@ -561,46 +296,7 @@ func (s *rpcServer) GetNodeByCid(ctx context.Context, wantedCid cid.Cid) ([]byte return s.GetNodeByOffset(ctx, wantedCid, offset) } -func readSectionFromReaderAt(reader ReaderAtCloser, offset uint64, length uint64) ([]byte, error) { - data := make([]byte, length) - _, err := reader.ReadAt(data, int64(offset)) - if err != nil { - return nil, err - } - return data, nil -} - -func readNodeFromReaderAt(reader ReaderAtCloser, wantedCid cid.Cid, offset uint64) ([]byte, error) { - // read MaxVarintLen64 bytes - lenBuf := make([]byte, binary.MaxVarintLen64) - _, err := reader.ReadAt(lenBuf, int64(offset)) - if err != nil { - return nil, err - } - // read uvarint - dataLen, n := binary.Uvarint(lenBuf) - offset += uint64(n) - if dataLen > uint64(util.MaxAllowedSectionSize) { // Don't OOM - return nil, errors.New("malformed car; header is bigger than util.MaxAllowedSectionSize") - } - data := make([]byte, dataLen) - _, err = reader.ReadAt(data, int64(offset)) - if err != nil { - return nil, err - } - - n, gotCid, err := cid.CidFromReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - // verify that the CID we read matches the one we expected. 
- if !gotCid.Equals(wantedCid) { - return nil, fmt.Errorf("CID mismatch: expected %s, got %s", wantedCid, gotCid) - } - return data[n:], nil -} - -func (s *rpcServer) ReadAtFromCar(ctx context.Context, offset uint64, length uint64) ([]byte, error) { +func (s *deprecatedRPCServer) ReadAtFromCar(ctx context.Context, offset uint64, length uint64) ([]byte, error) { if s.localCarReader == nil { // try remote reader if s.remoteCarReader == nil { @@ -624,7 +320,7 @@ func (s *rpcServer) ReadAtFromCar(ctx context.Context, offset uint64, length uin return data, nil } -func (s *rpcServer) GetNodeByOffset(ctx context.Context, wantedCid cid.Cid, offset uint64) ([]byte, error) { +func (s *deprecatedRPCServer) GetNodeByOffset(ctx context.Context, wantedCid cid.Cid, offset uint64) ([]byte, error) { if s.localCarReader == nil { // try remote reader if s.remoteCarReader == nil { @@ -654,55 +350,19 @@ func (s *rpcServer) GetNodeByOffset(ctx context.Context, wantedCid cid.Cid, offs return data, nil } -type GetBlockRequest struct { - Slot uint64 `json:"slot"` - // TODO: add more params -} - -func parseGetBlockRequest(raw *json.RawMessage) (*GetBlockRequest, error) { - var params []any - if err := json.Unmarshal(*raw, ¶ms); err != nil { - klog.Errorf("failed to unmarshal params: %v", err) - return nil, err - } - slotRaw, ok := params[0].(float64) - if !ok { - klog.Errorf("first argument must be a number, got %T", params[0]) - return nil, nil - } - - return &GetBlockRequest{ - Slot: uint64(slotRaw), - }, nil -} - -func (ser *rpcServer) FindCidFromSlot(ctx context.Context, slot uint64) (cid.Cid, error) { +func (ser *deprecatedRPCServer) FindCidFromSlot(ctx context.Context, slot uint64) (cid.Cid, error) { return findCidFromSlot(ser.slotToCidIndex, slot) } -func (ser *rpcServer) FindCidFromSignature(ctx context.Context, sig solana.Signature) (cid.Cid, error) { +func (ser *deprecatedRPCServer) FindCidFromSignature(ctx context.Context, sig solana.Signature) (cid.Cid, error) { return findCidFromSignature(ser.sigToCidIndex, sig) } -func (ser *rpcServer) FindOffsetFromCid(ctx context.Context, cid cid.Cid) (uint64, error) { +func (ser *deprecatedRPCServer) FindOffsetFromCid(ctx context.Context, cid cid.Cid) (uint64, error) { return findOffsetFromCid(ser.cidToOffsetIndex, cid) } -func putValueIntoContext(ctx context.Context, key, value interface{}) context.Context { - return context.WithValue(ctx, key, value) -} - -func getValueFromContext(ctx context.Context, key interface{}) interface{} { - return ctx.Value(key) -} - -// WithSubrapghPrefetch sets the prefetch flag in the context -// to enable prefetching of subgraphs. -func WithSubrapghPrefetch(ctx context.Context, yesNo bool) context.Context { - return putValueIntoContext(ctx, "prefetch", yesNo) -} - -func (ser *rpcServer) GetBlock(ctx context.Context, slot uint64) (*ipldbindcode.Block, error) { +func (ser *deprecatedRPCServer) GetBlock(ctx context.Context, slot uint64) (*ipldbindcode.Block, error) { // get the slot by slot number wantedCid, err := ser.FindCidFromSlot(ctx, slot) if err != nil { @@ -732,7 +392,7 @@ func (ser *rpcServer) GetBlock(ctx context.Context, slot uint64) (*ipldbindcode. 
return decoded, nil } -func (ser *rpcServer) GetEntryByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Entry, error) { +func (ser *deprecatedRPCServer) GetEntryByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Entry, error) { data, err := ser.GetNodeByCid(ctx, wantedCid) if err != nil { klog.Errorf("failed to find node by cid: %v", err) @@ -747,7 +407,7 @@ func (ser *rpcServer) GetEntryByCid(ctx context.Context, wantedCid cid.Cid) (*ip return decoded, nil } -func (ser *rpcServer) GetTransactionByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Transaction, error) { +func (ser *deprecatedRPCServer) GetTransactionByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Transaction, error) { data, err := ser.GetNodeByCid(ctx, wantedCid) if err != nil { klog.Errorf("failed to find node by cid: %v", err) @@ -762,7 +422,7 @@ func (ser *rpcServer) GetTransactionByCid(ctx context.Context, wantedCid cid.Cid return decoded, nil } -func (ser *rpcServer) GetDataFrameByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error) { +func (ser *deprecatedRPCServer) GetDataFrameByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error) { data, err := ser.GetNodeByCid(ctx, wantedCid) if err != nil { klog.Errorf("failed to find node by cid: %v", err) @@ -777,7 +437,7 @@ func (ser *rpcServer) GetDataFrameByCid(ctx context.Context, wantedCid cid.Cid) return decoded, nil } -func (ser *rpcServer) GetRewardsByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Rewards, error) { +func (ser *deprecatedRPCServer) GetRewardsByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Rewards, error) { data, err := ser.GetNodeByCid(ctx, wantedCid) if err != nil { klog.Errorf("failed to find node by cid: %v", err) @@ -792,7 +452,7 @@ func (ser *rpcServer) GetRewardsByCid(ctx context.Context, wantedCid cid.Cid) (* return decoded, nil } -func (ser *rpcServer) GetTransaction(ctx context.Context, sig solana.Signature) (*ipldbindcode.Transaction, error) { +func (ser *deprecatedRPCServer) GetTransaction(ctx context.Context, sig solana.Signature) (*ipldbindcode.Transaction, error) { // get the CID by signature wantedCid, err := ser.FindCidFromSignature(ctx, sig) if err != nil { @@ -822,35 +482,8 @@ func (ser *rpcServer) GetTransaction(ctx context.Context, sig solana.Signature) return decoded, nil } -type GetTransactionRequest struct { - Signature solana.Signature `json:"signature"` - // TODO: add more params -} - -func parseGetTransactionRequest(raw *json.RawMessage) (*GetTransactionRequest, error) { - var params []any - if err := json.Unmarshal(*raw, ¶ms); err != nil { - klog.Errorf("failed to unmarshal params: %v", err) - return nil, err - } - sigRaw, ok := params[0].(string) - if !ok { - klog.Errorf("first argument must be a string") - return nil, nil - } - - sig, err := solana.SignatureFromBase58(sigRaw) - if err != nil { - klog.Errorf("failed to convert signature from base58: %v", err) - return nil, err - } - return &GetTransactionRequest{ - Signature: sig, - }, nil -} - // jsonrpc2.RequestHandler interface -func (ser *rpcServer) Handle(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { +func (ser *deprecatedRPCServer) Handle(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) { switch req.Method { case "getBlock": ser.handleGetBlock(ctx, conn, req) @@ -868,112 +501,3 @@ func (ser *rpcServer) Handle(ctx context.Context, conn *requestContext, req *jso }) } } - -type GetBlockResponse struct { - BlockHeight *uint64 
`json:"blockHeight"` - BlockTime *uint64 `json:"blockTime"` - Blockhash string `json:"blockhash"` - ParentSlot uint64 `json:"parentSlot"` - PreviousBlockhash string `json:"previousBlockhash"` - Rewards any `json:"rewards"` // TODO: use same format as solana - Transactions []GetTransactionResponse `json:"transactions"` -} - -type GetTransactionResponse struct { - // TODO: use same format as solana - Blocktime *uint64 `json:"blockTime,omitempty"` - Meta any `json:"meta"` - Slot *uint64 `json:"slot,omitempty"` - Transaction []any `json:"transaction"` - Version any `json:"version"` - Position uint64 `json:"-"` // TODO: enable this - Signatures []solana.Signature `json:"-"` // TODO: enable this -} - -func loadDataFromDataFrames( - firstDataFrame *ipldbindcode.DataFrame, - dataFrameGetter func(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error), -) ([]byte, error) { - dataBuffer := new(bytes.Buffer) - allFrames, err := getAllFramesFromDataFrame(firstDataFrame, dataFrameGetter) - if err != nil { - return nil, err - } - for _, frame := range allFrames { - dataBuffer.Write(frame.Bytes()) - } - // verify the data hash (if present) - bufHash, ok := firstDataFrame.GetHash() - if !ok { - return dataBuffer.Bytes(), nil - } - err = ipldbindcode.VerifyHash(dataBuffer.Bytes(), bufHash) - if err != nil { - return nil, err - } - return dataBuffer.Bytes(), nil -} - -func getAllFramesFromDataFrame( - firstDataFrame *ipldbindcode.DataFrame, - dataFrameGetter func(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error), -) ([]*ipldbindcode.DataFrame, error) { - frames := []*ipldbindcode.DataFrame{firstDataFrame} - // get the next data frames - next, ok := firstDataFrame.GetNext() - if !ok || len(next) == 0 { - return frames, nil - } - for _, cid := range next { - nextDataFrame, err := dataFrameGetter(context.Background(), cid.(cidlink.Link).Cid) - if err != nil { - return nil, err - } - nextFrames, err := getAllFramesFromDataFrame(nextDataFrame, dataFrameGetter) - if err != nil { - return nil, err - } - frames = append(frames, nextFrames...) 
- } - return frames, nil -} - -func parseTransactionAndMetaFromNode( - transactionNode *ipldbindcode.Transaction, - dataFrameGetter func(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error), -) (tx solana.Transaction, meta any, _ error) { - { - transactionBuffer, err := loadDataFromDataFrames(&transactionNode.Data, dataFrameGetter) - if err != nil { - return solana.Transaction{}, nil, err - } - if err := bin.UnmarshalBin(&tx, transactionBuffer); err != nil { - klog.Errorf("failed to unmarshal transaction: %v", err) - return solana.Transaction{}, nil, err - } else if len(tx.Signatures) == 0 { - klog.Errorf("transaction has no signatures") - return solana.Transaction{}, nil, err - } - } - - { - metaBuffer, err := loadDataFromDataFrames(&transactionNode.Metadata, dataFrameGetter) - if err != nil { - return solana.Transaction{}, nil, err - } - if len(metaBuffer) > 0 { - uncompressedMeta, err := decompressZstd(metaBuffer) - if err != nil { - klog.Errorf("failed to decompress metadata: %v", err) - return - } - status, err := solanatxmetaparsers.ParseAnyTransactionStatusMeta(uncompressedMeta) - if err != nil { - klog.Errorf("failed to parse metadata: %v", err) - return - } - meta = status - } - } - return -} diff --git a/cmd-rpc-server-filecoin.go b/cmd-rpc-server-filecoin.go index 4edd2a13..8e32a985 100644 --- a/cmd-rpc-server-filecoin.go +++ b/cmd-rpc-server-filecoin.go @@ -1,15 +1,12 @@ package main import ( - "encoding/json" "fmt" - "os" "github.com/davecgh/go-spew/spew" "github.com/rpcpool/yellowstone-faithful/compactindex36" "github.com/rpcpool/yellowstone-faithful/gsfa" "github.com/urfave/cli/v2" - "gopkg.in/yaml.v3" ) func newCmd_rpcServerFilecoin() *cli.Command { @@ -54,7 +51,11 @@ func newCmd_rpcServerFilecoin() *cli.Command { return cli.Exit("Must provide a signature-to-CID index filepath/url", 1) } - slotToCidIndexFile, err := openIndexStorage(config.Indexes.SlotToCid) + slotToCidIndexFile, err := openIndexStorage( + c.Context, + config.Indexes.SlotToCid, + DebugMode, + ) if err != nil { return fmt.Errorf("failed to open slot-to-cid index file: %w", err) } @@ -65,7 +66,11 @@ func newCmd_rpcServerFilecoin() *cli.Command { return fmt.Errorf("failed to open slot-to-cid index: %w", err) } - sigToCidIndexFile, err := openIndexStorage(config.Indexes.SigToCid) + sigToCidIndexFile, err := openIndexStorage( + c.Context, + config.Indexes.SigToCid, + DebugMode, + ) if err != nil { return fmt.Errorf("failed to open sig-to-cid index file: %w", err) } @@ -76,7 +81,7 @@ func newCmd_rpcServerFilecoin() *cli.Command { return fmt.Errorf("failed to open sig-to-cid index: %w", err) } - ls, err := newLassieWrapper(c) + ls, err := newLassieWrapper(c, globalFetchProviderAddrInfos) if err != nil { return fmt.Errorf("newLassieWrapper: %w", err) } @@ -161,30 +166,11 @@ func (cfg *RpcServerFilecoinConfig) load(configFilepath string) error { } func (cfg *RpcServerFilecoinConfig) loadFromJSON(configFilepath string) error { - file, err := os.Open(configFilepath) - if err != nil { - return fmt.Errorf("failed to open config file: %w", err) - } - defer file.Close() - return json.NewDecoder(file).Decode(cfg) + return loadFromJSON(configFilepath, cfg) } func (cfg *RpcServerFilecoinConfig) loadFromYAML(configFilepath string) error { - file, err := os.Open(configFilepath) - if err != nil { - return fmt.Errorf("failed to open config file: %w", err) - } - defer file.Close() - - return yaml.NewDecoder(file).Decode(cfg) -} - -func isJSONFile(filepath string) bool { - return filepath[len(filepath)-5:] == 
".json" -} - -func isYAMLFile(filepath string) bool { - return filepath[len(filepath)-5:] == ".yaml" || filepath[len(filepath)-4:] == ".yml" + return loadFromYAML(configFilepath, cfg) } type RpcServerFilecoinConfig struct { diff --git a/cmd-rpc.go b/cmd-rpc.go new file mode 100644 index 00000000..6e46bb54 --- /dev/null +++ b/cmd-rpc.go @@ -0,0 +1,600 @@ +package main + +import ( + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "sort" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/fsnotify/fsnotify" + "github.com/ryanuber/go-glob" + "github.com/urfave/cli/v2" + "golang.org/x/sync/errgroup" + "k8s.io/klog/v2" +) + +func newCmd_rpc() *cli.Command { + var listenOn string + var gsfaOnlySignatures bool + var includePatterns cli.StringSlice + var excludePatterns cli.StringSlice + var watch bool + var pathForProxyForUnknownRpcMethods string + var epochSearchConcurrency int + var epochLoadConcurrency int + return &cli.Command{ + Name: "rpc", + Description: "Provide multiple epoch config files, and start a Solana JSON RPC that exposes getTransaction, getBlock, and (optionally) getSignaturesForAddress", + ArgsUsage: "", + Before: func(c *cli.Context) error { + return nil + }, + Flags: append(lassieFetchFlags, + &cli.StringFlag{ + Name: "listen", + Usage: "Listen address", + Value: ":8899", + Destination: &listenOn, + }, + &cli.BoolFlag{ + Name: "gsfa-only-signatures", + Usage: "gSFA: only return signatures", + Value: false, + Destination: &gsfaOnlySignatures, + }, + &cli.BoolFlag{ + Name: "debug", + Usage: "Enable debug logging", + Value: false, + Destination: &DebugMode, + }, + &cli.StringSliceFlag{ + Name: "include", + Usage: "Include files or dirs matching the given glob patterns", + Value: cli.NewStringSlice(), + Destination: &includePatterns, + }, + &cli.StringSliceFlag{ + Name: "exclude", + Usage: "Exclude files or dirs matching the given glob patterns", + Value: cli.NewStringSlice(".git"), + Destination: &excludePatterns, + }, + &cli.BoolFlag{ + Name: "watch", + Usage: "Watch the config files and directories for changes, and live-(re)load them", + Value: false, + Destination: &watch, + }, + &cli.StringFlag{ + Name: "proxy", + Usage: "Path to a config file that will be used to proxy unknown RPC methods", + Value: "", + Destination: &pathForProxyForUnknownRpcMethods, + }, + &cli.IntFlag{ + Name: "epoch-search-concurrency", + Usage: "How many epochs to search in parallel when searching for a signature", + Value: runtime.NumCPU(), + Destination: &epochSearchConcurrency, + }, + &cli.IntFlag{ + Name: "epoch-load-concurrency", + Usage: "How many epochs to load in parallel when starting the RPC server", + Value: runtime.NumCPU(), + Destination: &epochLoadConcurrency, + }, + ), + Action: func(c *cli.Context) error { + src := c.Args().Slice() + configFiles, err := GetListOfConfigFiles( + src, + includePatterns.Value(), + excludePatterns.Value(), + ) + if err != nil { + return cli.Exit(err.Error(), 1) + } + klog.Infof("Found %d config files:", len(configFiles)) + for _, configFile := range configFiles { + fmt.Printf(" - %s\n", configFile) + } + + // Load configs: + configs := make(ConfigSlice, 0) + for _, configFile := range configFiles { + config, err := LoadConfig(configFile) + if err != nil { + return cli.Exit(fmt.Sprintf("failed to load config file %q: %s", configFile, err.Error()), 1) + } + configs = append(configs, config) + } + // Validate configs: + if err := configs.Validate(); err != nil { + return cli.Exit(fmt.Sprintf("error validating configs: %s", 
err.Error()), 1) + } + configs.SortByEpoch() + klog.Infof("Loaded %d epoch configs", len(configs)) + klog.Info("Initializing epochs...") + + epochs := make([]*Epoch, 0) + wg := new(errgroup.Group) + wg.SetLimit(epochLoadConcurrency) + mu := &sync.Mutex{} + for confIndex := range configs { + config := configs[confIndex] + wg.Go(func() error { + epoch, err := NewEpochFromConfig(config, c) + if err != nil { + return fmt.Errorf("failed to create epoch from config %q: %s", config.ConfigFilepath(), err.Error()) + } + mu.Lock() + defer mu.Unlock() + epochs = append(epochs, epoch) + return nil + }) + } + if err := wg.Wait(); err != nil { + return cli.Exit(fmt.Sprintf("failed to initialize epochs: %s", err.Error()), 1) + } + // Sort epochs by epoch number: + sort.Slice(epochs, func(i, j int) bool { + return epochs[i].Epoch() < epochs[j].Epoch() + }) + + multi := NewMultiEpoch(&Options{ + GsfaOnlySignatures: gsfaOnlySignatures, + EpochSearchConcurrency: epochSearchConcurrency, + }) + + for _, epoch := range epochs { + if err := multi.AddEpoch(epoch.Epoch(), epoch); err != nil { + return cli.Exit(fmt.Sprintf("failed to add epoch %d: %s", epoch.Epoch(), err.Error()), 1) + } + } + + if watch { + dirs, err := GetListOfDirectories( + src, + includePatterns.Value(), + excludePatterns.Value(), + ) + if err != nil { + return cli.Exit(err.Error(), 1) + } + klog.Infof("Found %d directories; will start watching them for changes ...", len(dirs)) + spew.Dump(dirs) + + ctx, cancel := context.WithCancel(c.Context) + defer cancel() + + // create a map that tracks files that are already being processed because of an event: + // this is to avoid processing the same file multiple times + // (e.g. if a file is created and then modified, we don't want to process it twice) + fileProcessingTracker := make(map[string]struct{}) + mu := &sync.Mutex{} + + err = onFileChanged(ctx, dirs, func(event fsnotify.Event) { + if !isJSONFile(event.Name) && !isYAMLFile(event.Name) { + klog.Infof("File %q is not a JSON or YAML file; do nothing", event.Name) + return + } + klog.Infof("File event: name=%q, op=%q", event.Name, event.Op) + + if event.Op != fsnotify.Remove && multi.HasEpochWithSameHashAsFile(event.Name) { + klog.Infof("Epoch with same hash as file %q is already loaded; do nothing", event.Name) + return + } + // register the file as being processed + mu.Lock() + _, ok := fileProcessingTracker[event.Name] + if ok { + klog.Infof("File %q is already being processed; do nothing", event.Name) + mu.Unlock() + return + } + fileProcessingTracker[event.Name] = struct{}{} + mu.Unlock() + // remove the file from the tracker when we're done processing it + defer func() { + mu.Lock() + delete(fileProcessingTracker, event.Name) + mu.Unlock() + }() + + switch event.Op { + case fsnotify.Write: + { + startedAt := time.Now() + klog.Infof("File %q was modified; processing...", event.Name) + // find the config file, load it, and update the epoch (replace) + config, err := LoadConfig(event.Name) + if err != nil { + klog.Errorf("error loading config file %q: %s", event.Name, err.Error()) + return + } + epoch, err := NewEpochFromConfig(config, c) + if err != nil { + klog.Errorf("error creating epoch from config file %q: %s", event.Name, err.Error()) + return + } + err = multi.ReplaceOrAddEpoch(epoch.Epoch(), epoch) + if err != nil { + klog.Errorf("error replacing epoch %d: %s", epoch.Epoch(), err.Error()) + return + } + klog.Infof("Epoch %d added/replaced in %s", epoch.Epoch(), time.Since(startedAt)) + } + case fsnotify.Create: + { + startedAt := 
time.Now() + klog.Infof("File %q was created; processing...", event.Name) + // find the config file, load it, and add it to the multi-epoch (if not already added) + config, err := LoadConfig(event.Name) + if err != nil { + klog.Errorf("error loading config file %q: %s", event.Name, err.Error()) + return + } + epoch, err := NewEpochFromConfig(config, c) + if err != nil { + klog.Errorf("error creating epoch from config file %q: %s", event.Name, err.Error()) + return + } + err = multi.AddEpoch(epoch.Epoch(), epoch) + if err != nil { + klog.Errorf("error adding epoch %d: %s", epoch.Epoch(), err.Error()) + return + } + klog.Infof("Epoch %d added in %s", epoch.Epoch(), time.Since(startedAt)) + } + case fsnotify.Remove: + { + startedAt := time.Now() + klog.Infof("File %q was removed; processing...", event.Name) + // find the epoch that corresponds to this file, and remove it (if any) + epNumber, err := multi.RemoveEpochByConfigFilepath(event.Name) + if err != nil { + klog.Errorf("error removing epoch for config file %q: %s", event.Name, err.Error()) + } + klog.Infof("Epoch %d removed in %s", epNumber, time.Since(startedAt)) + } + case fsnotify.Rename: + klog.Infof("File %q was renamed; do nothing", event.Name) + case fsnotify.Chmod: + klog.Infof("File %q had its permissions changed; do nothing", event.Name) + default: + klog.Infof("File %q had an unknown event %q; do nothing", event.Name, event.Op) + } + }) + if err != nil { + return cli.Exit(err.Error(), 1) + } + } + + var listenerConfig *ListenerConfig + if pathForProxyForUnknownRpcMethods != "" { + proxyConfig, err := LoadProxyConfig(pathForProxyForUnknownRpcMethods) + if err != nil { + return cli.Exit(fmt.Sprintf("failed to load proxy config file %q: %s", pathForProxyForUnknownRpcMethods, err.Error()), 1) + } + listenerConfig = &ListenerConfig{ + ProxyConfig: proxyConfig, + } + } + + return multi.ListenAndServe(c.Context, listenOn, listenerConfig) + }, + } +} + +// TODO: +// - [ ] get the list of provided arguments, and distinguish between files and directories +// - [ ] load all the config files, etc. +// - [ ] start a goroutine that monitors the config files for changes +// - [ ] when a config file changes, reload it and update the epoch +// - [ ] start a goroutine that monitors the directories and subdirectories for changes (new files, deleted files, etc.) +// - is only watching directories sufficient? or do we need to watch files too? +func onFileChanged(ctx context.Context, dirs []string, callback func(fsnotify.Event)) error { + // monitor a directory for file changes + watcher, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("failed to create watcher: %w", err) + } + + // start watching the directories + for _, path := range dirs { + err = watcher.Add(path) + if err != nil { + return fmt.Errorf("failed to add path %q to watcher: %w", path, err) + } + } + + // start a goroutine to handle events + go func() { + defer watcher.Close() + for { + select { + case <-ctx.Done(): + return + case event, ok := <-watcher.Events: + if !ok { + return + } + callback(event) + case err, ok := <-watcher.Errors: + if !ok { + return + } + klog.Errorf("error watching files: %v", err) + } + } + }() + + return nil +} + +// GetListOfDirectories returns a list of all the directories in the given directories and subdirectories +// that match one of the given patterns. +// The directories are first matched against the include patterns, and then against the exclude patterns. 
+// If no include patterns are provided, then all directories are included. +// If no exclude patterns are provided, then no directories are excluded. +// The `.git` directory is always excluded. +func GetListOfDirectories(src []string, includePatterns []string, excludePatterns []string) ([]string, error) { + var allDirs []string + + for _, srcItem := range src { + isDir, err := isDirectory(srcItem) + if err != nil { + return nil, err + } + if isDir { + dirs, err := getDeepDirectories(srcItem, includePatterns, excludePatterns) + if err != nil { + return nil, err + } + allDirs = append(allDirs, dirs...) + } else { + if matchesWithIncludeExcludePatterns(srcItem, includePatterns, excludePatterns) { + allDirs = append(allDirs, srcItem) + } + } + } + + deduped := deduplicate(allDirs) + return deduped, nil +} + +func matchesWithIncludeExcludePatterns(item string, includePatterns []string, excludePatterns []string) bool { + if len(includePatterns) == 0 && len(excludePatterns) == 0 { + return true + } + if len(includePatterns) > 0 { + _, ok := hasMatch(item, includePatterns) + if !ok { + return false + } + } + if len(excludePatterns) > 0 { + _, ok := hasMatch(item, excludePatterns) + if ok { + return false + } + } + return true +} + +func getDeepDirectories(dir string, includePatterns []string, excludePatterns []string) ([]string, error) { + ok, err := exists(dir) + if err != nil { + return nil, fmt.Errorf("error checking if path %q exists: %w", dir, err) + } + if !ok { + return nil, fmt.Errorf("path %q does not exist", dir) + } + + isDir, err := isDirectory(dir) + if err != nil { + return nil, fmt.Errorf("error checking if path %q is a directory: %w", dir, err) + } + if !isDir { + return nil, fmt.Errorf("path %q is not a directory", dir) + } + + dirs, err := walkDirectoryMatchingSubdirectories(dir, includePatterns, excludePatterns) + if err != nil { + return nil, fmt.Errorf("error walking directory %q: %w", dir, err) + } + + return dirs, nil +} + +// GetListOfConfigFiles returns a list of all the config files in the given directories and subdirectories +// that match one of the given patterns. +// The files are first matched against the file extension patterns, then against the include patterns, +// and finally against the exclude patterns. +func GetListOfConfigFiles(src []string, includePatterns []string, excludePatterns []string) ([]string, error) { + fileExtensionPatterns := []string{"*.yaml", "*.yml", "*.json"} + + var allFiles []string + + for _, srcItem := range src { + isDir, err := isDirectory(srcItem) + if err != nil { + return nil, err + } + if isDir { + files, err := getDeepFilesFromDirectory(srcItem, func(entry string) bool { + return itemMatchesAnyPattern(entry, fileExtensionPatterns...) && matchesWithIncludeExcludePatterns(entry, includePatterns, excludePatterns) + }) + if err != nil { + return nil, err + } + allFiles = append(allFiles, files...) + } else { + if itemMatchesAnyPattern(srcItem, fileExtensionPatterns...) && matchesWithIncludeExcludePatterns(srcItem, includePatterns, excludePatterns) { + allFiles = append(allFiles, srcItem) + } + } + } + + return deduplicate(allFiles), nil +} + +// getDeepFilesFromDirectory returns a list of all the files in the given directory and its subdirectories +// that match one of the given patterns. 
+func getDeepFilesFromDirectory(dir string, filter func(string) bool) ([]string, error) { + ok, err := exists(dir) + if err != nil { + return nil, fmt.Errorf("error checking if path %q exists: %w", dir, err) + } + if !ok { + return nil, fmt.Errorf("path %q does not exist", dir) + } + + isDir, err := isDirectory(dir) + if err != nil { + return nil, fmt.Errorf("error checking if path %q is a directory: %w", dir, err) + } + if !isDir { + return nil, fmt.Errorf("path %q is not a directory", dir) + } + + files, err := walkDirectoryMatchingFiles(dir, filter) + if err != nil { + return nil, fmt.Errorf("error walking directory %q: %w", dir, err) + } + + return files, nil +} + +// walk a given directory and return a list of all the files that match the given filter +func walkDirectoryMatchingFiles(dir string, filter func(string) bool) ([]string, error) { + var matching []string + + err := fs.WalkDir(os.DirFS(dir), ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + klog.Errorf("error walking path %q: %v", path, err) + return err + } + if d.IsDir() { + return nil + } + path, err = filepath.Abs(filepath.Join(dir, path)) + if err != nil { + return err + } + matches := filter(path) + if matches { + matching = append(matching, path) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("error walking directory %q: %w", dir, err) + } + + return matching, nil +} + +func walkDirectoryMatchingSubdirectories(dir string, includePatterns []string, excludePatterns []string) ([]string, error) { + var matching []string + + err := fs.WalkDir(os.DirFS(dir), ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + klog.Errorf("error walking path %q: %v", path, err) + return err + } + if !d.IsDir() { + return nil + } + path, err = filepath.Abs(filepath.Join(dir, path)) + if err != nil { + return err + } + { + // if matches `.git` then exclude it + if d.IsDir() && (d.Name() == ".git") { + return filepath.SkipDir + } + } + matches := matchesWithIncludeExcludePatterns(path, includePatterns, excludePatterns) + if matches { + matching = append(matching, path) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("error walking directory %q: %w", dir, err) + } + + return matching, nil +} + +func selectMatching(items []string, patterns ...string) []string { + var matching []string + for _, item := range items { + matches := itemMatchesAnyPattern(item, patterns...) + if matches { + matching = append(matching, item) + } + } + return matching +} + +func selectNotMatching(items []string, patterns ...string) []string { + var matching []string + for _, item := range items { + matches := itemMatchesAnyPattern(item, patterns...) + if !matches { + matching = append(matching, item) + } + } + return matching +} + +func itemMatchesAnyPattern(item string, patterns ...string) bool { + _, ok := hasMatch(item, patterns) + return ok +} + +// hasMatch returns the pattern (exact or glob) that the provided item matches, if any. +func hasMatch(item string, patterns []string) (string, bool) { + if item == "" { + return "", false + } + + // sort the patterns lexicographically so the matching order is deterministic: + sort.Strings(patterns) + + // first, try to find a precise match: + for _, pattern := range patterns { + if pattern == item { + return pattern, true + } + } + // ... 
then look for a glob match: + for _, pattern := range patterns { + if isMatch := glob.Glob(pattern, item); isMatch { + return pattern, true + } + } + return "", false +} + +func deduplicate(items []string) []string { + seen := make(map[string]struct{}) + var deduped []string + for _, item := range items { + if _, ok := seen[item]; !ok { + seen[item] = struct{}{} + deduped = append(deduped, item) + } + } + return deduped +} diff --git a/cmd-x-index-all.go b/cmd-x-index-all.go index 0be896ca..db06ffdf 100644 --- a/cmd-x-index-all.go +++ b/cmd-x-index-all.go @@ -15,6 +15,7 @@ import ( "github.com/gagliardetto/solana-go" "github.com/ipfs/go-cid" carv1 "github.com/ipld/go-car" + "github.com/rpcpool/yellowstone-faithful/bucketteer" "github.com/rpcpool/yellowstone-faithful/compactindex" "github.com/rpcpool/yellowstone-faithful/compactindex36" "github.com/rpcpool/yellowstone-faithful/iplddecoders" @@ -55,7 +56,7 @@ func newCmd_Index_all() *cli.Command { if indexDir == "" { return fmt.Errorf("missing index-dir argument") } - if ok, err := IsDir(indexDir); err != nil { + if ok, err := isDirectory(indexDir); err != nil { return err } else if !ok { return fmt.Errorf("index-dir is not a directory") @@ -71,8 +72,8 @@ func newCmd_Index_all() *cli.Command { if err != nil { return err } - spew.Dump(indexPaths) - klog.Info("Indexes created.") + klog.Info("Indexes created:") + veryPlainSdumpConfig.Dump(indexPaths) if verify { return verifyAllIndexes(context.Background(), carPath, indexPaths) } @@ -83,12 +84,14 @@ func newCmd_Index_all() *cli.Command { } } -func IsDir(path string) (bool, error) { - info, err := os.Stat(path) - if err != nil { - return false, err - } - return info.IsDir(), nil +var veryPlainSdumpConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + DisableMethods: true, + DisablePointerMethods: true, + ContinueOnMethod: true, + SortKeys: true, } func createAllIndexes( @@ -120,6 +123,11 @@ func createAllIndexes( if len(rd.header.Roots) != 1 { return nil, fmt.Errorf("car file must have exactly 1 root, but has %d", len(rd.header.Roots)) } + // print roots: + for _, root := range rd.header.Roots { + klog.Infof("- Root: %s", root) + } + rootCID := rd.header.Roots[0] klog.Infof("Getting car file size") targetFileSize, err := getFileSize(carPath) @@ -177,6 +185,15 @@ func createAllIndexes( } defer sig_to_cid.Close() + sigExistsFilepath := formatSigExistsIndexFilePath(indexDir, carPath, rootCID.String()) + sig_exists, err := bucketteer.NewWriter( + sigExistsFilepath, + ) + if err != nil { + return nil, fmt.Errorf("failed to create sig_exists index: %w", err) + } + defer sig_exists.Close() + totalOffset := uint64(0) { var buf bytes.Buffer @@ -239,6 +256,9 @@ func createAllIndexes( if err != nil { return nil, fmt.Errorf("failed to index signature to cid: %w", err) } + + sig_exists.Put(sig) + numIndexedTransactions++ } } @@ -250,7 +270,7 @@ func createAllIndexes( } if numIndexedOffsets%10_000_000 == 0 { timeFor10_000_000 := time.Since(lastCheckpoint) - howMany10_000_000 := (numTotalItems - numIndexedOffsets) / 10_000_000 + howMany10_000_000 := ((numTotalItems - numIndexedOffsets) / 10_000_000) + 1 eta := timeFor10_000_000 * time.Duration(howMany10_000_000) printToStderr( @@ -274,33 +294,50 @@ func createAllIndexes( klog.Infof("Preparing to seal indexes...") - rootCID := rd.header.Roots[0] paths := &IndexPaths{} + paths.SignatureExists = sigExistsFilepath klog.Infof("Root CID: %s", rootCID) { // seal the indexes - klog.Infof("Sealing cid_to_offset 
index...") - paths.CidToOffset, err = cid_to_offset.Seal(ctx, carPath, rootCID) - if err != nil { - return nil, fmt.Errorf("failed to seal cid_to_offset index: %w", err) + { + klog.Infof("Sealing cid_to_offset index...") + paths.CidToOffset, err = cid_to_offset.Seal(ctx, carPath, rootCID) + if err != nil { + return nil, fmt.Errorf("failed to seal cid_to_offset index: %w", err) + } + klog.Infof("Successfully sealed cid_to_offset index: %s", paths.CidToOffset) } - klog.Infof("Successfully sealed cid_to_offset index: %s", paths.CidToOffset) - klog.Infof("Sealing slot_to_cid index...") - paths.SlotToCid, err = slot_to_cid.Seal(ctx, carPath, rootCID) - if err != nil { - return nil, fmt.Errorf("failed to seal slot_to_cid index: %w", err) + { + klog.Infof("Sealing slot_to_cid index...") + paths.SlotToCid, err = slot_to_cid.Seal(ctx, carPath, rootCID) + if err != nil { + return nil, fmt.Errorf("failed to seal slot_to_cid index: %w", err) + } + klog.Infof("Successfully sealed slot_to_cid index: %s", paths.SlotToCid) } - klog.Infof("Successfully sealed slot_to_cid index: %s", paths.SlotToCid) - klog.Infof("Sealing sig_to_cid index...") - paths.SignatureToCid, err = sig_to_cid.Seal(ctx, carPath, rootCID) - if err != nil { - return nil, fmt.Errorf("failed to seal sig_to_cid index: %w", err) + { + klog.Infof("Sealing sig_to_cid index...") + paths.SignatureToCid, err = sig_to_cid.Seal(ctx, carPath, rootCID) + if err != nil { + return nil, fmt.Errorf("failed to seal sig_to_cid index: %w", err) + } + klog.Infof("Successfully sealed sig_to_cid index: %s", paths.SignatureToCid) + } + + { + klog.Infof("Sealing sig_exists index...") + meta := map[string]string{ + "root_cid": rootCID.String(), + } + if _, err = sig_exists.Seal(meta); err != nil { + return nil, fmt.Errorf("failed to seal sig_exists index: %w", err) + } + klog.Infof("Successfully sealed sig_exists index: %s", paths.SignatureExists) } - klog.Infof("Successfully sealed sig_to_cid index: %s", paths.SignatureToCid) } return paths, nil @@ -315,9 +352,10 @@ func blackText(s string) string { } type IndexPaths struct { - CidToOffset string - SlotToCid string - SignatureToCid string + CidToOffset string + SlotToCid string + SignatureToCid string + SignatureExists string } type Builder_CidToOffset struct { @@ -545,6 +583,17 @@ func verifyAllIndexes( } defer sig_to_cid.Close() + var sig_exists *bucketteer.Reader + if indexes.SignatureExists != "" { + sig_exists, err = bucketteer.Open( + indexes.SignatureExists, + ) + if err != nil { + return fmt.Errorf("failed to open sig_exists index: %w", err) + } + defer sig_exists.Close() + } + totalOffset := uint64(0) { var buf bytes.Buffer @@ -617,6 +666,14 @@ func verifyAllIndexes( if !got.Equals(_cid) { return fmt.Errorf("sig to cid mismatch for sig %s: expected cid %s, got %s", sig, _cid, got) } + + if sig_exists != nil { + if has, err := sig_exists.Has(sig); err != nil { + return fmt.Errorf("failed to check if sig exists in sig_exists index: %w", err) + } else if !has { + return fmt.Errorf("sig %s does not exist in sig_exists index", sig) + } + } numIndexedTransactions++ } } diff --git a/cmd-x-index-cid2offset.go b/cmd-x-index-cid2offset.go index e72198c3..e62336c9 100644 --- a/cmd-x-index-cid2offset.go +++ b/cmd-x-index-cid2offset.go @@ -36,7 +36,7 @@ func newCmd_Index_cid2offset() *cli.Command { indexDir := c.Args().Get(1) tmpDir := c.String("tmp-dir") - if ok, err := IsDir(indexDir); err != nil { + if ok, err := isDirectory(indexDir); err != nil { return err } else if !ok { return fmt.Errorf("index-dir is not 
a directory") diff --git a/cmd-x-index-gsfa.go b/cmd-x-index-gsfa.go index 982d403d..e538c2f3 100644 --- a/cmd-x-index-gsfa.go +++ b/cmd-x-index-gsfa.go @@ -21,6 +21,7 @@ import ( "github.com/rpcpool/yellowstone-faithful/gsfa" "github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode" "github.com/rpcpool/yellowstone-faithful/iplddecoders" + "github.com/rpcpool/yellowstone-faithful/readahead" concurrently "github.com/tejzpr/ordered-concurrently/v3" "github.com/urfave/cli/v2" "k8s.io/klog/v2" @@ -67,7 +68,11 @@ func newCmd_Index_gsfa() *cli.Command { defer file.Close() } - rd, err := car.NewCarReader(file) + cachingReader, err := readahead.NewCachingReaderFromReader(file, readahead.DefaultChunkSize) + if err != nil { + klog.Exitf("Failed to create caching reader: %s", err) + } + rd, err := car.NewCarReader(cachingReader) if err != nil { klog.Exitf("Failed to open CAR: %s", err) } @@ -85,7 +90,7 @@ func newCmd_Index_gsfa() *cli.Command { } indexDir := c.Args().Get(1) - if ok, err := IsDir(indexDir); err != nil { + if ok, err := isDirectory(indexDir); err != nil { return err } else if !ok { return fmt.Errorf("index-dir is not a directory") @@ -172,9 +177,12 @@ func newCmd_Index_gsfa() *cli.Command { for { block, err := rd.Next() - if errors.Is(err, io.EOF) { - fmt.Println("EOF") - break + if err != nil { + if errors.Is(err, io.EOF) { + fmt.Println("EOF") + break + } + return err } kind := iplddecoders.Kind(block.RawData()[1]) diff --git a/cmd-x-index-sig-exists.go b/cmd-x-index-sig-exists.go new file mode 100644 index 00000000..7faf87eb --- /dev/null +++ b/cmd-x-index-sig-exists.go @@ -0,0 +1,287 @@ +package main + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/dustin/go-humanize" + "github.com/gagliardetto/solana-go" + "github.com/ipfs/go-libipfs/blocks" + "github.com/ipld/go-car" + "github.com/rpcpool/yellowstone-faithful/bucketteer" + "github.com/rpcpool/yellowstone-faithful/iplddecoders" + "github.com/rpcpool/yellowstone-faithful/readahead" + concurrently "github.com/tejzpr/ordered-concurrently/v3" + "github.com/urfave/cli/v2" + "k8s.io/klog/v2" +) + +func newCmd_Index_sigExists() *cli.Command { + var verify bool + return &cli.Command{ + Name: "sig-exists", + Description: "Create sig-exists index from a CAR file", + ArgsUsage: " ", + Before: func(c *cli.Context) error { + return nil + }, + Flags: []cli.Flag{ + // verify hash of transactions: + &cli.BoolFlag{ + Name: "verify-hash", + Usage: "verify hash of transactions", + Value: false, + }, + // w number of workers: + &cli.UintFlag{ + Name: "w", + Usage: "number of workers", + Value: uint(runtime.NumCPU()) * 3, + }, + &cli.BoolFlag{ + Name: "verify", + Usage: "verify the index after creating it", + Destination: &verify, + }, + }, + Action: func(c *cli.Context) error { + carPath := c.Args().First() + var file fs.File + var err error + if carPath == "-" { + file = os.Stdin + } else { + file, err = os.Open(carPath) + if err != nil { + klog.Exit(err.Error()) + } + defer file.Close() + } + + cachingReader, err := readahead.NewCachingReaderFromReader(file, readahead.DefaultChunkSize) + if err != nil { + klog.Exitf("Failed to create caching reader: %s", err) + } + rd, err := car.NewCarReader(cachingReader) + if err != nil { + klog.Exitf("Failed to open CAR: %s", err) + } + rootCID := rd.Header.Roots[0] + { + roots := rd.Header.Roots + klog.Infof("Roots: %d", len(roots)) + for i, root := range roots { + if i == 0 
&& len(roots) == 1 { + klog.Infof("- %s (Epoch CID)", root.String()) + } else { + klog.Infof("- %s", root.String()) + } + } + } + + indexDir := c.Args().Get(1) + + if ok, err := isDirectory(indexDir); err != nil { + return err + } else if !ok { + return fmt.Errorf("index-dir is not a directory") + } + + klog.Infof("Creating sig-exists index for %s", carPath) + indexFilePath := formatSigExistsIndexFilePath(indexDir, carPath, rootCID.String()) + index, err := bucketteer.NewWriter( + indexFilePath, + ) + if err != nil { + return fmt.Errorf("error while opening sig-exists index writer: %w", err) + } + defer func() { + if err := index.Close(); err != nil { + klog.Errorf("Error while closing: %s", err) + } + }() + + startedAt := time.Now() + numTransactionsSeen := 0 + defer func() { + klog.Infof("Finished in %s", time.Since(startedAt)) + klog.Infof("Indexed %s transactions", humanize.Comma(int64(numTransactionsSeen))) + }() + dotEvery := 100_000 + klog.Infof("A dot is printed every %s transactions", humanize.Comma(int64(dotEvery))) + + verifyHash = c.Bool("verify-hash") + numWorkers := c.Uint("w") + + if numWorkers == 0 { + numWorkers = uint(runtime.NumCPU()) + } + workerInputChan := make(chan concurrently.WorkFunction, numWorkers) + waitExecuted := new(sync.WaitGroup) + waitResultsReceived := new(sync.WaitGroup) + numReceivedAtomic := new(atomic.Int64) + + outputChan := concurrently.Process( + c.Context, + workerInputChan, + &concurrently.Options{PoolSize: int(numWorkers), OutChannelBuffer: int(numWorkers)}, + ) + go func() { + // process the results from the workers + for result := range outputChan { + switch resValue := result.Value.(type) { + case error: + panic(resValue) + case SignatureAndSlot: + sig := resValue.Signature + { + index.Put(sig) + } + waitResultsReceived.Done() + numReceivedAtomic.Add(-1) + default: + panic(fmt.Errorf("unexpected result type: %T", result.Value)) + } + } + }() + + for { + block, err := rd.Next() + if err != nil { + if errors.Is(err, io.EOF) { + fmt.Println("EOF") + break + } + return err + } + kind := iplddecoders.Kind(block.RawData()[1]) + + switch kind { + case iplddecoders.KindTransaction: + numTransactionsSeen++ + if numTransactionsSeen%dotEvery == 0 { + fmt.Print(".") + } + if numTransactionsSeen%10_000_000 == 0 { + fmt.Println(humanize.Comma(int64(numTransactionsSeen))) + } + { + waitExecuted.Add(1) + waitResultsReceived.Add(1) + numReceivedAtomic.Add(1) + workerInputChan <- newSignatureSlot( + block, + func() { + waitExecuted.Done() + }, + ) + } + default: + continue + } + } + + { + klog.Infof("Waiting for all transactions to be processed...") + waitExecuted.Wait() + klog.Infof("All transactions processed.") + + klog.Infof("Waiting to receive all results...") + close(workerInputChan) + waitResultsReceived.Wait() + klog.Infof("All results received") + } + + klog.Info("Sealing index...") + sealingStartedAt := time.Now() + _, err = index.Seal( + map[string]string{ + "root_cid": rootCID.String(), + }, + ) + if err != nil { + return fmt.Errorf("error while sealing index: %w", err) + } + klog.Infof("Sealed index in %s", time.Since(sealingStartedAt)) + + klog.Infof("Success: sig-exists index created at %s", indexFilePath) + + if verify { + klog.Infof("Verifying index for %s located at %s", carPath, indexFilePath) + startedAt := time.Now() + defer func() { + klog.Infof("Finished in %s", time.Since(startedAt)) + }() + err := VerifyIndex_sigExists(context.TODO(), carPath, indexFilePath) + if err != nil { + return cli.Exit(err, 1) + } + klog.Info("Index verified") 
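+ // (Annotation, not part of the diff: a sketch of the full bucketteer
+ // lifecycle this command exercises, using only calls that appear in
+ // this change set; `path` stands for the index filepath and `sig` for
+ // a solana.Signature:
+ //
+ //   wr, err := bucketteer.NewWriter(path)                          // create the writer
+ //   wr.Put(sig)                                                    // once per tx signature
+ //   _, err = wr.Seal(map[string]string{"root_cid": rootCID.String()}) // finalize on disk
+ //   err = wr.Close()
+ //
+ //   rd, err := bucketteer.Open(path)                               // query side
+ //   has, err := rd.Has(sig)                                        // membership check
+ //   err = rd.Close()
+ // )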
+ return nil + } + return nil + }, + } +} + +func formatSigExistsIndexFilePath(indexDir string, carPath string, rootCID string) string { + return filepath.Join(indexDir, fmt.Sprintf("%s.%s.sig-exists.index", filepath.Base(carPath), rootCID)) +} + +var classicSpewConfig = spew.ConfigState{ + Indent: " ", + DisableMethods: true, + DisablePointerMethods: true, + DisablePointerAddresses: true, +} + +type SignatureAndSlot struct { + Slot uint64 + Signature solana.Signature +} + +type sigToEpochParser struct { + blk blocks.Block + done func() +} + +func newSignatureSlot( + blk blocks.Block, + done func(), +) *sigToEpochParser { + return &sigToEpochParser{ + blk: blk, + done: done, + } +} + +func (w sigToEpochParser) Run(ctx context.Context) interface{} { + defer func() { + w.done() + }() + + block := w.blk + + decoded, err := iplddecoders.DecodeTransaction(block.RawData()) + if err != nil { + return fmt.Errorf("error while decoding transaction from node %s: %w", block.Cid(), err) + } + sig, err := readFirstSignature(decoded.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to read signature: %w", err) + } + return SignatureAndSlot{ + Slot: uint64(decoded.Slot), + Signature: sig, + } +} diff --git a/cmd-x-index-sig2cid.go b/cmd-x-index-sig2cid.go index b8ffdc8d..ccde34cc 100644 --- a/cmd-x-index-sig2cid.go +++ b/cmd-x-index-sig2cid.go @@ -36,7 +36,7 @@ func newCmd_Index_sig2cid() *cli.Command { indexDir := c.Args().Get(1) tmpDir := c.String("tmp-dir") - if ok, err := IsDir(indexDir); err != nil { + if ok, err := isDirectory(indexDir); err != nil { return err } else if !ok { return fmt.Errorf("index-dir is not a directory") diff --git a/cmd-x-index-slot2cid.go b/cmd-x-index-slot2cid.go index fe50a0a1..75fbfc24 100644 --- a/cmd-x-index-slot2cid.go +++ b/cmd-x-index-slot2cid.go @@ -36,7 +36,7 @@ func newCmd_Index_slot2cid() *cli.Command { indexDir := c.Args().Get(1) tmpDir := c.String("tmp-dir") - if ok, err := IsDir(indexDir); err != nil { + if ok, err := isDirectory(indexDir); err != nil { return err } else if !ok { return fmt.Errorf("index-dir is not a directory") diff --git a/cmd-x-index.go b/cmd-x-index.go index 2e8bc517..73664051 100644 --- a/cmd-x-index.go +++ b/cmd-x-index.go @@ -16,8 +16,9 @@ func newCmd_Index() *cli.Command { newCmd_Index_cid2offset(), newCmd_Index_slot2cid(), newCmd_Index_sig2cid(), - newCmd_Index_all(), + newCmd_Index_all(), // NOTE: not actually all. 
newCmd_Index_gsfa(), + newCmd_Index_sigExists(), }, } } diff --git a/cmd-x-verify-index-all.go b/cmd-x-verify-index-all.go index 1b55a014..905ebec3 100644 --- a/cmd-x-verify-index-all.go +++ b/cmd-x-verify-index-all.go @@ -12,7 +12,7 @@ func newCmd_VerifyIndex_all() *cli.Command { return &cli.Command{ Name: "all", Description: "Verify all indexes.", - ArgsUsage: " ", + ArgsUsage: " ", Before: func(c *cli.Context) error { return nil }, @@ -22,6 +22,7 @@ func newCmd_VerifyIndex_all() *cli.Command { indexFilePathCid2Offset := c.Args().Get(1) indexFilePathSlot2Cid := c.Args().Get(2) indexFilePathSig2Cid := c.Args().Get(3) + indexFilePathSigExists := c.Args().Get(4) { startedAt := time.Now() @@ -33,9 +34,10 @@ func newCmd_VerifyIndex_all() *cli.Command { context.TODO(), carPath, &IndexPaths{ - CidToOffset: indexFilePathCid2Offset, - SlotToCid: indexFilePathSlot2Cid, - SignatureToCid: indexFilePathSig2Cid, + CidToOffset: indexFilePathCid2Offset, + SlotToCid: indexFilePathSlot2Cid, + SignatureToCid: indexFilePathSig2Cid, + SignatureExists: indexFilePathSigExists, }, ) if err != nil { diff --git a/cmd-x-verify-index-sig-exists.go b/cmd-x-verify-index-sig-exists.go new file mode 100644 index 00000000..faee3d71 --- /dev/null +++ b/cmd-x-verify-index-sig-exists.go @@ -0,0 +1,38 @@ +package main + +import ( + "context" + "time" + + "github.com/urfave/cli/v2" + "k8s.io/klog/v2" +) + +func newCmd_VerifyIndex_sigExists() *cli.Command { + return &cli.Command{ + Name: "sig-exists", + Description: "Verify the index that tells whether a signature exists in it", + ArgsUsage: " ", + Before: func(c *cli.Context) error { + return nil + }, + Flags: []cli.Flag{}, + Action: func(c *cli.Context) error { + carPath := c.Args().Get(0) + indexFilePath := c.Args().Get(1) + { + startedAt := time.Now() + defer func() { + klog.Infof("Finished in %s", time.Since(startedAt)) + }() + klog.Infof("Verifying sig-exists index for %s", carPath) + err := VerifyIndex_sigExists(context.TODO(), carPath, indexFilePath) + if err != nil { + return err + } + klog.Info("Index verified successfully") + } + return nil + }, + } +} diff --git a/cmd-x-verify-index.go b/cmd-x-verify-index.go index 767c73ad..7b142b79 100644 --- a/cmd-x-verify-index.go +++ b/cmd-x-verify-index.go @@ -16,6 +16,7 @@ func newCmd_VerifyIndex() *cli.Command { newCmd_VerifyIndex_cid2offset(), newCmd_VerifyIndex_slot2cid(), newCmd_VerifyIndex_sig2cid(), + newCmd_VerifyIndex_sigExists(), newCmd_VerifyIndex_all(), }, } diff --git a/compactindex/build.go b/compactindex/build.go index b8aef578..57eb06c0 100644 --- a/compactindex/build.go +++ b/compactindex/build.go @@ -267,11 +267,35 @@ func hashBucket(rd *bufio.Reader, entries []Entry, bitmap []byte, nonce uint32) } // Sort entries. 
- sort.Slice(entries, func(i, j int) bool { - return entries[i].Hash < entries[j].Hash + sortWithCompare(entries, func(i, j int) int { + if entries[i].Hash < entries[j].Hash { + return -1 + } else if entries[i].Hash > entries[j].Hash { + return 1 + } + return 0 }) return nil } var ErrCollision = errors.New("hash collision") + +func sortWithCompare[T any](a []T, compare func(i, j int) int) { + sort.Slice(a, func(i, j int) bool { + return compare(i, j) < 0 + }) + sorted := make([]T, len(a)) + eytzinger(a, sorted, 0, 1) + copy(a, sorted) +} + +func eytzinger[T any](in, out []T, i, k int) int { + if k <= len(in) { + i = eytzinger(in, out, i, 2*k) + out[k-1] = in[i] + i++ + i = eytzinger(in, out, i, 2*k+1) + } + return i +} diff --git a/compactindex/build_test.go b/compactindex/build_test.go index b5fe0840..ccf0b8a1 100644 --- a/compactindex/build_test.go +++ b/compactindex/build_test.go @@ -54,7 +54,7 @@ func TestBuilder(t *testing.T) { // num buckets 0x03, 0x00, 0x00, 0x00, // padding - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // --- Bucket header 0 // hash domain diff --git a/compactindex/compactindex.go b/compactindex/compactindex.go index 8556551e..1aeb18e7 100644 --- a/compactindex/compactindex.go +++ b/compactindex/compactindex.go @@ -91,6 +91,8 @@ import ( // Magic are the first eight bytes of an index. var Magic = [8]byte{'r', 'd', 'c', 'e', 'c', 'i', 'd', 'x'} +const Version = uint8(1) + // Header occurs once at the beginning of the index. type Header struct { FileSize uint64 @@ -110,9 +112,13 @@ func (h *Header) Load(buf *[headerSize]byte) error { FileSize: binary.LittleEndian.Uint64(buf[8:16]), NumBuckets: binary.LittleEndian.Uint32(buf[16:20]), } - // 12 bytes to spare for now. Might use it in the future. + // Check version. + if buf[20] != Version { + return fmt.Errorf("unsupported index version: want %d, got %d", Version, buf[20]) + } + // 11 bytes to spare for now. Might use it in the future. // Force to zero for now. - for _, b := range buf[20:32] { + for _, b := range buf[21:32] { if b != 0x00 { return fmt.Errorf("unsupported index version") } @@ -124,7 +130,8 @@ func (h *Header) Store(buf *[headerSize]byte) { copy(buf[0:8], Magic[:]) binary.LittleEndian.PutUint64(buf[8:16], h.FileSize) binary.LittleEndian.PutUint32(buf[16:20], h.NumBuckets) - for i := 20; i < 32; i++ { + buf[20] = Version + for i := 21; i < 32; i++ { buf[i] = 0 } } diff --git a/compactindex/query.go b/compactindex/query.go index 74f91c9a..49b5b31d 100644 --- a/compactindex/query.go +++ b/compactindex/query.go @@ -9,7 +9,8 @@ import ( // DB is a compactindex handle. type DB struct { Header - Stream io.ReaderAt + Stream io.ReaderAt + prefetch bool } // Open returns a handle to access a compactindex. @@ -33,6 +34,10 @@ func Open(stream io.ReaderAt) (*DB, error) { return db, nil } +func (db *DB) Prefetch(yes bool) { + db.prefetch = yes +} + // Lookup queries for a key in the index and returns the value (offset), if any. // // Returns ErrNotFound if the key is unknown. 
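Aside (annotation, not part of the diff): sortWithCompare above first sorts the entries, then rewrites them into Eytzinger (BFS) order, so lookups start at the median element and descend to children at indexes 2i+1/2i+2 — a layout whose hottest entries sit at the front, which also suits the sequential prefetch added below. A minimal sketch of the layout, assuming only the generic eytzinger helper from this diff:

    sorted := []int{1, 2, 3, 4, 5, 6, 7}
    out := make([]int, len(sorted))
    eytzinger(sorted, out, 0, 1)
    // out == [4 2 6 1 3 5 7]: the median first, then each BFS level.
    // Searching for 5 walks out[0]=4 (greater, go right: index=2),
    // out[2]=6 (smaller, go left: index=5), out[5]=5 (hit) — the same
    // index = index<<1 | 1 (plus 1 when descending right) steps that
    // searchEytzinger takes.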
@@ -68,9 +73,26 @@ func (db *DB) GetBucket(i uint) (*Bucket, error) { return nil, readErr } bucket.Entries = io.NewSectionReader(db.Stream, int64(bucket.FileOffset), int64(bucket.NumEntries)*int64(bucket.Stride)) + if db.prefetch { + // TODO: find good value for numEntriesToPrefetch + numEntriesToPrefetch := minInt64(3_000, int64(bucket.NumEntries)) + prefetchSize := (4 + 3) * numEntriesToPrefetch + buf := make([]byte, prefetchSize) + _, err := bucket.Entries.ReadAt(buf, 0) + if err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + } return bucket, nil } +func minInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + func (db *DB) entryStride() uint8 { hashSize := 3 // TODO remove hardcoded constant offsetSize := intWidth(db.FileSize) @@ -155,18 +177,22 @@ func (b *Bucket) Lookup(key []byte) (uint64, error) { func (b *Bucket) binarySearch(target uint64) (uint64, error) { low := 0 high := int(b.NumEntries) - for low <= high { - median := (low + high) / 2 - entry, err := b.loadEntry(median) + return searchEytzinger(low, high, target, b.loadEntry) +} + +func searchEytzinger(min int, max int, x uint64, getter func(int) (Entry, error)) (uint64, error) { + var index int + for index < max { + k, err := getter(index) if err != nil { return 0, err } - if entry.Hash == target { - return entry.Value, nil - } else if entry.Hash < target { - low = median + 1 - } else { - high = median - 1 + if k.Hash == x { + return k.Value, nil + } + index = index<<1 | 1 + if k.Hash < x { + index++ } } return 0, ErrNotFound diff --git a/compactindex/query_test.go b/compactindex/query_test.go index b275cc70..0908372a 100644 --- a/compactindex/query_test.go +++ b/compactindex/query_test.go @@ -42,7 +42,7 @@ func TestOpen_HeaderOnly(t *testing.T) { // NumBuckets 0x42, 0x00, 0x00, 0x00, // Padding - 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } diff --git a/compactindex36/build.go b/compactindex36/build.go index 4a0a4d6d..c1499673 100644 --- a/compactindex36/build.go +++ b/compactindex36/build.go @@ -141,12 +141,12 @@ func (b *Builder) sealBucket(ctx context.Context, i int, f *os.File) error { HashLen: 3, // TODO remove hardcoded constant FileOffset: uint64(offset), }, - Stride: 3 + intWidth(b.FileSize), // TODO remove hardcoded constant - OffsetWidth: intWidth(b.FileSize), + Stride: 3 + valueLength(), // TODO remove hardcoded constant + OffsetWidth: valueLength(), } // Write entries to file. wr := bufio.NewWriter(f) - entryBuf := make([]byte, desc.HashLen+intWidth(b.FileSize)) // TODO remove hardcoded constant + entryBuf := make([]byte, desc.HashLen+valueLength()) // TODO remove hardcoded constant for _, entry := range entries { desc.marshalEntry(entryBuf, entry) if _, err := wr.Write(entryBuf[:]); err != nil { @@ -273,11 +273,38 @@ func hashBucket(rd *bufio.Reader, entries []Entry, bitmap []byte, nonce uint32) } // Sort entries. 
- sort.Slice(entries, func(i, j int) bool { - return entries[i].Hash < entries[j].Hash + // sort.Slice(entries, func(i, j int) bool { + // return entries[i].Hash < entries[j].Hash + // }) + sortWithCompare(entries, func(i, j int) int { + if entries[i].Hash < entries[j].Hash { + return -1 + } else if entries[i].Hash > entries[j].Hash { + return 1 + } + return 0 }) return nil } var ErrCollision = errors.New("hash collision") + +func sortWithCompare[T any](a []T, compare func(i, j int) int) { + sort.Slice(a, func(i, j int) bool { + return compare(i, j) < 0 + }) + sorted := make([]T, len(a)) + eytzinger(a, sorted, 0, 1) + copy(a, sorted) +} + +func eytzinger[T any](in, out []T, i, k int) int { + if k <= len(in) { + i = eytzinger(in, out, i, 2*k) + out[k-1] = in[i] + i++ + i = eytzinger(in, out, i, 2*k+1) + } + return i +} diff --git a/compactindex36/build_test.go b/compactindex36/build_test.go index c0a10a3b..46fa70b1 100644 --- a/compactindex36/build_test.go +++ b/compactindex36/build_test.go @@ -197,7 +197,7 @@ func TestBuilder(t *testing.T) { // num buckets []byte{0x03, 0x00, 0x00, 0x00}, // 2 // padding - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // 3 + []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // 3 // --- Bucket header 0 // hash domain @@ -247,13 +247,13 @@ func TestBuilder(t *testing.T) { // value []byte{0x01, 0x71, 0x12, 0x20, 0x9c, 0xd0, 0x17, 0x9a, 0x19, 0x9c, 0xd9, 0x51, 0x0a, 0xfb, 0x92, 0x96, 0xcf, 0xd2, 0x9f, 0x77, 0x8a, 0x00, 0x40, 0x32, 0x8b, 0xf8, 0xff, 0x06, 0x46, 0x21, 0xb9, 0x3c, 0x57, 0xa5, 0xdd, 0x0f}, // 22 // hash - []byte{0xe3, 0x09, 0x6b}, // 23 - // value - []byte{0x1, 0x71, 0x12, 0x20, 0x60, 0x67, 0x54, 0xe4, 0x4c, 0x5, 0x99, 0x6f, 0xf9, 0x60, 0x66, 0x27, 0x66, 0xd, 0xa0, 0xda, 0x4f, 0x60, 0x10, 0x6, 0x2, 0x82, 0xf9, 0x46, 0x3d, 0xcc, 0xde, 0x28, 0x80, 0x72, 0x41, 0x67}, // 24 - // hash []byte{0x98, 0x3d, 0xbd}, // 25 // value []byte{0x01, 0x71, 0x12, 0x20, 0x1b, 0x79, 0x02, 0x6c, 0x3d, 0xdc, 0x74, 0x0c, 0x33, 0x71, 0xf0, 0x7a, 0x4b, 0x80, 0xb0, 0x43, 0x0c, 0x82, 0x0a, 0x88, 0x72, 0x13, 0xa6, 0x94, 0x72, 0xc9, 0xd1, 0x8a, 0x2d, 0xc7, 0x88, 0x13}, // 26 + // hash + []byte{0xe3, 0x09, 0x6b}, // 23 + // value + []byte{0x1, 0x71, 0x12, 0x20, 0x60, 0x67, 0x54, 0xe4, 0x4c, 0x5, 0x99, 0x6f, 0xf9, 0x60, 0x66, 0x27, 0x66, 0xd, 0xa0, 0xda, 0x4f, 0x60, 0x10, 0x6, 0x2, 0x82, 0xf9, 0x46, 0x3d, 0xcc, 0xde, 0x28, 0x80, 0x72, 0x41, 0x67}, // 24 ) assert.Equal(t, expected, buf) @@ -339,14 +339,14 @@ func TestBuilder(t *testing.T) { entries, err := buckets[2].Load( /*batchSize*/ 3) require.NoError(t, err) assert.Equal(t, []Entry{ - { - Hash: 7014883, - Value: [36]byte(testCids[2].Bytes()), - }, { Hash: 12402072, Value: [36]byte(testCids[3].Bytes()), }, + { + Hash: 7014883, + Value: [36]byte(testCids[2].Bytes()), + }, }, entries) { diff --git a/compactindex36/compactindex.go b/compactindex36/compactindex.go index 23e260e5..4720795d 100644 --- a/compactindex36/compactindex.go +++ b/compactindex36/compactindex.go @@ -96,6 +96,8 @@ import ( // Magic are the first eight bytes of an index. var Magic = [8]byte{'r', 'd', 'c', 'e', 'c', 'i', 'd', 'x'} +const Version = uint8(1) + // Header occurs once at the beginning of the index. type Header struct { FileSize uint64 @@ -115,9 +117,13 @@ func (h *Header) Load(buf *[headerSize]byte) error { FileSize: binary.LittleEndian.Uint64(buf[8:16]), NumBuckets: binary.LittleEndian.Uint32(buf[16:20]), } - // 12 bytes to spare for now. Might use it in the future. + // Check version. 
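+ // (Annotation, not part of the diff: the 32-byte header is laid out as
+ //   buf[0:8]   magic "rdcecidx"
+ //   buf[8:16]  FileSize, little-endian
+ //   buf[16:20] NumBuckets, little-endian
+ //   buf[20]    format version — new in this change; pre-versioning
+ //              files carry 0 here and are rejected by the check below
+ //   buf[21:32] reserved, must be zero.)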
+ if buf[20] != Version { + return fmt.Errorf("unsupported index version: want %d, got %d", Version, buf[20]) + } + // 11 bytes to spare for now. Might use it in the future. // Force to zero for now. - for _, b := range buf[20:32] { + for _, b := range buf[21:32] { if b != 0x00 { return fmt.Errorf("unsupported index version") } @@ -129,7 +135,8 @@ func (h *Header) Store(buf *[headerSize]byte) { copy(buf[0:8], Magic[:]) binary.LittleEndian.PutUint64(buf[8:16], h.FileSize) binary.LittleEndian.PutUint32(buf[16:20], h.NumBuckets) - for i := 20; i < 32; i++ { + buf[20] = Version + for i := 21; i < 32; i++ { buf[i] = 0 } } @@ -246,11 +253,8 @@ type Entry struct { Value [36]byte } -// intWidth returns the number of bytes minimally required to represent the given integer. -func intWidth(n uint64) uint8 { +func valueLength() uint8 { return 36 // 36 is the length of the CIDs we use. - msb := 64 - bits.LeadingZeros64(n) - return uint8((msb + 7) / 8) } // maxCls64 returns the max integer that has the same amount of leading zeros as n. @@ -272,5 +276,5 @@ func putUintLe(buf []byte, x uint64) bool { var full [8]byte binary.LittleEndian.PutUint64(full[:], x) copy(buf, full[:]) - return int(intWidth(x)) <= len(buf) + return int(valueLength()) <= len(buf) } diff --git a/compactindex36/query.go b/compactindex36/query.go index aca7cadb..c8e06a5c 100644 --- a/compactindex36/query.go +++ b/compactindex36/query.go @@ -14,7 +14,8 @@ import ( // DB is a compactindex handle. type DB struct { Header - Stream io.ReaderAt + Stream io.ReaderAt + prefetch bool } // Open returns a handle to access a compactindex. @@ -38,6 +39,10 @@ func Open(stream io.ReaderAt) (*DB, error) { return db, nil } +func (db *DB) Prefetch(yes bool) { + db.prefetch = yes +} + // Lookup queries for a key in the index and returns the value (offset), if any. // // Returns ErrNotFound if the key is unknown. @@ -64,7 +69,7 @@ func (db *DB) GetBucket(i uint) (*Bucket, error) { bucket := &Bucket{ BucketDescriptor: BucketDescriptor{ Stride: db.entryStride(), - OffsetWidth: intWidth(db.FileSize), + OffsetWidth: valueLength(), }, } // Read bucket header. 
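Aside (annotation, not part of the diff): in the prefetch block added below, each compactindex36 entry is a 3-byte hash followed by a 36-byte CID value, so one sequential ReadAt warms the underlying reader (page cache or a caching remote reader) before the Eytzinger walk issues its scattered point reads; the 3_000-entry cap is an unvalidated heuristic, as the TODO notes. The size works out as:

    stride := 3 + 36                                  // hash + CID value; matches entryStride()
    n := minInt64(3_000, int64(bucket.NumEntries))    // capped entry count
    prefetchSize := int64(stride) * n                 // bytes fetched in one ReadAt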
@@ -73,12 +78,29 @@ func (db *DB) GetBucket(i uint) (*Bucket, error) { return nil, readErr } bucket.Entries = io.NewSectionReader(db.Stream, int64(bucket.FileOffset), int64(bucket.NumEntries)*int64(bucket.Stride)) + if db.prefetch { + // TODO: find good value for numEntriesToPrefetch + numEntriesToPrefetch := minInt64(3_000, int64(bucket.NumEntries)) + prefetchSize := (36 + 3) * numEntriesToPrefetch + buf := make([]byte, prefetchSize) + _, err := bucket.Entries.ReadAt(buf, 0) + if err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + } return bucket, nil } +func minInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + func (db *DB) entryStride() uint8 { hashSize := 3 // TODO remove hardcoded constant - offsetSize := intWidth(db.FileSize) + offsetSize := valueLength() return uint8(hashSize) + offsetSize } @@ -162,21 +184,7 @@ var Empty [36]byte func (b *Bucket) binarySearch(target uint64) ([36]byte, error) { low := 0 high := int(b.NumEntries) - for low <= high { - median := (low + high) / 2 - entry, err := b.loadEntry(median) - if err != nil { - return Empty, err - } - if entry.Hash == target { - return entry.Value, nil - } else if entry.Hash < target { - low = median + 1 - } else { - high = median - 1 - } - } - return Empty, ErrNotFound + return searchEytzinger(low, high, target, b.loadEntry) } func (b *Bucket) loadEntry(i int) (Entry, error) { @@ -191,3 +199,21 @@ func (b *Bucket) loadEntry(i int) (Entry, error) { // ErrNotFound marks a missing entry. var ErrNotFound = errors.New("not found") + +func searchEytzinger(min int, max int, x uint64, getter func(int) (Entry, error)) ([36]byte, error) { + var index int + for index < max { + k, err := getter(index) + if err != nil { + return Empty, err + } + if k.Hash == x { + return k.Value, nil + } + index = index<<1 | 1 + if k.Hash < x { + index++ + } + } + return Empty, ErrNotFound +} diff --git a/compactindex36/query_test.go b/compactindex36/query_test.go index ef708bfb..64efd84d 100644 --- a/compactindex36/query_test.go +++ b/compactindex36/query_test.go @@ -42,7 +42,7 @@ func TestOpen_HeaderOnly(t *testing.T) { // NumBuckets 0x42, 0x00, 0x00, 0x00, // Padding - 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } diff --git a/config.go b/config.go new file mode 100644 index 00000000..a7699c4a --- /dev/null +++ b/config.go @@ -0,0 +1,300 @@ +package main + +import ( + "crypto/sha256" + "errors" + "fmt" + "io" + "os" + "sort" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" +) + +type URI string + +// IsZero returns true if the URI is empty. +func (u URI) IsZero() bool { + return u == "" +} + +// IsValid returns true if the URI is not empty and is a valid URI. +func (u URI) IsValid() bool { + if u.IsZero() { + return false + } + return u.IsLocal() || u.IsRemoteWeb() || u.IsCID() || u.IsIPFS() || u.IsFilecoin() +} + +// IsLocal returns true if the URI is a local file or directory. +func (u URI) IsLocal() bool { + return (len(u) > 7 && u[:7] == "file://") || (len(u) > 1 && u[0] == '/') +} + +// IsRemoteWeb returns true if the URI is a remote web URI (HTTP or HTTPS). +func (u URI) IsRemoteWeb() bool { + // http:// or https:// + return len(u) > 7 && u[:7] == "http://" || len(u) > 8 && u[:8] == "https://" +} + +// IsCID returns true if the URI is a CID. +func (u URI) IsCID() bool { + if u.IsZero() { + return false + } + parsed, err := cid.Parse(string(u)) + return err == nil && parsed.Defined() +} + +// IsIPFS returns true if the URI is an IPFS URI. 
+func (u URI) IsIPFS() bool {
+	return len(u) > 7 && u[:7] == "ipfs://" // "ipfs://" is 7 bytes long
+}
+
+// IsFilecoin returns true if the URI is a Filecoin URI.
+func (u URI) IsFilecoin() bool {
+	return len(u) > 11 && u[:11] == "filecoin://" // "filecoin://" is 11 bytes long
+}
+
+func LoadConfig(configFilepath string) (*Config, error) {
+	var config Config
+	if isJSONFile(configFilepath) {
+		if err := loadFromJSON(configFilepath, &config); err != nil {
+			return nil, err
+		}
+	} else if isYAMLFile(configFilepath) {
+		if err := loadFromYAML(configFilepath, &config); err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, fmt.Errorf("config file %q must be JSON or YAML", configFilepath)
+	}
+	config.originalFilepath = configFilepath
+	sum, err := hashFileSha256(configFilepath)
+	if err != nil {
+		return nil, fmt.Errorf("config file %q: %s", configFilepath, err.Error())
+	}
+	config.hashOfConfigFile = sum
+	return &config, nil
+}
+
+func hashFileSha256(filePath string) (string, error) {
+	f, err := os.Open(filePath)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
+type Config struct {
+	originalFilepath string
+	hashOfConfigFile string
+	Epoch            *uint64 `json:"epoch" yaml:"epoch"`
+	Data             struct {
+		Car *struct {
+			URI URI `json:"uri" yaml:"uri"`
+		} `json:"car" yaml:"car"`
+		Filecoin *struct {
+			// Enable enables Filecoin mode. If false, or if this section is not present, CAR mode is used.
+			Enable    bool     `json:"enable" yaml:"enable"`
+			RootCID   cid.Cid  `json:"root_cid" yaml:"root_cid"`
+			Providers []string `json:"providers" yaml:"providers"`
+		} `json:"filecoin" yaml:"filecoin"`
+	} `json:"data" yaml:"data"`
+	Indexes struct {
+		CidToOffset struct {
+			URI URI `json:"uri" yaml:"uri"`
+		} `json:"cid_to_offset" yaml:"cid_to_offset"`
+		SlotToCid struct {
+			URI URI `json:"uri" yaml:"uri"`
+		} `json:"slot_to_cid" yaml:"slot_to_cid"`
+		SigToCid struct {
+			URI URI `json:"uri" yaml:"uri"`
+		} `json:"sig_to_cid" yaml:"sig_to_cid"`
+		Gsfa struct {
+			URI URI `json:"uri" yaml:"uri"`
+		} `json:"gsfa" yaml:"gsfa"`
+		SigExists struct {
+			URI URI `json:"uri" yaml:"uri"`
+		} `json:"sig_exists" yaml:"sig_exists"`
+	} `json:"indexes" yaml:"indexes"`
+}
+
+func (c *Config) ConfigFilepath() string {
+	return c.originalFilepath
+}
+
+func (c *Config) HashOfConfigFile() string {
+	return c.hashOfConfigFile
+}
+
+func (c *Config) IsSameHash(other *Config) bool {
+	return c.hashOfConfigFile == other.hashOfConfigFile
+}
+
+func (c *Config) IsSameHashAsFile(filepath string) bool {
+	sum, err := hashFileSha256(filepath)
+	if err != nil {
+		return false
+	}
+	return c.hashOfConfigFile == sum
+}
+
+// IsFilecoinMode returns true if the config is in Filecoin mode.
+// This means that the data is going to be fetched from Filecoin directly (by CID).
+func (c *Config) IsFilecoinMode() bool {
+	return c.Data.Filecoin != nil && c.Data.Filecoin.Enable
+}
+
+type ConfigSlice []*Config
+
+func (c ConfigSlice) Validate() error {
+	for _, config := range c {
+		if err := config.Validate(); err != nil {
+			return fmt.Errorf("config file %q: %s", config.ConfigFilepath(), err.Error())
+		}
+	}
+	{
+		// Check that all epochs are unique.
+ epochs := make(map[uint64][]string) + for _, config := range c { + epochs[*config.Epoch] = append(epochs[*config.Epoch], config.originalFilepath) + } + multiErrors := make([]error, 0) + for epoch, configFiles := range epochs { + if len(configFiles) > 1 { + multiErrors = append(multiErrors, fmt.Errorf("epoch %d is defined in multiple config files: %v", epoch, configFiles)) + } + } + if len(multiErrors) > 0 { + return errors.Join(multiErrors...) + } + } + return nil +} + +func (c ConfigSlice) SortByEpoch() { + sort.Slice(c, func(i, j int) bool { + return *c[i].Epoch < *c[j].Epoch + }) +} + +func isSupportedURI(uri URI, path string) error { + isSupported := uri.IsLocal() || uri.IsRemoteWeb() + if !isSupported { + return fmt.Errorf("%s must be a local file or a remote web URI", path) + } + return nil +} + +// Validate checks the config for errors. +func (c *Config) Validate() error { + if c.Epoch == nil { + return fmt.Errorf("epoch must be set") + } + // Distinguish between CAR-mode and Filecoin-mode. + // In CAR-mode, the data is fetched from a CAR file (local or remote). + // In Filecoin-mode, the data is fetched from Filecoin directly (by CID via Lassie). + isFilecoinMode := c.IsFilecoinMode() + isCarMode := !isFilecoinMode + if isCarMode { + if c.Data.Car == nil { + return fmt.Errorf("car-mode=true; data.car must be set") + } + if c.Data.Car.URI.IsZero() { + return fmt.Errorf("data.car.uri must be set") + } + if err := isSupportedURI(c.Data.Car.URI, "data.car.uri"); err != nil { + return err + } + if c.Indexes.CidToOffset.URI.IsZero() { + return fmt.Errorf("indexes.cid_to_offset.uri must be set") + } + if err := isSupportedURI(c.Indexes.CidToOffset.URI, "indexes.cid_to_offset.uri"); err != nil { + return err + } + } else { + if c.Data.Filecoin == nil { + return fmt.Errorf("car-mode=false; data.filecoin must be set") + } + if !c.Data.Filecoin.RootCID.Defined() { + return fmt.Errorf("data.filecoin.root_cid must be set") + } + // validate providers: + + for providerIndex, provider := range c.Data.Filecoin.Providers { + if provider == "" { + return fmt.Errorf("data.filecoin.providers must not be empty") + } + _, err := peer.AddrInfoFromString(provider) + if err != nil { + return fmt.Errorf("data.filecoin.providers[%d]: error parsing provider %q: %w", providerIndex, provider, err) + } + } + + } + + { + { + if c.Indexes.SlotToCid.URI.IsZero() { + return fmt.Errorf("indexes.slot_to_cid.uri must be set") + } + if err := isSupportedURI(c.Indexes.SlotToCid.URI, "indexes.slot_to_cid.uri"); err != nil { + return err + } + } + { + if c.Indexes.SigToCid.URI.IsZero() { + return fmt.Errorf("indexes.sig_to_cid.uri must be set") + } + if err := isSupportedURI(c.Indexes.SigToCid.URI, "indexes.sig_to_cid.uri"); err != nil { + return err + } + } + { + if c.Indexes.SigExists.URI.IsZero() { + return fmt.Errorf("indexes.sig_exists.uri must be set") + } + if err := isSupportedURI(c.Indexes.SigExists.URI, "indexes.sig_exists.uri"); err != nil { + return err + } + } + } + { + // check that the URIs are valid + if isCarMode { + if !c.Data.Car.URI.IsValid() { + return fmt.Errorf("data.car.uri is invalid") + } + if !c.Indexes.CidToOffset.URI.IsValid() { + return fmt.Errorf("indexes.cid_to_offset.uri is invalid") + } + } + if !c.Indexes.SlotToCid.URI.IsValid() { + return fmt.Errorf("indexes.slot_to_cid.uri is invalid") + } + if !c.Indexes.SigToCid.URI.IsValid() { + return fmt.Errorf("indexes.sig_to_cid.uri is invalid") + } + if !c.Indexes.SigExists.URI.IsValid() { + return fmt.Errorf("indexes.sig_exists.uri is 
invalid") + } + { + if !c.Indexes.Gsfa.URI.IsZero() && !c.Indexes.Gsfa.URI.IsValid() { + return fmt.Errorf("indexes.gsfa.uri is invalid") + } + // gsfa index (optional), if set, must be a local directory: + if !c.Indexes.Gsfa.URI.IsZero() && !c.Indexes.Gsfa.URI.IsLocal() { + return fmt.Errorf("indexes.gsfa.uri must be a local directory") + } + } + } + return nil +} diff --git a/epoch.go b/epoch.go new file mode 100644 index 00000000..73eed471 --- /dev/null +++ b/epoch.go @@ -0,0 +1,567 @@ +package main + +import ( + "bufio" + "context" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/ipfs/go-cid" + "github.com/ipld/go-car/util" + carv2 "github.com/ipld/go-car/v2" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/patrickmn/go-cache" + "github.com/rpcpool/yellowstone-faithful/bucketteer" + "github.com/rpcpool/yellowstone-faithful/compactindex" + "github.com/rpcpool/yellowstone-faithful/compactindex36" + "github.com/rpcpool/yellowstone-faithful/gsfa" + "github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode" + "github.com/rpcpool/yellowstone-faithful/iplddecoders" + "github.com/urfave/cli/v2" + "k8s.io/klog/v2" +) + +type Epoch struct { + epoch uint64 + isFilecoinMode bool // true if the epoch is in Filecoin mode (i.e. Lassie mode) + config *Config + // contains indexes and block data for the epoch + lassieFetcher *lassieWrapper + localCarReader *carv2.Reader + remoteCarReader ReaderAtCloser + remoteCarHeaderSize uint64 + cidToOffsetIndex *compactindex.DB + slotToCidIndex *compactindex36.DB + sigToCidIndex *compactindex36.DB + sigExists *bucketteer.Reader + gsfaReader *gsfa.GsfaReader + cidToNodeCache *cache.Cache // TODO: prevent OOM + onClose []func() error + slotToCidCache *cache.Cache + cidToOffsetCache *cache.Cache +} + +func (r *Epoch) getSlotToCidFromCache(slot uint64) (cid.Cid, error, bool) { + if v, ok := r.slotToCidCache.Get(fmt.Sprint(slot)); ok { + return v.(cid.Cid), nil, true + } + return cid.Undef, nil, false +} + +func (r *Epoch) putSlotToCidInCache(slot uint64, c cid.Cid) { + r.slotToCidCache.Set(fmt.Sprint(slot), c, cache.DefaultExpiration) +} + +func (r *Epoch) getCidToOffsetFromCache(c cid.Cid) (uint64, error, bool) { + if v, ok := r.cidToOffsetCache.Get(c.String()); ok { + return v.(uint64), nil, true + } + return 0, nil, false +} + +func (r *Epoch) putCidToOffsetInCache(c cid.Cid, offset uint64) { + r.cidToOffsetCache.Set(c.String(), offset, cache.DefaultExpiration) +} + +func (e *Epoch) Epoch() uint64 { + return e.epoch +} + +func (e *Epoch) IsFilecoinMode() bool { + return e.isFilecoinMode +} + +// IsCarMode returns true if the epoch is in CAR mode. +// This means that the data is going to be fetched from a CAR file (local or remote). +func (e *Epoch) IsCarMode() bool { + return !e.isFilecoinMode +} + +func (e *Epoch) Close() error { + multiErr := make([]error, 0) + for _, fn := range e.onClose { + if err := fn(); err != nil { + multiErr = append(multiErr, err) + } + } + return errors.Join(multiErr...) +} + +func NewEpochFromConfig(config *Config, c *cli.Context) (*Epoch, error) { + if config == nil { + return nil, fmt.Errorf("config must not be nil") + } + isLassieMode := config.IsFilecoinMode() + isCarMode := !isLassieMode + + ep := &Epoch{ + epoch: *config.Epoch, + isFilecoinMode: isLassieMode, + config: config, + onClose: make([]func() error, 0), + } + + if isCarMode { + // The CAR-mode requires a cid-to-offset index. 
+ cidToOffsetIndexFile, err := openIndexStorage( + c.Context, + string(config.Indexes.CidToOffset.URI), + DebugMode, + ) + if err != nil { + return nil, fmt.Errorf("failed to open cid-to-offset index file: %w", err) + } + ep.onClose = append(ep.onClose, cidToOffsetIndexFile.Close) + + cidToOffsetIndex, err := compactindex.Open(cidToOffsetIndexFile) + if err != nil { + return nil, fmt.Errorf("failed to open cid-to-offset index: %w", err) + } + if config.Indexes.CidToOffset.URI.IsRemoteWeb() { + cidToOffsetIndex.Prefetch(true) + } + ep.cidToOffsetIndex = cidToOffsetIndex + } + + { + slotToCidIndexFile, err := openIndexStorage( + c.Context, + string(config.Indexes.SlotToCid.URI), + DebugMode, + ) + if err != nil { + return nil, fmt.Errorf("failed to open slot-to-cid index file: %w", err) + } + ep.onClose = append(ep.onClose, slotToCidIndexFile.Close) + + slotToCidIndex, err := compactindex36.Open(slotToCidIndexFile) + if err != nil { + return nil, fmt.Errorf("failed to open slot-to-cid index: %w", err) + } + if config.Indexes.SlotToCid.URI.IsRemoteWeb() { + slotToCidIndex.Prefetch(true) + } + ep.slotToCidIndex = slotToCidIndex + } + + { + sigToCidIndexFile, err := openIndexStorage( + c.Context, + string(config.Indexes.SigToCid.URI), + DebugMode, + ) + if err != nil { + return nil, fmt.Errorf("failed to open sig-to-cid index file: %w", err) + } + ep.onClose = append(ep.onClose, sigToCidIndexFile.Close) + + sigToCidIndex, err := compactindex36.Open(sigToCidIndexFile) + if err != nil { + return nil, fmt.Errorf("failed to open sig-to-cid index: %w", err) + } + if config.Indexes.SigToCid.URI.IsRemoteWeb() { + sigToCidIndex.Prefetch(true) + } + ep.sigToCidIndex = sigToCidIndex + } + + { + if !config.Indexes.Gsfa.URI.IsZero() { + gsfaIndex, err := gsfa.NewGsfaReader(string(config.Indexes.Gsfa.URI)) + if err != nil { + return nil, fmt.Errorf("failed to open gsfa index: %w", err) + } + ep.onClose = append(ep.onClose, gsfaIndex.Close) + ep.gsfaReader = gsfaIndex + } + } + + if isLassieMode { + fetchProviderAddrInfos, err := ParseFilecoinProviders(config.Data.Filecoin.Providers...) 
+ if err != nil { + return nil, fmt.Errorf("failed to parse Filecoin providers: %w", err) + } + ls, err := newLassieWrapper(c, fetchProviderAddrInfos) + if err != nil { + return nil, fmt.Errorf("newLassieWrapper: %w", err) + } + ep.lassieFetcher = ls + } + + if isCarMode { + localCarReader, remoteCarReader, err := openCarStorage(c.Context, string(config.Data.Car.URI)) + if err != nil { + return nil, fmt.Errorf("failed to open CAR file: %w", err) + } + if localCarReader != nil { + ep.onClose = append(ep.onClose, localCarReader.Close) + } + if remoteCarReader != nil { + ep.onClose = append(ep.onClose, remoteCarReader.Close) + } + ep.localCarReader = localCarReader + ep.remoteCarReader = remoteCarReader + if remoteCarReader != nil { + // read 10 bytes from the CAR file to get the header size + headerSizeBuf, err := readSectionFromReaderAt(remoteCarReader, 0, 10) + if err != nil { + return nil, fmt.Errorf("failed to read CAR header: %w", err) + } + // decode as uvarint + headerSize, n := binary.Uvarint(headerSizeBuf) + if n <= 0 { + return nil, fmt.Errorf("failed to decode CAR header size") + } + ep.remoteCarHeaderSize = uint64(n) + headerSize + } + } + { + sigExistsFile, err := openIndexStorage( + c.Context, + string(config.Indexes.SigExists.URI), + DebugMode, + ) + if err != nil { + return nil, fmt.Errorf("failed to open sig-exists index file: %w", err) + } + ep.onClose = append(ep.onClose, sigExistsFile.Close) + + sigExists, err := bucketteer.NewReader(sigExistsFile) + if err != nil { + return nil, fmt.Errorf("failed to open sig-exists index: %w", err) + } + ep.onClose = append(ep.onClose, sigExists.Close) + + { + // warm up the cache + for i := 0; i < 100_000; i++ { + sigExists.Has(newRandomSignature()) + } + } + + ep.sigExists = sigExists + } + + { + ca := cache.New(30*time.Second, 1*time.Minute) + ep.cidToNodeCache = ca + } + { + ca := cache.New(30*time.Second, 1*time.Minute) + ep.slotToCidCache = ca + } + { + ca := cache.New(30*time.Second, 1*time.Minute) + ep.cidToOffsetCache = ca + } + + return ep, nil +} + +func ParseFilecoinProviders(vs ...string) ([]peer.AddrInfo, error) { + providerAddrInfos := make([]peer.AddrInfo, 0, len(vs)) + + for _, v := range vs { + providerAddrInfo, err := peer.AddrInfoFromString(v) + if err != nil { + return nil, fmt.Errorf("failed to parse provider address %q: %w", v, err) + } + providerAddrInfos = append(providerAddrInfos, *providerAddrInfo) + } + return providerAddrInfos, nil +} + +func newRandomSignature() [64]byte { + var sig [64]byte + rand.Read(sig[:]) + return sig +} + +func (r *Epoch) getNodeFromCache(c cid.Cid) (v []byte, err error, has bool) { + if v, ok := r.cidToNodeCache.Get(c.String()); ok { + return v.([]byte), nil, true + } + return nil, nil, false +} + +func (r *Epoch) putNodeInCache(c cid.Cid, data []byte) { + r.cidToNodeCache.Set(c.String(), data, cache.DefaultExpiration) +} + +func (r *Epoch) Config() *Config { + return r.config +} + +func (s *Epoch) prefetchSubgraph(ctx context.Context, wantedCid cid.Cid) error { + if s.lassieFetcher != nil { + // Fetch the subgraph from lassie + sub, err := s.lassieFetcher.GetSubgraph(ctx, wantedCid) + if err == nil { + // put in cache + return sub.Each(ctx, func(c cid.Cid, data []byte) error { + s.putNodeInCache(c, data) + return nil + }) + } + klog.Errorf("failed to get subgraph from lassie: %v", err) + return err + } + return nil +} + +func (s *Epoch) GetNodeByCid(ctx context.Context, wantedCid cid.Cid) ([]byte, error) { + { + // try from cache + data, err, has := s.getNodeFromCache(wantedCid) + 
if err != nil { + return nil, err + } + if has { + return data, nil + } + } + if s.lassieFetcher != nil { + // Fetch the node from lassie. + data, err := s.lassieFetcher.GetNodeByCid(ctx, wantedCid) + if err == nil { + // put in cache + s.putNodeInCache(wantedCid, data) + return data, nil + } + klog.Errorf("failed to get node from lassie: %v", err) + return nil, err + } + // Find CAR file offset for CID in index. + offset, err := s.FindOffsetFromCid(ctx, wantedCid) + if err != nil { + klog.Errorf("failed to find offset for CID %s: %v", wantedCid, err) + // not found or error + return nil, err + } + return s.GetNodeByOffset(ctx, wantedCid, offset) +} + +func (s *Epoch) ReadAtFromCar(ctx context.Context, offset uint64, length uint64) ([]byte, error) { + if s.localCarReader == nil { + // try remote reader + if s.remoteCarReader == nil { + return nil, fmt.Errorf("no CAR reader available") + } + return readSectionFromReaderAt(s.remoteCarReader, offset, length) + } + // Get reader and seek to offset, then read node. + dr, err := s.localCarReader.DataReader() + if err != nil { + klog.Errorf("failed to get data reader: %v", err) + return nil, err + } + dr.Seek(int64(offset), io.SeekStart) + data := make([]byte, length) + _, err = io.ReadFull(dr, data) + if err != nil { + klog.Errorf("failed to read node: %v", err) + return nil, err + } + return data, nil +} + +func (s *Epoch) GetNodeByOffset(ctx context.Context, wantedCid cid.Cid, offset uint64) ([]byte, error) { + if s.localCarReader == nil { + // try remote reader + if s.remoteCarReader == nil { + return nil, fmt.Errorf("no CAR reader available") + } + return readNodeFromReaderAt(s.remoteCarReader, wantedCid, offset) + } + // Get reader and seek to offset, then read node. + dr, err := s.localCarReader.DataReader() + if err != nil { + klog.Errorf("failed to get data reader: %v", err) + return nil, err + } + dr.Seek(int64(offset), io.SeekStart) + br := bufio.NewReader(dr) + + gotCid, data, err := util.ReadNode(br) + if err != nil { + klog.Errorf("failed to read node: %v", err) + return nil, err + } + // verify that the CID we read matches the one we expected. 
+ if !gotCid.Equals(wantedCid) { + klog.Errorf("CID mismatch: expected %s, got %s", wantedCid, gotCid) + return nil, fmt.Errorf("CID mismatch: expected %s, got %s", wantedCid, gotCid) + } + return data, nil +} + +func (ser *Epoch) FindCidFromSlot(ctx context.Context, slot uint64) (o cid.Cid, e error) { + startedAt := time.Now() + defer func() { + klog.Infof("Found CID for slot %d in %s: %s", slot, time.Since(startedAt), o) + }() + + // try from cache + if c, err, has := ser.getSlotToCidFromCache(slot); err != nil { + return cid.Undef, err + } else if has { + return c, nil + } + found, err := findCidFromSlot(ser.slotToCidIndex, slot) + if err != nil { + return cid.Undef, err + } + ser.putSlotToCidInCache(slot, found) + return found, nil +} + +func (ser *Epoch) FindCidFromSignature(ctx context.Context, sig solana.Signature) (o cid.Cid, e error) { + startedAt := time.Now() + defer func() { + klog.Infof("Found CID for signature %s in %s: %s", sig, time.Since(startedAt), o) + }() + return findCidFromSignature(ser.sigToCidIndex, sig) +} + +func (ser *Epoch) FindOffsetFromCid(ctx context.Context, cid cid.Cid) (o uint64, e error) { + startedAt := time.Now() + defer func() { + klog.Infof("Found offset for CID %s in %s: %d", cid, time.Since(startedAt), o) + }() + + // try from cache + if offset, err, has := ser.getCidToOffsetFromCache(cid); err != nil { + return 0, err + } else if has { + return offset, nil + } + found, err := findOffsetFromCid(ser.cidToOffsetIndex, cid) + if err != nil { + return 0, err + } + ser.putCidToOffsetInCache(cid, found) + return found, nil +} + +func (ser *Epoch) GetBlock(ctx context.Context, slot uint64) (*ipldbindcode.Block, error) { + // get the slot by slot number + wantedCid, err := ser.FindCidFromSlot(ctx, slot) + if err != nil { + klog.Errorf("failed to find CID for slot %d: %v", slot, err) + return nil, err + } + { + doPrefetch := getValueFromContext(ctx, "prefetch") + if doPrefetch != nil && doPrefetch.(bool) { + // prefetch the block + ser.prefetchSubgraph(ctx, wantedCid) + } + } + // get the block by CID + data, err := ser.GetNodeByCid(ctx, wantedCid) + if err != nil { + klog.Errorf("failed to find node by cid: %v", err) + return nil, err + } + // try parsing the data as a Block node. + decoded, err := iplddecoders.DecodeBlock(data) + if err != nil { + klog.Errorf("failed to decode block: %v", err) + return nil, err + } + return decoded, nil +} + +func (ser *Epoch) GetEntryByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Entry, error) { + data, err := ser.GetNodeByCid(ctx, wantedCid) + if err != nil { + klog.Errorf("failed to find node by cid: %v", err) + return nil, err + } + // try parsing the data as an Entry node. + decoded, err := iplddecoders.DecodeEntry(data) + if err != nil { + klog.Errorf("failed to decode entry: %v", err) + return nil, err + } + return decoded, nil +} + +func (ser *Epoch) GetTransactionByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Transaction, error) { + data, err := ser.GetNodeByCid(ctx, wantedCid) + if err != nil { + klog.Errorf("failed to find node by cid: %v", err) + return nil, err + } + // try parsing the data as a Transaction node. 
+	decoded, err := iplddecoders.DecodeTransaction(data)
+	if err != nil {
+		klog.Errorf("failed to decode transaction: %v", err)
+		return nil, err
+	}
+	return decoded, nil
+}
+
+func (ser *Epoch) GetDataFrameByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error) {
+	data, err := ser.GetNodeByCid(ctx, wantedCid)
+	if err != nil {
+		klog.Errorf("failed to find node by cid: %v", err)
+		return nil, err
+	}
+	// try parsing the data as a DataFrame node.
+	decoded, err := iplddecoders.DecodeDataFrame(data)
+	if err != nil {
+		klog.Errorf("failed to decode data frame: %v", err)
+		return nil, err
+	}
+	return decoded, nil
+}
+
+func (ser *Epoch) GetRewardsByCid(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.Rewards, error) {
+	data, err := ser.GetNodeByCid(ctx, wantedCid)
+	if err != nil {
+		klog.Errorf("failed to find node by cid: %v", err)
+		return nil, err
+	}
+	// try parsing the data as a Rewards node.
+	decoded, err := iplddecoders.DecodeRewards(data)
+	if err != nil {
+		klog.Errorf("failed to decode rewards: %v", err)
+		return nil, err
+	}
+	return decoded, nil
+}
+
+func (ser *Epoch) GetTransaction(ctx context.Context, sig solana.Signature) (*ipldbindcode.Transaction, error) {
+	// get the CID by signature
+	wantedCid, err := ser.FindCidFromSignature(ctx, sig)
+	if err != nil {
+		klog.Errorf("failed to find CID for signature %s: %v", sig, err)
+		return nil, err
+	}
+	{
+		doPrefetch := getValueFromContext(ctx, "prefetch")
+		if doPrefetch != nil && doPrefetch.(bool) {
+			// prefetch the block
+			ser.prefetchSubgraph(ctx, wantedCid)
+		}
+	}
+	// get the transaction by CID
+	data, err := ser.GetNodeByCid(ctx, wantedCid)
+	if err != nil {
+		klog.Errorf("failed to get node by cid: %v", err)
+		return nil, err
+	}
+	// try parsing the data as a Transaction node.
+	decoded, err := iplddecoders.DecodeTransaction(data)
+	if err != nil {
+		klog.Errorf("failed to decode transaction: %v", err)
+		return nil, err
+	}
+	return decoded, nil
+}
diff --git a/first.go b/first.go
new file mode 100644
index 00000000..2ac83797
--- /dev/null
+++ b/first.go
@@ -0,0 +1,82 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// FirstResponse is a helper to get the first non-nil result or error from a set of goroutines.
+type FirstResponse struct {
+	result     chan any
+	wg         *errgroup.Group
+	waitWg     chan struct{}
+	resultOnce sync.Once
+	ctx        context.Context
+	gotResult  *atomic.Bool
+}
+
+func NewFirstResponse(ctx context.Context, concurrency int) *FirstResponse {
+	fr := &FirstResponse{
+		result:    make(chan any, 1),
+		waitWg:    make(chan struct{}, 1), // buffered so the notifier goroutine in Wait can always send and exit
+		gotResult: new(atomic.Bool),
+	}
+	fr.wg, ctx = errgroup.WithContext(ctx)
+	if concurrency > 0 {
+		fr.wg.SetLimit(concurrency)
+	}
+	fr.ctx = ctx
+	return fr
+}
+
+// Spawn spawns a goroutine that executes the given function.
+func (w *FirstResponse) Spawn(f func() (any, error)) (ok bool) {
+	if w.gotResult.Load() {
+		return false
+	}
+	w.wg.Go(func() error {
+		result, err := f()
+		if err != nil {
+			return w.send(err) // stop the errgroup
+		} else {
+			if result != nil {
+				return w.send(result) // stop the errgroup
+			}
+		}
+		return nil
+	})
+	return true
+}
+
+var errGotFirstResult = errors.New("got first result")
+
+// send sends the result to the channel, but only once.
+// If the result is already sent, it does nothing.
+// The result can be a value, or an error.
+func (w *FirstResponse) send(result any) error { + w.gotResult.Store(true) + w.resultOnce.Do(func() { + w.result <- result + close(w.result) + }) + return errGotFirstResult +} + +// Wait waits for all goroutines to finish, and returns the first non-null result or error. +func (w *FirstResponse) Wait() any { + go func() { + w.wg.Wait() + w.waitWg <- struct{}{} + }() + + select { + case result := <-w.result: + return result + case <-w.waitWg: + return nil + } +} diff --git a/go.mod b/go.mod index dc61dee4..35837898 100644 --- a/go.mod +++ b/go.mod @@ -10,9 +10,9 @@ require ( github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 // indirect github.com/filecoin-project/go-state-types v0.10.0 // indirect github.com/gagliardetto/binary v0.7.8 - github.com/gagliardetto/solana-go v1.8.3-0.20230302093440-c6043ec381e3 + github.com/gagliardetto/solana-go v1.8.4 github.com/gin-gonic/gin v1.9.0 // indirect - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.3.0 github.com/hannahhoward/go-pubsub v1.0.0 // indirect github.com/ipfs/go-blockservice v0.5.0 // indirect @@ -58,12 +58,17 @@ require ( ) require ( + github.com/fsnotify/fsnotify v1.5.4 + github.com/goware/urlx v0.3.2 github.com/ipld/go-car v0.5.0 + github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 github.com/mr-tron/base58 v1.2.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/ronanh/intcomp v1.1.0 + github.com/ryanuber/go-glob v1.0.0 github.com/tejzpr/ordered-concurrently/v3 v3.0.1 github.com/valyala/fasthttp v1.47.0 + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 gopkg.in/yaml.v3 v3.0.1 k8s.io/klog v1.0.0 ) @@ -71,8 +76,11 @@ require ( require ( contrib.go.opencensus.io/exporter/stackdriver v0.13.14 // indirect filippo.io/edwards25519 v1.0.0 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect + github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect github.com/andybalholm/brotli v1.0.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bep/debounce v1.2.1 // indirect @@ -155,7 +163,6 @@ require ( github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect @@ -195,13 +202,14 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/dig v1.16.1 // indirect go.uber.org/fx v1.19.2 // indirect + go.uber.org/ratelimit v0.2.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/crypto v0.7.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.10.0 // indirect golang.org/x/sys v0.7.0 // indirect golang.org/x/term v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect golang.org/x/tools v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect lukechampine.com/blake3 v1.1.7 // indirect diff --git a/go.sum b/go.sum index 02bcd544..c54c6be1 100644 --- a/go.sum +++ b/go.sum @@ 
-37,12 +37,17 @@ filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7 filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AlekSi/pointer v1.1.0 h1:SSDMPcXD9jSl8FPy9cRzoRaMJtm9g9ggGTxecRUbQoI= github.com/AlekSi/pointer v1.1.0/go.mod h1:y7BvfRI3wXPWKXEBhU71nbnIEEZX0QTSB2Bj48UJIZE= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= @@ -51,6 +56,7 @@ github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkK github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= @@ -192,12 +198,14 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/gagliardetto/binary v0.7.7/go.mod h1:mUuay5LL8wFVnIlecHakSZMvcdqfs+CsotR5n77kyjM= github.com/gagliardetto/binary v0.7.8 h1:hbIUIP8BWhPm/BIdODxY2Lnv4NlJwNdbtsi1xkhNOec= github.com/gagliardetto/binary v0.7.8/go.mod h1:Cn70Gnvyk1OWkNJXwVh3oYqSYhKLHJN+C/Wguw3fc3U= github.com/gagliardetto/gofuzz v1.2.2/go.mod h1:bkH/3hYLZrMLbfYWA0pWzXmi5TTRZnu4pMGZBkqMKvY= -github.com/gagliardetto/solana-go 
v1.8.3-0.20230302093440-c6043ec381e3 h1:PtvmSQDTpZ1mwN1t7UlCrUhTyEozJhF3ixuO1m0+9q0= -github.com/gagliardetto/solana-go v1.8.3-0.20230302093440-c6043ec381e3/go.mod h1:i+7aAyNDTHG0jK8GZIBSI4OVvDqkt2Qx+LklYclRNG8= +github.com/gagliardetto/solana-go v1.8.4 h1:vmD/JmTlonyXGy39bAo0inMhmbdAwV7rXZtLDMZeodE= +github.com/gagliardetto/solana-go v1.8.4/go.mod h1:i+7aAyNDTHG0jK8GZIBSI4OVvDqkt2Qx+LklYclRNG8= github.com/gagliardetto/treeout v0.1.4 h1:ozeYerrLCmCubo1TcIjFiOWTTGteOOHND1twdFpgwaw= github.com/gagliardetto/treeout v0.1.4/go.mod h1:loUefvXTrlRG5rYmJmExNryyBRh8f89VZhmMOyCyqok= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -323,6 +331,8 @@ github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36j github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/goware/urlx v0.3.2 h1:gdoo4kBHlkqZNaf6XlQ12LGtQOmpKJrR04Rc3RnpJEo= +github.com/goware/urlx v0.3.2/go.mod h1:h8uwbJy68o+tQXCGZNa9D73WN8n0r9OBae5bUnLcgjw= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -750,6 +760,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= @@ -935,6 +947,7 @@ go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKY go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/ratelimit v0.2.0 h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA= go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1113,6 +1126,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1135,6 +1149,7 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/gsfa/gsfa-read-multiepoch.go b/gsfa/gsfa-read-multiepoch.go new file mode 100644 index 00000000..488c1536 --- /dev/null +++ b/gsfa/gsfa-read-multiepoch.go @@ -0,0 +1,176 @@ +package gsfa + +import ( + "context" + "errors" + "fmt" + + "github.com/gagliardetto/solana-go" + "github.com/rpcpool/yellowstone-faithful/gsfa/offsetstore" +) + +type GsfaReaderMultiepoch struct { + epochs []*GsfaReader +} + +func NewGsfaReaderMultiepoch(epochs []*GsfaReader) (*GsfaReaderMultiepoch, error) { + // Check that the epoch is set: + for i, epoch := range epochs { + if epoch.epoch == nil { + return nil, fmt.Errorf("epoch is not set for the #%d provided gsfa reader", i) + } + } + + return &GsfaReaderMultiepoch{ + epochs: epochs, + }, nil +} + +// Close closes all the gsfa readers. +func (gsfa *GsfaReaderMultiepoch) Close() error { + var errs []error + for _, epoch := range gsfa.epochs { + err := epoch.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errors.Join(errs...) + } + return nil +} + +// Get gets the signatures for the given public key. +func (gsfa *GsfaReaderMultiepoch) Get( + ctx context.Context, + pk solana.PublicKey, + limit int, +) (EpochToSignatures, error) { + if limit <= 0 { + return nil, nil + } + sigs := make(EpochToSignatures) + currentLimit := limit +epochLoop: + for _, epoch := range gsfa.epochs { + epochSigs, err := epoch.Get(ctx, pk, currentLimit) + if err != nil { + return nil, err + } + epochNum, ok := epoch.GetEpoch() + if !ok { + return nil, fmt.Errorf("epoch is not set for the provided gsfa reader") + } + for _, sig := range epochSigs { + sigs[epochNum] = append(sigs[epochNum], sig) + currentLimit-- + if currentLimit <= 0 { + break epochLoop + } + } + if currentLimit <= 0 { + break epochLoop + } + } + return sigs, nil +} + +type EpochToSignatures map[uint64][]solana.Signature + +// Count returns the number of signatures in the EpochToSignatures. +func (e EpochToSignatures) Count() int { + var count int + for _, sigs := range e { + count += len(sigs) + } + return count +} + +func (multi *GsfaReaderMultiepoch) GetBeforeUntil( + ctx context.Context, + pk solana.PublicKey, + limit int, + before *solana.Signature, // Before this signature, exclusive (i.e. 
get signatures older than this signature, excluding it).
+	until *solana.Signature, // Until this signature, inclusive (i.e. stop at this signature, including it).
+) (EpochToSignatures, error) {
+	if limit <= 0 {
+		return make(EpochToSignatures), nil
+	}
+	return multi.iterBeforeUntil(ctx, pk, limit, before, until)
+}
+
+// iterBeforeUntil gets the signatures for the given public key,
+// before the given signature (exclusive) and until the given signature (inclusive).
+func (multi *GsfaReaderMultiepoch) iterBeforeUntil(
+	ctx context.Context,
+	pk solana.PublicKey,
+	limit int,
+	before *solana.Signature, // Before this signature, exclusive (i.e. get signatures older than this signature, excluding it).
+	until *solana.Signature, // Until this signature, inclusive (i.e. stop at this signature, including it).
+) (EpochToSignatures, error) {
+	if limit <= 0 {
+		return make(EpochToSignatures), nil
+	}
+
+	sigs := make(EpochToSignatures)
+	reachedBefore := false
+	if before == nil {
+		reachedBefore = true
+	}
+
+epochLoop:
+	for readerIndex, index := range multi.epochs {
+		epochNum, ok := index.GetEpoch()
+		if !ok {
+			return nil, fmt.Errorf("epoch is not set for the #%d provided gsfa reader", readerIndex)
+		}
+
+		locs, err := index.offsets.Get(context.Background(), pk)
+		if err != nil {
+			if offsetstore.IsNotFound(err) {
+				continue epochLoop
+			}
+			return nil, fmt.Errorf("error while getting initial offset: %w", err)
+		}
+		debugln("locs:", locs)
+
+		next := locs.OffsetToLatest // Start from the latest, and go back in time.
+
+		for {
+			if next == 0 {
+				continue epochLoop
+			}
+			if limit > 0 && sigs.Count() >= limit {
+				break epochLoop
+			}
+			sigIndexes, newNext, err := index.ll.Read(next)
+			if err != nil {
+				return nil, fmt.Errorf("error while reading linked log with next=%d: %w", next, err)
+			}
+			debugln("sigIndexes:", sigIndexes, "newNext:", newNext)
+			next = newNext
+			for _, sigIndex := range sigIndexes {
+				sig, err := index.sff.Get(sigIndex)
+				if err != nil {
+					return nil, fmt.Errorf("error while getting signature at index=%d: %w", sigIndex, err)
+				}
+				if !reachedBefore && sig == *before {
+					reachedBefore = true
+					continue
+				}
+				if !reachedBefore {
+					continue
+				}
+				if limit > 0 && sigs.Count() >= limit {
+					break epochLoop
+				}
+				sigs[epochNum] = append(sigs[epochNum], sig)
+				if until != nil && sig == *until {
+					break epochLoop
+				}
+			}
+		}
+	}
+	return sigs, nil
+}
diff --git a/gsfa/gsfa-read.go b/gsfa/gsfa-read.go
index 7e6d3abb..61c18dfa 100644
--- a/gsfa/gsfa-read.go
+++ b/gsfa/gsfa-read.go
@@ -6,17 +6,16 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"time"
 
 	"github.com/gagliardetto/solana-go"
 	"github.com/rpcpool/yellowstone-faithful/gsfa/linkedlog"
 	"github.com/rpcpool/yellowstone-faithful/gsfa/manifest"
 	"github.com/rpcpool/yellowstone-faithful/gsfa/offsetstore"
 	"github.com/rpcpool/yellowstone-faithful/gsfa/sff"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store"
 )
 
 type GsfaReader struct {
+	epoch   *uint64
 	offsets *offsetstore.OffsetStore
 	ll      *linkedlog.LinkedLog
 	sff     *sff.SignaturesFlatFile
@@ -41,12 +40,11 @@ func NewGsfaReader(indexRootDir string) (*GsfaReader, error) {
 	index := &GsfaReader{}
 	{
 		offsetsIndexDir := filepath.Join(indexRootDir, "offsets-index")
-		offsets, err := offsetstore.OpenOffsetStore(
+		offsets, err := offsetstore.Open(
 			context.Background(),
 			filepath.Join(offsetsIndexDir, "index"),
 			filepath.Join(offsetsIndexDir, "data"),
-			store.IndexBitSize(22), // NOTE: if you don't specify this, the final size is smaller.
- store.GCInterval(time.Hour), + offsetstoreOptions..., ) if err != nil { return nil, fmt.Errorf("error while opening index: %w", err) @@ -78,6 +76,17 @@ func NewGsfaReader(indexRootDir string) (*GsfaReader, error) { return index, nil } +func (index *GsfaReader) SetEpoch(epoch uint64) { + index.epoch = &epoch +} + +func (index *GsfaReader) GetEpoch() (uint64, bool) { + if index.epoch == nil { + return 0, false + } + return *index.epoch, true +} + func (index *GsfaReader) Close() error { return errors.Join( index.offsets.Close(), @@ -91,6 +100,9 @@ func (index *GsfaReader) Get( pk solana.PublicKey, limit int, ) ([]solana.Signature, error) { + if limit <= 0 { + return []solana.Signature{}, nil + } locs, err := index.offsets.Get(context.Background(), pk) if err != nil { if offsetstore.IsNotFound(err) { @@ -137,6 +149,9 @@ func (index *GsfaReader) GetBeforeUntil( before *solana.Signature, // Before this signature, exclusive (i.e. get signatures older than this signature, excluding it). until *solana.Signature, // Until this signature, inclusive (i.e. stop at this signature, including it). ) ([]solana.Signature, error) { + if limit <= 0 { + return []solana.Signature{}, nil + } locs, err := index.offsets.Get(context.Background(), pk) if err != nil { if offsetstore.IsNotFound(err) { diff --git a/gsfa/gsfa-write.go b/gsfa/gsfa-write.go index 45020d65..d5928a6b 100644 --- a/gsfa/gsfa-write.go +++ b/gsfa/gsfa-write.go @@ -14,7 +14,7 @@ import ( "github.com/rpcpool/yellowstone-faithful/gsfa/manifest" "github.com/rpcpool/yellowstone-faithful/gsfa/offsetstore" "github.com/rpcpool/yellowstone-faithful/gsfa/sff" - "github.com/rpcpool/yellowstone-faithful/gsfa/store" + "github.com/rpcpool/yellowstone-faithful/store" "k8s.io/klog" ) @@ -31,6 +31,11 @@ type GsfaWriter struct { firstSlotOfCurrentBatch uint64 } +var offsetstoreOptions = []store.Option{ + store.IndexBitSize(22), + store.GCInterval(time.Hour), +} + // NewGsfaWriter creates or opens an existing index in WRITE mode. 
func NewGsfaWriter( indexRootDir string, @@ -62,12 +67,11 @@ func NewGsfaWriter( if err := os.MkdirAll(offsetsIndexDir, 0o755); err != nil { return nil, err } - offsets, err := offsetstore.OpenOffsetStore( + offsets, err := offsetstore.Open( context.Background(), filepath.Join(offsetsIndexDir, "index"), filepath.Join(offsetsIndexDir, "data"), - store.IndexBitSize(22), - store.GCInterval(time.Hour), + offsetstoreOptions..., ) if err != nil { return nil, fmt.Errorf("error while opening offset index: %w", err) diff --git a/gsfa/offsetstore/store.go b/gsfa/offsetstore/offsetstore.go similarity index 92% rename from gsfa/offsetstore/store.go rename to gsfa/offsetstore/offsetstore.go index 9b6d5452..3b47d647 100644 --- a/gsfa/offsetstore/store.go +++ b/gsfa/offsetstore/offsetstore.go @@ -6,8 +6,8 @@ import ( "errors" "github.com/gagliardetto/solana-go" - store "github.com/rpcpool/yellowstone-faithful/gsfa/store" - storetypes "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + store "github.com/rpcpool/yellowstone-faithful/store" + storetypes "github.com/rpcpool/yellowstone-faithful/store/types" ) type errorType string @@ -29,8 +29,8 @@ type Locs struct { OffsetToLatest uint64 } -// OpenOffsetStore opens a HashedBlockstore with the default index size -func OpenOffsetStore(ctx context.Context, indexPath string, dataPath string, options ...store.Option) (*OffsetStore, error) { +// Open opens a HashedBlockstore with the default index size +func Open(ctx context.Context, indexPath string, dataPath string, options ...store.Option) (*OffsetStore, error) { store, err := store.OpenStore( ctx, store.GsfaPrimary, @@ -52,7 +52,7 @@ func (as *OffsetStore) Close() error { return as.store.Close() } -func (as *OffsetStore) DeleteAccount(ctx context.Context, pk solana.PublicKey) error { +func (as *OffsetStore) Delete(ctx context.Context, pk solana.PublicKey) error { if ctx.Err() != nil { return ctx.Err() } diff --git a/http-handler.go b/http-handler.go index 9a5455a2..81bf5feb 100644 --- a/http-handler.go +++ b/http-handler.go @@ -3,6 +3,7 @@ package main import ( "encoding/json" "net/http" + "strings" "time" jsoniter "github.com/json-iterator/go" @@ -11,7 +12,7 @@ import ( "k8s.io/klog/v2" ) -func newRPCHandler_fast(handler *rpcServer) func(ctx *fasthttp.RequestCtx) { +func newRPCHandler_fast(handler *deprecatedRPCServer) func(ctx *fasthttp.RequestCtx) { return func(c *fasthttp.RequestCtx) { startedAt := time.Now() defer func() { @@ -56,7 +57,7 @@ func newRPCHandler_fast(handler *rpcServer) func(ctx *fasthttp.RequestCtx) { return } - klog.Infof("Received request: %q", string(body)) + klog.Infof("Received request: %q", strings.TrimSpace(string(body))) rqCtx := &requestContext{ctx: c} diff --git a/http-range.go b/http-range.go new file mode 100644 index 00000000..099e3a36 --- /dev/null +++ b/http-range.go @@ -0,0 +1,226 @@ +package main + +import ( + "context" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/goware/urlx" +) + +type ReaderAtCloser interface { + io.ReaderAt + io.Closer +} + +func getContentSizeWithHeadOrZeroRange(url string) (int64, error) { + // try sending a HEAD request to the server to get the file size: + resp, err := http.Head(url) + if err != nil { + return 0, err + } + if resp.StatusCode != http.StatusOK { + // try sending a GET request with a zero range to the server to get the file size: + req := &http.Request{ + Method: "GET", + URL: resp.Request.URL, + Header: make(http.Header), + } + req.Header.Set("Range", "bytes=0-0") + resp, err = 
http.DefaultClient.Do(req)
+		if err != nil {
+			return 0, err
+		}
+		if resp.StatusCode != http.StatusPartialContent {
+			return 0, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+		}
+		// now find the content length:
+		contentRange := resp.Header.Get("Content-Range")
+		if contentRange == "" {
+			return 0, fmt.Errorf("missing Content-Range header")
+		}
+		var contentLength int64
+		_, err := fmt.Sscanf(contentRange, "bytes 0-0/%d", &contentLength)
+		if err != nil {
+			return 0, err
+		}
+		return contentLength, nil
+	}
+	return resp.ContentLength, nil
+}
+
+// remoteHTTPFileAsIoReaderAt returns a ReaderAtCloser for a remote file.
+// The returned ReaderAtCloser is backed by a http.Client.
+func remoteHTTPFileAsIoReaderAt(ctx context.Context, url string) (ReaderAtCloser, error) {
+	// send a request to the server to get the file size:
+	contentLength, err := getContentSizeWithHeadOrZeroRange(url)
+	if err != nil {
+		return nil, err
+	}
+	if contentLength == 0 {
+		return nil, fmt.Errorf("missing Content-Length/Content-Range header, or file is empty")
+	}
+
+	// Wrap the remote file in a reader backed by a range cache,
+	// which is garbage-collected once a minute (see StartCacheGC below).
+	rr := &HTTPSingleFileRemoteReaderAt{
+		url:           url,
+		contentLength: contentLength,
+		client:        newHTTPClient(),
+	}
+	parsedURL, err := urlx.Parse(url)
+	if err != nil {
+		return nil, err
+	}
+	name := filepath.Base(parsedURL.Path)
+
+	rc := NewRangeCache(
+		contentLength,
+		name,
+		func(p []byte, off int64) (n int, err error) {
+			return remoteReadAt(rr.client, rr.url, p, off)
+		})
+	rc.StartCacheGC(ctx, 1*time.Minute)
+	rr.ca = rc
+
+	return rr, nil
+}
+
+type HTTPSingleFileRemoteReaderAt struct {
+	url           string
+	contentLength int64
+	client        *http.Client
+	ca            *RangeCache
+}
+
+// Close implements io.Closer.
+func (r *HTTPSingleFileRemoteReaderAt) Close() error {
+	r.client.CloseIdleConnections()
+	return r.ca.Close()
+}
+
+func retryExponentialBackoff(
+	ctx context.Context,
+	startDuration time.Duration,
+	maxRetries int,
+	fn func() error,
+) error {
+	var err error
+	for i := 0; i < maxRetries; i++ {
+		err = fn()
+		if err == nil {
+			return nil
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(startDuration):
+			startDuration *= 2
+		}
+	}
+	return fmt.Errorf("failed after %d retries; last error: %w", maxRetries, err)
+}
+
+func (r *HTTPSingleFileRemoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+	if off >= r.contentLength {
+		return 0, io.EOF
+	}
+	v, err := r.ca.GetRange(context.Background(), off, int64(len(p)))
+	if err != nil {
+		return 0, err
+	}
+	n = copy(p, v)
+	if n < len(p) {
+		return n, io.ErrUnexpectedEOF
+	}
+	return n, nil
+}
+
+func remoteReadAt(client *http.Client, url string, p []byte, off int64) (n int, err error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return 0, err
+	}
+	{
+		req.Header.Set("Connection", "keep-alive")
+		req.Header.Set("Keep-Alive", "timeout=600")
+	}
+
+	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)) // HTTP byte ranges are inclusive on both ends
+
+	var resp *http.Response
+	err = retryExponentialBackoff(
+		context.Background(),
+		100*time.Millisecond,
+		3,
+		func() error {
+			resp, err = client.Do(req)
+			return err
+		})
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+	{
+		n, err := io.ReadFull(resp.Body, p)
+		if err != nil {
+			return 0, err
+		}
+		return n, nil
+	}
+}
+
+type readCloserWrapper struct {
+	rac      ReaderAtCloser
+	isRemote bool
+	name     string
+}
+
+// ReadAt reads from the wrapped ReaderAt and, in debug mode, logs which file and byte range was read and how long it took.
+func (r *readCloserWrapper) ReadAt(p []byte, off int64) (n int, err error) {
+	startedAt := time.Now()
+	defer func() {
+		took := time.Since(startedAt)
+		if DebugMode {
+			var icon string
+			if r.isRemote {
+				// add internet icon
+				icon = "🌍 "
+			} else {
+				// add disk icon
+				icon = "💾 "
+			}
+			prefix := icon + "[READ-UNKNOWN]"
+			// if has suffix .index, then it's an index file
+			if strings.HasSuffix(r.name, ".index") {
+				prefix = icon + azureBG("[READ-INDEX]")
+			}
+			// if has suffix .car, then it's a car file
+			if strings.HasSuffix(r.name, ".car") {
+				prefix = icon + purpleBG("[READ-CAR]")
+			}
+			fmt.Fprintf(os.Stderr, prefix+" %s:%d+%d (%s)\n", filepath.Base(r.name), off, len(p), took)
+		}
+	}()
+	return r.rac.ReadAt(p, off)
+}
+
+func purpleBG(s string) string {
+	// blue bg, black fg
+	return "\033[48;5;4m\033[38;5;0m" + s + "\033[0m"
+}
+
+func azureBG(s string) string {
+	// azure bg, black fg
+	return "\033[48;5;6m\033[38;5;0m" + s + "\033[0m"
+}
+
+// Close closes the wrapped ReaderAtCloser.
+func (r *readCloserWrapper) Close() error {
+	return r.rac.Close()
+}
diff --git a/index-sig-to-cid.go b/index-sig-to-cid.go
index c1e0585d..7f7b0623 100644
--- a/index-sig-to-cid.go
+++ b/index-sig-to-cid.go
@@ -12,6 +12,7 @@ import (
 	"github.com/gagliardetto/solana-go"
 	"github.com/ipfs/go-cid"
 	carv2 "github.com/ipld/go-car/v2"
+	"github.com/rpcpool/yellowstone-faithful/bucketteer"
 	"github.com/rpcpool/yellowstone-faithful/compactindex36"
 	"github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode"
 	"k8s.io/klog/v2"
@@ -189,7 +190,7 @@ func VerifyIndex_sig2cid(ctx context.Context, carPath string, indexFilePath stri
 
 	got, err := findCidFromSignature(c2o, sig)
 	if err != nil {
-		return fmt.Errorf("failed to put cid to offset: %w", err)
+		return fmt.Errorf("failed to find cid from signature: %w", err)
 	}
 
 	if !got.Equals(c) {
@@
-209,6 +210,95 @@ func VerifyIndex_sig2cid(ctx context.Context, carPath string, indexFilePath stri return nil } +func VerifyIndex_sigExists(ctx context.Context, carPath string, indexFilePath string) error { + // Check if the CAR file exists: + exists, err := fileExists(carPath) + if err != nil { + return fmt.Errorf("failed to check if CAR file exists: %w", err) + } + if !exists { + return fmt.Errorf("CAR file %s does not exist", carPath) + } + + // Check if the index file exists: + exists, err = fileExists(indexFilePath) + if err != nil { + return fmt.Errorf("failed to check if index file exists: %w", err) + } + if !exists { + return fmt.Errorf("index file %s does not exist", indexFilePath) + } + + cr, err := carv2.OpenReader(carPath) + if err != nil { + return fmt.Errorf("failed to open CAR file: %w", err) + } + + // check it has 1 root + roots, err := cr.Roots() + if err != nil { + return fmt.Errorf("failed to get roots: %w", err) + } + // There should be only one root CID in the CAR file. + if len(roots) != 1 { + return fmt.Errorf("CAR file has %d roots, expected 1", len(roots)) + } + + sigExists, err := bucketteer.Open(indexFilePath) + if err != nil { + return fmt.Errorf("failed to open index: %w", err) + } + + // check root_cid matches + rootCID := roots[0] + storedRootCidString := sigExists.GetMeta("root_cid") + if storedRootCidString == "" { + return fmt.Errorf("index file does not have a root_cid meta") + } + storedRootCid, err := cid.Parse(storedRootCidString) + if err != nil { + return fmt.Errorf("failed to parse stored root cid: %w", err) + } + if !rootCID.Equals(storedRootCid) { + return fmt.Errorf("root CID mismatch: expected %s, got %s", rootCID, storedRootCid) + } + + dr, err := cr.DataReader() + if err != nil { + return fmt.Errorf("failed to get data reader: %w", err) + } + + numItems := uint64(0) + err = FindTransactions( + ctx, + dr, + func(c cid.Cid, txNode *ipldbindcode.Transaction) error { + sig, err := readFirstSignature(txNode.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to read signature: %w", err) + } + + got, err := sigExists.Has(sig) + if err != nil { + return fmt.Errorf("failed to check if sig exists: %w", err) + } + if !got { + return fmt.Errorf("sig %s: expected to exist, but it does not", sig) + } + + numItems++ + if numItems%100_000 == 0 { + printToStderr(".") + } + + return nil + }) + if err != nil { + return fmt.Errorf("failed to verify index; error while iterating over blocks: %w", err) + } + return nil +} + func findCidFromSignature(db *compactindex36.DB, sig solana.Signature) (cid.Cid, error) { bucket, err := db.LookupBucket(sig[:]) if err != nil { diff --git a/lassie-wrapper.go b/lassie-wrapper.go index 3f66d31d..f1fa38ef 100644 --- a/lassie-wrapper.go +++ b/lassie-wrapper.go @@ -16,6 +16,7 @@ import ( "github.com/ipld/go-ipld-prime/storage" "github.com/ipld/go-ipld-prime/storage/memstore" "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/peer" "github.com/urfave/cli/v2" "k8s.io/klog/v2" ) @@ -92,7 +93,10 @@ func (l *lassieWrapper) Fetch( return stats, nil } -func newLassieWrapper(cctx *cli.Context) (*lassieWrapper, error) { +func newLassieWrapper( + cctx *cli.Context, + fetchProviderAddrInfos []peer.AddrInfo, +) (*lassieWrapper, error) { ctx := cctx.Context providerTimeout := cctx.Duration("provider-timeout") diff --git a/main.go b/main.go index ff688a04..fc16471e 100644 --- a/main.go +++ b/main.go @@ -58,6 +58,7 @@ func main() { newCmd_rpcServerCar(), newCmd_rpcServerFilecoin(), newCmd_Version(), + newCmd_rpc(), }, } 
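Aside: the multiepoch handlers introduced from here on can fan the same lookup out to several epochs, which is what the FirstResponse helper in first.go above is for. A hypothetical usage sketch (only NewFirstResponse, Spawn, and Wait come from this diff; lookupTransactionInEpoch is an invented placeholder assumed to return (nil, nil) when an epoch does not contain the signature):

// findInAnyEpoch races one lookup per epoch and returns the first
// non-nil result, the first error, or (nil, nil) if every lookup missed.
func findInAnyEpoch(ctx context.Context, epochs []*Epoch, sig solana.Signature) (any, error) {
	fr := NewFirstResponse(ctx, len(epochs)) // cap concurrency at one goroutine per epoch
	for _, ep := range epochs {
		ep := ep // capture the loop variable for the closure
		fr.Spawn(func() (any, error) {
			// Returning a non-nil value or an error stops the whole group.
			return lookupTransactionInEpoch(ctx, ep, sig) // hypothetical helper, not in this diff
		})
	}
	switch res := fr.Wait().(type) {
	case error:
		return nil, res // the first failure won the race
	case nil:
		return nil, nil // every spawned lookup came back empty
	default:
		return res, nil // the first hit
	}
}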
diff --git a/multiepoch-getBlock.go b/multiepoch-getBlock.go new file mode 100644 index 00000000..d408a055 --- /dev/null +++ b/multiepoch-getBlock.go @@ -0,0 +1,482 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "runtime" + "sort" + "sync" + + "github.com/gagliardetto/solana-go" + "github.com/ipfs/go-cid" + "github.com/ipld/go-car/util" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/rpcpool/yellowstone-faithful/compactindex36" + "github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode" + solanablockrewards "github.com/rpcpool/yellowstone-faithful/solana-block-rewards" + "github.com/sourcegraph/jsonrpc2" + "golang.org/x/sync/errgroup" + "k8s.io/klog/v2" +) + +func (multi *MultiEpoch) handleGetBlock(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) (*jsonrpc2.Error, error) { + tim := newTimer() + params, err := parseGetBlockRequest(req.Params) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInvalidParams, + Message: "Invalid params", + }, fmt.Errorf("failed to parse params: %w", err) + } + if err := params.Validate(); err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInvalidParams, + Message: err.Error(), + }, fmt.Errorf("failed to validate params: %w", err) + } + tim.time("parseGetBlockRequest") + slot := params.Slot + + // find the epoch that contains the requested slot + epochNumber := CalcEpochForSlot(slot) + epochHandler, err := multi.GetEpoch(epochNumber) + if err != nil { + return &jsonrpc2.Error{ + Code: CodeNotFound, + Message: fmt.Sprintf("Epoch %d is not available", epochNumber), + }, fmt.Errorf("failed to get epoch %d: %w", epochNumber, err) + } + + block, err := epochHandler.GetBlock(WithSubrapghPrefetch(ctx, true), slot) + if err != nil { + if errors.Is(err, compactindex36.ErrNotFound) { + return &jsonrpc2.Error{ + Code: CodeNotFound, + Message: fmt.Sprintf("Slot %d was skipped, or missing in long-term storage", slot), + }, err + } else { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Failed to get block", + }, fmt.Errorf("failed to get block: %w", err) + } + } + tim.time("GetBlock") + { + prefetcherFromCar := func() error { + parentIsInPreviousEpoch := CalcEpochForSlot(uint64(block.Meta.Parent_slot)) != CalcEpochForSlot(slot) + + var blockCid, parentCid cid.Cid + wg := new(errgroup.Group) + wg.Go(func() (err error) { + blockCid, err = epochHandler.FindCidFromSlot(ctx, slot) + if err != nil { + return err + } + return nil + }) + wg.Go(func() (err error) { + if parentIsInPreviousEpoch { + return nil + } + parentCid, err = epochHandler.FindCidFromSlot(ctx, uint64(block.Meta.Parent_slot)) + if err != nil { + return err + } + return nil + }) + err = wg.Wait() + if err != nil { + return err + } + klog.Infof("%s -> %s", parentCid, blockCid) + { + var blockOffset, parentOffset uint64 + wg := new(errgroup.Group) + wg.Go(func() (err error) { + blockOffset, err = epochHandler.FindOffsetFromCid(ctx, blockCid) + if err != nil { + return err + } + return nil + }) + wg.Go(func() (err error) { + if parentIsInPreviousEpoch { + // get car file header size + parentOffset = epochHandler.remoteCarHeaderSize + return nil + } + parentOffset, err = epochHandler.FindOffsetFromCid(ctx, parentCid) + if err != nil { + // If the parent is not found, it (probably) means that it's outside of the car file. 
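+					// Falling back to the start of the CAR data section keeps the
+					// prefetch window below valid; its length is capped at 100 MiB,
+					// which bounds the cost of this worst case.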
+ parentOffset = epochHandler.remoteCarHeaderSize + } + return nil + }) + err = wg.Wait() + if err != nil { + return err + } + + length := blockOffset - parentOffset + MiB := uint64(1024 * 1024) + maxSize := MiB * 100 + if length > maxSize { + length = maxSize + } + + idealEntrySize := uint64(36190) + var start uint64 + if parentIsInPreviousEpoch { + start = parentOffset + } else { + if parentOffset > idealEntrySize { + start = parentOffset - idealEntrySize + } else { + start = parentOffset + } + length += idealEntrySize + } + + klog.Infof("prefetching CAR: start=%d length=%d (parent_offset=%d)", start, length, parentOffset) + carSection, err := epochHandler.ReadAtFromCar(ctx, start, length) + if err != nil { + return err + } + dr := bytes.NewReader(carSection) + if !parentIsInPreviousEpoch { + dr.Seek(int64(idealEntrySize), io.SeekStart) + } + br := bufio.NewReader(dr) + + gotCid, data, err := util.ReadNode(br) + if err != nil { + return fmt.Errorf("failed to read first node: %w", err) + } + if !parentIsInPreviousEpoch && !gotCid.Equals(parentCid) { + return fmt.Errorf("CID mismatch: expected %s, got %s", parentCid, gotCid) + } + epochHandler.putNodeInCache(gotCid, data) + + for { + gotCid, data, err = util.ReadNode(br) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("failed to read node: %w", err) + } + if gotCid.Equals(blockCid) { + break + } + epochHandler.putNodeInCache(gotCid, data) + } + } + return nil + } + if epochHandler.lassieFetcher == nil { + err := prefetcherFromCar() + if err != nil { + klog.Errorf("failed to prefetch from car: %v", err) + } + } + } + blocktime := uint64(block.Meta.Blocktime) + + allTransactionNodes := make([][]*ipldbindcode.Transaction, len(block.Entries)) + mu := &sync.Mutex{} + var lastEntryHash solana.Hash + { + wg := new(errgroup.Group) + wg.SetLimit(runtime.NumCPU() * 2) + // get entries from the block + for entryIndex, entry := range block.Entries { + entryIndex := entryIndex + entryCid := entry.(cidlink.Link).Cid + wg.Go(func() error { + // get the entry by CID + entryNode, err := epochHandler.GetEntryByCid(ctx, entryCid) + if err != nil { + klog.Errorf("failed to decode Entry: %v", err) + return err + } + + if entryIndex == len(block.Entries)-1 { + lastEntryHash = solana.HashFromBytes(entryNode.Hash) + } + + twg := new(errgroup.Group) + twg.SetLimit(runtime.NumCPU()) + // get the transactions from the entry + allTransactionNodes[entryIndex] = make([]*ipldbindcode.Transaction, len(entryNode.Transactions)) + for txI := range entryNode.Transactions { + txI := txI + tx := entryNode.Transactions[txI] + twg.Go(func() error { + // get the transaction by CID + tcid := tx.(cidlink.Link).Cid + txNode, err := epochHandler.GetTransactionByCid(ctx, tcid) + if err != nil { + klog.Errorf("failed to decode Transaction %s: %v", tcid, err) + return nil + } + // NOTE: this messes up the order of transactions, + // but we sort them later anyway. 
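+						// The mutex serializes writes into the shared allTransactionNodes
+						// slice-of-slices from the per-transaction goroutines.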
+						mu.Lock()
+						allTransactionNodes[entryIndex][txI] = txNode
+						mu.Unlock()
+						return nil
+					})
+				}
+				return twg.Wait()
+			})
+		}
+		err = wg.Wait()
+		if err != nil {
+			return &jsonrpc2.Error{
+				Code:    jsonrpc2.CodeInternalError,
+				Message: "Internal error",
+			}, fmt.Errorf("failed to get entries: %v", err)
+		}
+	}
+	tim.time("get entries")
+
+	var allTransactions []GetTransactionResponse
+	var rewards any
+	hasRewards := !block.Rewards.(cidlink.Link).Cid.Equals(DummyCID)
+	if *params.Options.Rewards && hasRewards {
+		rewardsNode, err := epochHandler.GetRewardsByCid(ctx, block.Rewards.(cidlink.Link).Cid)
+		if err != nil {
+			return &jsonrpc2.Error{
+				Code:    jsonrpc2.CodeInternalError,
+				Message: "Internal error",
+			}, fmt.Errorf("failed to decode Rewards: %v", err)
+		}
+		rewardsBuf, err := loadDataFromDataFrames(&rewardsNode.Data, epochHandler.GetDataFrameByCid)
+		if err != nil {
+			return &jsonrpc2.Error{
+				Code:    jsonrpc2.CodeInternalError,
+				Message: "Internal error",
+			}, fmt.Errorf("failed to load Rewards dataFrames: %v", err)
+		}
+
+		uncompressedRewards, err := decompressZstd(rewardsBuf)
+		if err != nil {
+			return &jsonrpc2.Error{
+				Code:    jsonrpc2.CodeInternalError,
+				Message: "Internal error",
+			}, fmt.Errorf("failed to decompress Rewards: %v", err)
+		}
+		// try decoding as protobuf
+		actualRewards, err := solanablockrewards.ParseRewards(uncompressedRewards)
+		if err != nil {
+			// TODO: add support for legacy rewards format
+			klog.Warningf("Rewards are not protobuf: %v", err)
+		} else {
+			{
+				// encode rewards as JSON, then decode it as a map
+				buf, err := json.Marshal(actualRewards)
+				if err != nil {
+					return &jsonrpc2.Error{
+						Code:    jsonrpc2.CodeInternalError,
+						Message: "Internal error",
+					}, fmt.Errorf("failed to encode rewards: %v", err)
+				}
+				var m map[string]any
+				err = json.Unmarshal(buf, &m)
+				if err != nil {
+					return &jsonrpc2.Error{
+						Code:    jsonrpc2.CodeInternalError,
+						Message: "Internal error",
+					}, fmt.Errorf("failed to decode rewards: %v", err)
+				}
+				if _, ok := m["rewards"]; ok {
+					// iterate over rewards as an array of maps, normalizing each entry
+					rewardsAsArray := m["rewards"].([]any)
+					for _, reward := range rewardsAsArray {
+						rewardAsMap := reward.(map[string]any)
+						// if there is no commission field, add it and set it to nil
+						if _, ok := rewardAsMap["commission"]; !ok {
+							rewardAsMap["commission"] = nil
+						}
+						// if the commission field is a string, convert it to a float
+						if asString, ok := rewardAsMap["commission"].(string); ok {
+							rewardAsMap["commission"] = asFloat(asString)
+						}
+						// if no lamports field, add it and set it to 0
+						if _, ok := rewardAsMap["lamports"]; !ok {
+							rewardAsMap["lamports"] = uint64(0)
+						}
+
+						// if it has a post_balance field, convert it to postBalance
+						if _, ok := rewardAsMap["post_balance"]; ok {
+							rewardAsMap["postBalance"] = rewardAsMap["post_balance"]
+							delete(rewardAsMap, "post_balance")
+						}
+						// if it has a reward_type field, convert it to rewardType
+						if _, ok := rewardAsMap["reward_type"]; ok {
+							rewardAsMap["rewardType"] = rewardAsMap["reward_type"]
+							delete(rewardAsMap, "reward_type")
+
+							// if it's a float, convert to int and use rewardTypeToString
+							if asFloat, ok := rewardAsMap["rewardType"].(float64); ok {
+								rewardAsMap["rewardType"] = rewardTypeToString(int(asFloat))
+							}
+						}
+					}
+					rewards = rewardsAsArray
+					// sort.Slice(rewardsAsArray, func(i, j int) bool {
+					//	// sort by rewardType, then by pubkey
+					//	if rewardTypeStringToInt(rewardsAsArray[i].(map[string]any)["rewardType"].(string)) != 
rewardTypeStringToInt(rewardsAsArray[j].(map[string]any)["rewardType"].(string)) { + // return rewardTypeStringToInt(rewardsAsArray[i].(map[string]any)["rewardType"].(string)) > rewardTypeStringToInt(rewardsAsArray[j].(map[string]any)["rewardType"].(string)) + // } + // return bytes.Compare(solana.MPK(rewardsAsArray[i].(map[string]any)["pubkey"].(string)).Bytes(), solana.MPK(rewardsAsArray[j].(map[string]any)["pubkey"].(string)).Bytes()) < 0 + // }) + } else { + klog.Errorf("did not find rewards field in rewards") + rewards = make([]any, 0) + } + } + } + } else { + rewards = make([]any, 0) + } + tim.time("get rewards") + { + for _, transactionNode := range mergeTxNodeSlices(allTransactionNodes) { + var txResp GetTransactionResponse + + // response.Slot = uint64(transactionNode.Slot) + // if blocktime != 0 { + // response.Blocktime = &blocktime + // } + + { + pos, ok := transactionNode.GetPositionIndex() + if ok { + txResp.Position = uint64(pos) + } + tx, meta, err := parseTransactionAndMetaFromNode(transactionNode, epochHandler.GetDataFrameByCid) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to decode transaction: %v", err) + } + txResp.Signatures = tx.Signatures + if tx.Message.IsVersioned() { + txResp.Version = tx.Message.GetVersion() - 1 + } else { + txResp.Version = "legacy" + } + txResp.Meta = meta + + encodedTx, err := encodeTransactionResponseBasedOnWantedEncoding(*params.Options.Encoding, tx) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to encode transaction: %v", err) + } + txResp.Transaction = encodedTx + } + + allTransactions = append(allTransactions, txResp) + } + } + sort.Slice(allTransactions, func(i, j int) bool { + return allTransactions[i].Position < allTransactions[j].Position + }) + tim.time("get transactions") + var blockResp GetBlockResponse + blockResp.Transactions = allTransactions + blockResp.BlockTime = &blocktime + blockResp.Blockhash = lastEntryHash.String() + blockResp.ParentSlot = uint64(block.Meta.Parent_slot) + blockResp.Rewards = rewards + + { + blockHeight, ok := block.GetBlockHeight() + if ok { + blockResp.BlockHeight = &blockHeight + } + } + { + // get parent slot + parentSlot := uint64(block.Meta.Parent_slot) + if parentSlot != 0 && CalcEpochForSlot(parentSlot) == epochNumber { + // NOTE: if the parent is in the same epoch, we can get it from the same epoch handler as the block; + // otherwise, we need to get it from the previous epoch (TODO: implement this) + parentBlock, err := epochHandler.GetBlock(WithSubrapghPrefetch(ctx, false), parentSlot) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to get/decode block: %v", err) + } + + if len(parentBlock.Entries) > 0 { + lastEntryCidOfParent := parentBlock.Entries[len(parentBlock.Entries)-1] + parentEntryNode, err := epochHandler.GetEntryByCid(ctx, lastEntryCidOfParent.(cidlink.Link).Cid) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to decode Entry: %v", err) + } + parentEntryHash := solana.HashFromBytes(parentEntryNode.Hash).String() + blockResp.PreviousBlockhash = &parentEntryHash + } + } else { + klog.Infof("parent slot is in a different epoch, not implemented yet (can't get previousBlockhash)") + } + } + tim.time("get parent block") + + err = conn.Reply( + ctx, + 
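+		// The function argument passed to Reply below is a post-processing hook:
+		// it runs adaptTransactionMetaToExpectedOutput on every transaction so the
+		// JSON matches the shape the Solana RPC API returns.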
req.ID, + blockResp, + func(m map[string]any) map[string]any { + transactions, ok := m["transactions"].([]any) + if !ok { + return m + } + for i := range transactions { + transaction, ok := transactions[i].(map[string]any) + if !ok { + continue + } + transactions[i] = adaptTransactionMetaToExpectedOutput(transaction) + } + + return m + }, + ) + tim.time("reply") + if err != nil { + return nil, fmt.Errorf("failed to reply: %w", err) + } + return nil, nil +} + +func asFloat(s string) float64 { + var f float64 + _, err := fmt.Sscanf(s, "%f", &f) + if err != nil { + panic(err) + } + return f +} + +func mergeTxNodeSlices(slices [][]*ipldbindcode.Transaction) []*ipldbindcode.Transaction { + var out []*ipldbindcode.Transaction + for _, slice := range slices { + out = append(out, slice...) + } + return out +} diff --git a/multiepoch-getBlockTime.go b/multiepoch-getBlockTime.go new file mode 100644 index 00000000..a9b87b71 --- /dev/null +++ b/multiepoch-getBlockTime.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "errors" + "fmt" + + "github.com/rpcpool/yellowstone-faithful/compactindex36" + "github.com/sourcegraph/jsonrpc2" +) + +func (multi *MultiEpoch) handleGetBlockTime(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) (*jsonrpc2.Error, error) { + blockNum, err := parseGetBlockTimeRequest(req.Params) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInvalidParams, + Message: "Invalid params", + }, fmt.Errorf("failed to parse params: %w", err) + } + + // find the epoch that contains the requested slot + epochNumber := CalcEpochForSlot(blockNum) + epochHandler, err := multi.GetEpoch(epochNumber) + if err != nil { + return &jsonrpc2.Error{ + Code: CodeNotFound, + Message: fmt.Sprintf("Epoch %d is not available", epochNumber), + }, fmt.Errorf("failed to get epoch %d: %w", epochNumber, err) + } + + block, err := epochHandler.GetBlock(WithSubrapghPrefetch(ctx, false), blockNum) + if err != nil { + if errors.Is(err, compactindex36.ErrNotFound) { + return &jsonrpc2.Error{ + Code: CodeNotFound, + Message: fmt.Sprintf("Slot %d was skipped, or missing in long-term storage", blockNum), + }, err + } else { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Failed to get block", + }, fmt.Errorf("failed to get block: %w", err) + } + } + blockTime := uint64(block.Meta.Blocktime) + err = conn.ReplyRaw( + ctx, + req.ID, + blockTime, + ) + if err != nil { + return nil, fmt.Errorf("failed to reply: %w", err) + } + return nil, nil +} diff --git a/multiepoch-getSignaturesForAddress.go b/multiepoch-getSignaturesForAddress.go new file mode 100644 index 00000000..d4126a15 --- /dev/null +++ b/multiepoch-getSignaturesForAddress.go @@ -0,0 +1,220 @@ +package main + +import ( + "context" + "fmt" + "runtime" + "sort" + "sync" + + "github.com/gagliardetto/solana-go" + "github.com/rpcpool/yellowstone-faithful/gsfa" + metalatest "github.com/rpcpool/yellowstone-faithful/parse_legacy_transaction_status_meta/v-latest" + metaoldest "github.com/rpcpool/yellowstone-faithful/parse_legacy_transaction_status_meta/v-oldest" + "github.com/rpcpool/yellowstone-faithful/third_party/solana_proto/confirmed_block" + "github.com/sourcegraph/jsonrpc2" + "golang.org/x/sync/errgroup" + "k8s.io/klog/v2" +) + +// getGsfaReadersInEpochDescendingOrder returns a list of gsfa readers in epoch order (from most recent to oldest). 
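+// The second return value carries the matching epoch numbers in the same descending order.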
+func (ser *MultiEpoch) getGsfaReadersInEpochDescendingOrder() ([]*gsfa.GsfaReader, []uint64) { + ser.mu.RLock() + defer ser.mu.RUnlock() + + epochs := make([]*Epoch, 0, len(ser.epochs)) + for _, epoch := range ser.epochs { + epochs = append(epochs, epoch) + } + + // sort epochs by epoch number (from biggest to smallest): + sort.Slice(epochs, func(i, j int) bool { + return epochs[i].epoch > epochs[j].epoch + }) + + gsfaReaders := make([]*gsfa.GsfaReader, 0, len(epochs)) + epochNums := make([]uint64, 0, len(epochs)) + for _, epoch := range epochs { + if epoch.gsfaReader != nil { + epoch.gsfaReader.SetEpoch(epoch.Epoch()) + gsfaReaders = append(gsfaReaders, epoch.gsfaReader) + epochNums = append(epochNums, epoch.Epoch()) + } + } + return gsfaReaders, epochNums +} + +func countSignatures(v map[uint64][]solana.Signature) int { + var count int + for _, sigs := range v { + count += len(sigs) + } + return count +} + +func (multi *MultiEpoch) handleGetSignaturesForAddress(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) (*jsonrpc2.Error, error) { + // TODO: + // - parse and validate request + // - get list of epochs (from most recent to oldest) + // - iterate until we find the requested number of signatures + // - expand the signatures with tx data + signaturesOnly := multi.options.GsfaOnlySignatures + + params, err := parseGetSignaturesForAddressParams(req.Params) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInvalidParams, + Message: "Invalid params", + }, fmt.Errorf("failed to parse params: %v", err) + } + pk := params.Address + limit := params.Limit + + gsfaIndexes, _ := multi.getGsfaReadersInEpochDescendingOrder() + if len(gsfaIndexes) == 0 { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "getSignaturesForAddress method is not enabled", + }, fmt.Errorf("no gsfa indexes found") + } + + gsfaMulti, err := gsfa.NewGsfaReaderMultiepoch(gsfaIndexes) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to create gsfa multiepoch reader: %w", err) + } + + // Get the signatures: + foundSignatures, err := gsfaMulti.GetBeforeUntil( + ctx, + pk, + limit, + params.Before, + params.Until, + ) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to get signatures: %w", err) + } + + if len(foundSignatures) == 0 { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Not found", + }, fmt.Errorf("no signatures found for address: %s", pk) + } + + var blockTimeCache struct { + m map[uint64]uint64 + mu sync.Mutex + } + blockTimeCache.m = make(map[uint64]uint64) + getBlockTime := func(slot uint64, ser *Epoch) uint64 { + blockTimeCache.mu.Lock() + defer blockTimeCache.mu.Unlock() + if blockTime, ok := blockTimeCache.m[slot]; ok { + return blockTime + } + block, err := ser.GetBlock(ctx, slot) + if err != nil { + klog.Errorf("failed to get block time for slot %d: %v", slot, err) + return 0 + } + blockTimeCache.m[slot] = uint64(block.Meta.Blocktime) + return uint64(block.Meta.Blocktime) + } + + wg := new(errgroup.Group) + wg.SetLimit(runtime.NumCPU() * 2) + // The response is an array of objects: [{signature: string}] + response := make([]map[string]any, countSignatures(foundSignatures)) + numBefore := 0 + for ei := range foundSignatures { + epoch := ei + ser, err := multi.GetEpoch(epoch) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: 
"Internal error", + }, fmt.Errorf("failed to get epoch %d: %w", epoch, err) + } + + sigs := foundSignatures[ei] + for i := range sigs { + ii := numBefore + i + sig := sigs[i] + wg.Go(func() error { + response[ii] = map[string]any{ + "signature": sig.String(), + } + if signaturesOnly { + return nil + } + transactionNode, err := ser.GetTransaction(ctx, sig) + if err != nil { + klog.Errorf("failed to get tx %s: %v", sig, err) + return nil + } + if transactionNode != nil { + { + tx, meta, err := parseTransactionAndMetaFromNode(transactionNode, ser.GetDataFrameByCid) + if err == nil { + switch metaValue := meta.(type) { + case *confirmed_block.TransactionStatusMeta: + response[ii]["err"] = metaValue.Err + case *metalatest.TransactionStatusMeta: + response[ii]["err"] = metaValue.Status + case *metaoldest.TransactionStatusMeta: + response[ii]["err"] = metaValue.Status + } + + if _, ok := response[ii]["err"]; ok { + response[ii]["err"], _ = parseTransactionError(response[ii]["err"]) + } + + memoData := getMemoInstructionDataFromTransaction(&tx) + if memoData != nil { + response[ii]["memo"] = string(memoData) + } + } + + if _, ok := response[ii]["memo"]; !ok { + response[ii]["memo"] = nil + } + if _, ok := response[ii]["err"]; !ok { + response[ii]["err"] = nil + } + } + slot := uint64(transactionNode.Slot) + response[ii]["slot"] = slot + response[ii]["blockTime"] = getBlockTime(slot, ser) + response[ii]["confirmationStatus"] = "finalized" + } + return nil + }) + } + numBefore += len(sigs) + } + if err := wg.Wait(); err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to get tx data: %w", err) + } + + // reply with the data + err = conn.ReplyRaw( + ctx, + req.ID, + response, + ) + if err != nil { + return nil, fmt.Errorf("failed to reply: %w", err) + } + + return nil, nil +} diff --git a/multiepoch-getTransaction.go b/multiepoch-getTransaction.go new file mode 100644 index 00000000..e8119c3a --- /dev/null +++ b/multiepoch-getTransaction.go @@ -0,0 +1,239 @@ +package main + +import ( + "context" + "errors" + "fmt" + "sort" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/rpcpool/yellowstone-faithful/bucketteer" + "github.com/rpcpool/yellowstone-faithful/compactindex36" + "github.com/sourcegraph/jsonrpc2" + "k8s.io/klog/v2" +) + +func (multi *MultiEpoch) getAllBucketteers() map[uint64]*bucketteer.Reader { + multi.mu.RLock() + defer multi.mu.RUnlock() + bucketteers := make(map[uint64]*bucketteer.Reader) + for _, epoch := range multi.epochs { + if epoch.sigExists != nil { + bucketteers[epoch.Epoch()] = epoch.sigExists + } + } + return bucketteers +} + +func (multi *MultiEpoch) findEpochNumberFromSignature(ctx context.Context, sig solana.Signature) (uint64, error) { + // FLOW: + // - if one epoch, just return that epoch + // - if multiple epochs, use sigToEpoch to find the epoch number + // - if sigToEpoch is not available, linear search through all epochs + + if epochs := multi.GetEpochNumbers(); len(epochs) == 1 { + return epochs[0], nil + } + + // Linear search: + numbers := multi.GetEpochNumbers() + // sort from highest to lowest: + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] > numbers[j] + }) + + buckets := multi.getAllBucketteers() + + found := make([]uint64, 0) + startedSearchingCandidatesAt := time.Now() + for _, epochNumber := range numbers { + bucket, ok := buckets[epochNumber] + if !ok { + continue + } + if has, err := bucket.Has(sig); err != nil { + return 0, fmt.Errorf("failed to check 
if signature exists in bucket: %v", err)
+		} else if has {
+			found = append(found, epochNumber)
+		}
+	}
+	klog.Infof(
+		"Searched %d epochs in %s, and found %d candidate epochs for %s: %v",
+		len(numbers),
+		time.Since(startedSearchingCandidatesAt),
+		len(found),
+		sig,
+		found,
+	)
+
+	if len(found) == 0 {
+		return 0, ErrNotFound
+	}
+
+	for _, epochNumber := range found {
+		epoch, err := multi.GetEpoch(epochNumber)
+		if err != nil {
+			return 0, fmt.Errorf("failed to get epoch %d: %v", epochNumber, err)
+		}
+		if _, err := epoch.FindCidFromSignature(ctx, sig); err == nil {
+			return epochNumber, nil
+		}
+	}
+	return 0, ErrNotFound
+
+	// NOTE: the parallel search below is currently unreachable (the candidate
+	// check above always returns); it is kept as an alternative strategy for
+	// searching all epochs concurrently.
+	wg := NewFirstResponse(ctx, multi.options.EpochSearchConcurrency)
+	for i := range numbers {
+		epochNumber := numbers[i]
+		wg.Spawn(func() (any, error) {
+			epoch, err := multi.GetEpoch(epochNumber)
+			if err != nil {
+				return nil, fmt.Errorf("failed to get epoch %d: %v", epochNumber, err)
+			}
+			if _, err := epoch.FindCidFromSignature(ctx, sig); err == nil {
+				return epochNumber, nil
+			}
+			// Not found in this epoch.
+			return nil, nil
+		})
+	}
+	switch result := wg.Wait().(type) {
+	case nil:
+		// All epochs were searched, but the signature was not found.
+		return 0, ErrNotFound
+	case error:
+		// An error occurred while searching one of the epochs.
+		return 0, result
+	case uint64:
+		// The signature was found in one of the epochs.
+		return result, nil
+	default:
+		return 0, fmt.Errorf("unexpected result: (%T) %v", result, result)
+	}
+}
+
+func (multi *MultiEpoch) handleGetTransaction(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) (*jsonrpc2.Error, error) {
+	if multi.CountEpochs() == 0 {
+		return &jsonrpc2.Error{
+			Code:    jsonrpc2.CodeInternalError,
+			Message: "no epochs available",
+		}, fmt.Errorf("no epochs available")
+	}
+
+	params, err := parseGetTransactionRequest(req.Params)
+	if err != nil {
+		return &jsonrpc2.Error{
+			Code:    jsonrpc2.CodeInvalidParams,
+			Message: "Invalid params",
+		}, fmt.Errorf("failed to parse params: %v", err)
+	}
+	if err := params.Validate(); err != nil {
+		return &jsonrpc2.Error{
+			Code:    jsonrpc2.CodeInvalidParams,
+			Message: err.Error(),
+		}, fmt.Errorf("failed to validate params: %w", err)
+	}
+
+	sig := params.Signature
+
+	startedEpochLookupAt := time.Now()
+	epochNumber, err := multi.findEpochNumberFromSignature(ctx, sig)
+	if err != nil {
+		if errors.Is(err, ErrNotFound) {
+			return &jsonrpc2.Error{
+				Code:    CodeNotFound,
+				Message: "Transaction signature not found in any available epoch",
+			}, fmt.Errorf("failed to find epoch number from signature %s: %v", sig, err)
+		}
+		return &jsonrpc2.Error{
+			Code:    jsonrpc2.CodeInternalError,
+			Message: "Internal error",
+		}, fmt.Errorf("failed to get epoch for signature %s: %v", sig, err)
+	}
+	klog.Infof("Found signature %s in epoch %d in %s", sig, epochNumber, time.Since(startedEpochLookupAt))
+
+	epochHandler, err := multi.GetEpoch(epochNumber)
+	if err != nil {
+		return &jsonrpc2.Error{
+			Code:    CodeNotFound,
+			Message: fmt.Sprintf("Epoch %d is not available from this RPC", epochNumber),
+		}, fmt.Errorf("failed to get handler for epoch %d: %w", epochNumber, err)
+	}
+
+	transactionNode, err := epochHandler.GetTransaction(WithSubrapghPrefetch(ctx, true), sig)
+	if err != nil {
+		if errors.Is(err, compactindex36.ErrNotFound) {
+			// NOTE: solana just returns null here in case of transaction not found
+			return &jsonrpc2.Error{
+				Code:    CodeNotFound,
+				Message: "Transaction not found",
+			}, fmt.Errorf("transaction 
%s not found", sig) + } + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to get Transaction: %v", err) + } + + var response GetTransactionResponse + + response.Slot = ptrToUint64(uint64(transactionNode.Slot)) + { + block, err := epochHandler.GetBlock(ctx, uint64(transactionNode.Slot)) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to get block: %v", err) + } + blocktime := uint64(block.Meta.Blocktime) + if blocktime != 0 { + response.Blocktime = &blocktime + } + } + + { + pos, ok := transactionNode.GetPositionIndex() + if ok { + response.Position = uint64(pos) + } + tx, meta, err := parseTransactionAndMetaFromNode(transactionNode, epochHandler.GetDataFrameByCid) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to decode transaction: %v", err) + } + response.Signatures = tx.Signatures + if tx.Message.IsVersioned() { + response.Version = tx.Message.GetVersion() - 1 + } else { + response.Version = "legacy" + } + response.Meta = meta + + encodedTx, err := encodeTransactionResponseBasedOnWantedEncoding(*params.Options.Encoding, tx) + if err != nil { + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, fmt.Errorf("failed to encode transaction: %v", err) + } + response.Transaction = encodedTx + } + + // reply with the data + err = conn.Reply( + ctx, + req.ID, + response, + func(m map[string]any) map[string]any { + return adaptTransactionMetaToExpectedOutput(m) + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to reply: %w", err) + } + return nil, nil +} diff --git a/multiepoch-getVersion.go b/multiepoch-getVersion.go new file mode 100644 index 00000000..ec81839d --- /dev/null +++ b/multiepoch-getVersion.go @@ -0,0 +1,48 @@ +package main + +import ( + "encoding/json" + "fmt" + + "github.com/sourcegraph/jsonrpc2" +) + +func (ser *MultiEpoch) tryEnrichGetVersion(body []byte) ([]byte, error) { + var decodedRemote jsonrpc2.Response + if err := json.Unmarshal(body, &decodedRemote); err != nil { + return nil, err + } + if decodedRemote.Error != nil || decodedRemote.Result == nil { + return nil, fmt.Errorf("response is not a success response") + } + // node decode the result: + var decodedResult map[string]any + if err := json.Unmarshal(*decodedRemote.Result, &decodedResult); err != nil { + return nil, fmt.Errorf("failed to decode result: %w", err) + } + // enrich the result: + faithfulVersion := ser.GetFaithfulVersionInfo() + decodedResult["faithful"] = faithfulVersion + + // re-encode the result: + encodedResult, err := json.Marshal(decodedResult) + if err != nil { + return nil, fmt.Errorf("failed to re-encode result: %w", err) + } + // re-encode the response: + decodedRemote.Result = (*json.RawMessage)(&encodedResult) + encodedResponse, err := json.Marshal(decodedRemote) + if err != nil { + return nil, fmt.Errorf("failed to re-encode response: %w", err) + } + // return the response: + return encodedResponse, nil +} + +func (ser *MultiEpoch) GetFaithfulVersionInfo() map[string]any { + faithfulVersion := make(map[string]any) + faithfulVersion["version"] = GitTag + faithfulVersion["commit"] = GitCommit + faithfulVersion["epochs"] = ser.GetEpochNumbers() + return faithfulVersion +} diff --git a/multiepoch.go b/multiepoch.go new file mode 100644 index 00000000..79628ae4 --- /dev/null +++ b/multiepoch.go @@ -0,0 +1,428 @@ 
+package main
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/goware/urlx"
+	"github.com/mr-tron/base58"
+	"github.com/sourcegraph/jsonrpc2"
+	"github.com/valyala/fasthttp"
+	"k8s.io/klog/v2"
+)
+
+type Options struct {
+	GsfaOnlySignatures     bool
+	EpochSearchConcurrency int
+}
+
+type MultiEpoch struct {
+	mu      sync.RWMutex
+	options *Options
+	epochs  map[uint64]*Epoch
+}
+
+func NewMultiEpoch(options *Options) *MultiEpoch {
+	return &MultiEpoch{
+		options: options,
+		epochs:  make(map[uint64]*Epoch),
+	}
+}
+
+func (m *MultiEpoch) GetEpoch(epoch uint64) (*Epoch, error) {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	ep, ok := m.epochs[epoch]
+	if !ok {
+		return nil, fmt.Errorf("epoch %d not found", epoch)
+	}
+	return ep, nil
+}
+
+func (m *MultiEpoch) HasEpoch(epoch uint64) bool {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	_, ok := m.epochs[epoch]
+	return ok
+}
+
+func (m *MultiEpoch) AddEpoch(epoch uint64, ep *Epoch) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if _, ok := m.epochs[epoch]; ok {
+		return fmt.Errorf("epoch %d already exists", epoch)
+	}
+	m.epochs[epoch] = ep
+	return nil
+}
+
+func (m *MultiEpoch) RemoveEpoch(epoch uint64) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if _, ok := m.epochs[epoch]; !ok {
+		return fmt.Errorf("epoch %d not found", epoch)
+	}
+	delete(m.epochs, epoch)
+	return nil
+}
+
+func (m *MultiEpoch) RemoveEpochByConfigFilepath(configFilepath string) (uint64, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	for epoch, ep := range m.epochs {
+		if ep.config.ConfigFilepath() == configFilepath {
+			ep.Close()
+			delete(m.epochs, epoch)
+			return epoch, nil
+		}
+	}
+	return 0, fmt.Errorf("epoch not found for config file %q", configFilepath)
+}
+
+func (m *MultiEpoch) ReplaceEpoch(epoch uint64, ep *Epoch) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if _, ok := m.epochs[epoch]; !ok {
+		return fmt.Errorf("epoch %d not found", epoch)
+	}
+	m.epochs[epoch] = ep
+	return nil
+}
+
+func (m *MultiEpoch) ReplaceOrAddEpoch(epoch uint64, ep *Epoch) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	// if the epoch already exists, close it
+	if oldEp, ok := m.epochs[epoch]; ok {
+		oldEp.Close()
+	}
+	m.epochs[epoch] = ep
+	return nil
+}
+
+func (m *MultiEpoch) HasEpochWithSameHashAsFile(filepath string) bool {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	for _, ep := range m.epochs {
+		if ep.config.IsSameHashAsFile(filepath) {
+			return true
+		}
+	}
+	return false
+}
+
+func (m *MultiEpoch) CountEpochs() int {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	return len(m.epochs)
+}
+
+func (m *MultiEpoch) GetEpochNumbers() []uint64 {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	var epochNumbers []uint64
+	for epochNumber := range m.epochs {
+		epochNumbers = append(epochNumbers, epochNumber)
+	}
+	sort.Slice(epochNumbers, func(i, j int) bool {
+		return epochNumbers[i] > epochNumbers[j]
+	})
+	return epochNumbers
+}
+
+func (m *MultiEpoch) GetFirstAvailableEpoch() (*Epoch, error) {
+	// NOTE: do not take m.mu here: GetEpochNumbers and GetEpoch take the read
+	// lock themselves, and a recursive RLock can deadlock once a writer is
+	// waiting in between the two acquisitions.
+	numbers := m.GetEpochNumbers()
+	if len(numbers) > 0 {
+		return m.GetEpoch(numbers[0])
+	}
+	return nil, fmt.Errorf("no epochs available")
+}
+
+func (m *MultiEpoch) GetFirstAvailableEpochNumber() (uint64, error) {
+	numbers := m.GetEpochNumbers()
+	if len(numbers) > 0 {
+		return numbers[0], nil
+	}
+	return 0, fmt.Errorf("no epochs available")
+}
+
+type ListenerConfig struct {
+	ProxyConfig *ProxyConfig
+}
+
+type ProxyConfig struct {
+	Target  string            `json:"target" yaml:"target"`
+	Headers map[string]string `json:"headers" yaml:"headers"`
+	// ProxyFailedRequests will proxy requests that fail to be handled by the local RPC server.
+	ProxyFailedRequests bool `json:"proxyFailedRequests" yaml:"proxyFailedRequests"`
+}
+
+func LoadProxyConfig(configFilepath string) (*ProxyConfig, error) {
+	var proxyConfig ProxyConfig
+	if isJSONFile(configFilepath) {
+		if err := loadFromJSON(configFilepath, &proxyConfig); err != nil {
+			return nil, err
+		}
+	} else if isYAMLFile(configFilepath) {
+		if err := loadFromYAML(configFilepath, &proxyConfig); err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, fmt.Errorf("config file %q must be JSON or YAML", configFilepath)
+	}
+	return &proxyConfig, nil
+}
+
+// ListenAndServe starts listening on the configured address and serves the RPC API.
+func (m *MultiEpoch) ListenAndServe(ctx context.Context, listenOn string, lsConf *ListenerConfig) error {
+	handler := newMultiEpochHandler(m, lsConf)
+	handler = fasthttp.CompressHandler(handler)
+
+	klog.Infof("RPC server listening on %s", listenOn)
+
+	s := &fasthttp.Server{
+		Handler:            handler,
+		MaxRequestBodySize: 1024 * 1024,
+	}
+	go func() {
+		// listen for context cancellation
+		<-ctx.Done()
+		klog.Info("RPC server shutting down...")
+		defer klog.Info("RPC server shut down")
+		if err := s.ShutdownWithContext(ctx); err != nil {
+			klog.Errorf("Error while shutting down RPC server: %s", err)
+		}
+	}()
+	return s.ListenAndServe(listenOn)
+}
+
+func randomRequestID() string {
+	b := make([]byte, 4)
+	if _, err := rand.Read(b); err != nil {
+		panic(err)
+	}
+	return strings.ToUpper(base58.Encode(b))
+}
+
+func newMultiEpochHandler(handler *MultiEpoch, lsConf *ListenerConfig) func(ctx *fasthttp.RequestCtx) {
+	// create a transparent reverse proxy
+	var proxy *fasthttp.HostClient
+	if lsConf != nil && lsConf.ProxyConfig != nil && lsConf.ProxyConfig.Target != "" {
+		target := lsConf.ProxyConfig.Target
+		parsedTargetURL, err := urlx.Parse(target)
+		if err != nil {
+			panic(fmt.Errorf("invalid proxy target URL %q: %w", target, err))
+		}
+		addr := parsedTargetURL.Hostname()
+		if parsedTargetURL.Port() != "" {
+			addr += ":" + parsedTargetURL.Port()
+		}
+		proxy = &fasthttp.HostClient{
+			Addr:  addr,
+			IsTLS: parsedTargetURL.Scheme == "https",
+		}
+		klog.Infof("Will proxy unhandled RPC methods to %q", addr)
+	}
+	return func(reqCtx *fasthttp.RequestCtx) {
+		startedAt := time.Now()
+		reqID := randomRequestID()
+		defer func() {
+			klog.Infof("[%s] request took %s", reqID, time.Since(startedAt))
+		}()
+		{
+			// make sure the method is POST
+			if !reqCtx.IsPost() {
+				replyJSON(reqCtx, http.StatusMethodNotAllowed, jsonrpc2.Response{
+					Error: &jsonrpc2.Error{
+						Code:    jsonrpc2.CodeMethodNotFound,
+						Message: "Method not allowed",
+					},
+				})
+				return
+			}
+
+			// limit request body size
+			if reqCtx.Request.Header.ContentLength() > 1024 {
+				replyJSON(reqCtx, http.StatusRequestEntityTooLarge, jsonrpc2.Response{
+					Error: &jsonrpc2.Error{
+						Code:    jsonrpc2.CodeInvalidRequest,
+						Message: "Request entity too large",
+					},
+				})
+				return
+			}
+		}
+		// read request body
+		body := reqCtx.Request.Body()
+
+		// parse request
+		var rpcRequest jsonrpc2.Request
+		if err := json.Unmarshal(body, &rpcRequest); err != nil {
+			klog.Errorf("[%s] failed to parse request body: %v", reqID, err)
+			replyJSON(reqCtx, http.StatusBadRequest, jsonrpc2.Response{
+				Error: &jsonrpc2.Error{
+					Code:    jsonrpc2.CodeParseError,
+					Message: "Parse error",
+				},
+			})
+			return
+		}
+
+		klog.Infof("[%s] received request: %q", reqID, strings.TrimSpace(string(body)))
+
+		if proxy != 
nil && !isValidLocalMethod(rpcRequest.Method) { + klog.Infof("[%s] Unhandled method %q, proxying to %q", reqID, rpcRequest.Method, proxy.Addr) + // proxy the request to the target + proxyToAlternativeRPCServer( + handler, + lsConf, + proxy, + reqCtx, + &rpcRequest, + body, + reqID, + ) + return + } + + rqCtx := &requestContext{ctx: reqCtx} + method := rpcRequest.Method + + if method == "getVersion" { + faithfulVersion := handler.GetFaithfulVersionInfo() + err := rqCtx.ReplyRaw( + reqCtx, + rpcRequest.ID, + map[string]any{ + "faithful": faithfulVersion, + }, + ) + if err != nil { + klog.Errorf("[%s] failed to reply to getVersion: %v", reqID, err) + } + return + } + + // errorResp is the error response to be sent to the client. + errorResp, err := handler.handleRequest(reqCtx, rqCtx, &rpcRequest) + if err != nil { + klog.Errorf("[%s] failed to handle %s: %v", reqID, sanitizeMethod(method), err) + } + if errorResp != nil { + if proxy != nil && lsConf.ProxyConfig.ProxyFailedRequests { + klog.Infof("[%s] Failed local method %q, proxying to %q", reqID, rpcRequest.Method, proxy.Addr) + // proxy the request to the target + proxyToAlternativeRPCServer( + handler, + lsConf, + proxy, + reqCtx, + &rpcRequest, + body, + reqID, + ) + return + } else { + rqCtx.ReplyWithError( + reqCtx, + rpcRequest.ID, + errorResp, + ) + } + return + } + } +} + +func proxyToAlternativeRPCServer( + handler *MultiEpoch, + lsConf *ListenerConfig, + proxy *fasthttp.HostClient, + reqCtx *fasthttp.RequestCtx, + rpcRequest *jsonrpc2.Request, + body []byte, + reqID string, +) { + // proxy the request to the target + proxyReq := fasthttp.AcquireRequest() + defer fasthttp.ReleaseRequest(proxyReq) + { + for k, v := range lsConf.ProxyConfig.Headers { + proxyReq.Header.Set(k, v) + } + } + proxyReq.Header.SetMethod("POST") + proxyReq.Header.SetContentType("application/json") + proxyReq.SetRequestURI(lsConf.ProxyConfig.Target) + proxyReq.SetBody(body) + proxyResp := fasthttp.AcquireResponse() + defer fasthttp.ReleaseResponse(proxyResp) + if err := proxy.Do(proxyReq, proxyResp); err != nil { + klog.Errorf("[%s] failed to proxy request: %v", reqID, err) + replyJSON(reqCtx, http.StatusInternalServerError, jsonrpc2.Response{ + Error: &jsonrpc2.Error{ + Code: jsonrpc2.CodeInternalError, + Message: "Internal error", + }, + }) + return + } + reqCtx.Response.Header.Set("Content-Type", "application/json") + reqCtx.Response.SetStatusCode(proxyResp.StatusCode()) + if rpcRequest.Method == "getVersion" { + enriched, err := handler.tryEnrichGetVersion(proxyResp.Body()) + if err != nil { + klog.Errorf("[%s] failed to enrich getVersion response: %v", reqID, err) + reqCtx.Response.SetBody(proxyResp.Body()) + } else { + reqCtx.Response.SetBody(enriched) + } + } else { + reqCtx.Response.SetBody(proxyResp.Body()) + } + // TODO: handle compression. 
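+	// Currently only the status code, Content-Type, and body of the upstream
+	// response are forwarded; other upstream headers (including any
+	// Content-Encoding) are not copied to the client response.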
+} + +func sanitizeMethod(method string) string { + if isValidLocalMethod(method) { + return method + } + return "" +} + +func isValidLocalMethod(method string) bool { + switch method { + case "getBlock", "getTransaction", "getSignaturesForAddress", "getBlockTime": + return true + default: + return false + } +} + +// jsonrpc2.RequestHandler interface +func (ser *MultiEpoch) handleRequest(ctx context.Context, conn *requestContext, req *jsonrpc2.Request) (*jsonrpc2.Error, error) { + switch req.Method { + case "getBlock": + return ser.handleGetBlock(ctx, conn, req) + case "getTransaction": + return ser.handleGetTransaction(ctx, conn, req) + case "getSignaturesForAddress": + return ser.handleGetSignaturesForAddress(ctx, conn, req) + case "getBlockTime": + return ser.handleGetBlockTime(ctx, conn, req) + default: + return &jsonrpc2.Error{ + Code: jsonrpc2.CodeMethodNotFound, + Message: "Method not found", + }, fmt.Errorf("method not found") + } +} diff --git a/parse_legacy_transaction_status_meta/v-latest/parse_legacy_transaction_status_meta_ce598c5c98e7384c104fe7f5121e32c2c5a2d2eb.go b/parse_legacy_transaction_status_meta/v-latest/parse_legacy_transaction_status_meta_ce598c5c98e7384c104fe7f5121e32c2c5a2d2eb.go index 2cae683f..6be9ad93 100644 --- a/parse_legacy_transaction_status_meta/v-latest/parse_legacy_transaction_status_meta_ce598c5c98e7384c104fe7f5121e32c2c5a2d2eb.go +++ b/parse_legacy_transaction_status_meta/v-latest/parse_legacy_transaction_status_meta_ce598c5c98e7384c104fe7f5121e32c2c5a2d2eb.go @@ -1,24 +1,42 @@ package parse_legacy_transaction_status_meta_ce598c5c98e7384c104fe7f5121e32c2c5a2d2eb - import ( "fmt" - "github.com/novifinancial/serde-reflection/serde-generate/runtime/golang/serde" + "github.com/novifinancial/serde-reflection/serde-generate/runtime/golang/bincode" + "github.com/novifinancial/serde-reflection/serde-generate/runtime/golang/serde" + "k8s.io/klog" ) - type CompiledInstruction struct { ProgramIdIndex uint8 - Accounts struct {Field0 struct {Field0 uint8}; Field1 uint8; Field2 uint8; Field3 uint8} - Data struct {Field0 struct {Field0 uint8}; Field1 uint8; Field2 uint8; Field3 uint8} + Accounts struct { + Field0 struct{ Field0 uint8 } + Field1 uint8 + Field2 uint8 + Field3 uint8 + } + Data struct { + Field0 struct{ Field0 uint8 } + Field1 uint8 + Field2 uint8 + Field3 uint8 + } } func (obj *CompiledInstruction) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } - if err := serializer.SerializeU8(obj.ProgramIdIndex); err != nil { return err } - if err := serialize_tuple4_tuple1_u8_u8_u8_u8(obj.Accounts, serializer); err != nil { return err } - if err := serialize_tuple4_tuple1_u8_u8_u8_u8(obj.Data, serializer); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } + if err := serializer.SerializeU8(obj.ProgramIdIndex); err != nil { + return err + } + if err := serialize_tuple4_tuple1_u8_u8_u8_u8(obj.Accounts, serializer); err != nil { + return err + } + if err := serialize_tuple4_tuple1_u8_u8_u8_u8(obj.Data, serializer); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -27,17 +45,33 @@ func (obj *CompiledInstruction) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err 
!= nil { + return nil, err + } return serializer.GetBytes(), nil } func DeserializeCompiledInstruction(deserializer serde.Deserializer) (CompiledInstruction, error) { var obj CompiledInstruction - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } - if val, err := deserializer.DeserializeU8(); err == nil { obj.ProgramIdIndex = val } else { return obj, err } - if val, err := deserialize_tuple4_tuple1_u8_u8_u8_u8(deserializer); err == nil { obj.Accounts = val } else { return obj, err } - if val, err := deserialize_tuple4_tuple1_u8_u8_u8_u8(deserializer); err == nil { obj.Data = val } else { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } + if val, err := deserializer.DeserializeU8(); err == nil { + obj.ProgramIdIndex = val + } else { + return obj, err + } + if val, err := deserialize_tuple4_tuple1_u8_u8_u8_u8(deserializer); err == nil { + obj.Accounts = val + } else { + return obj, err + } + if val, err := deserialize_tuple4_tuple1_u8_u8_u8_u8(deserializer); err == nil { + obj.Data = val + } else { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -47,7 +81,7 @@ func BincodeDeserializeCompiledInstruction(input []byte) (CompiledInstruction, e var obj CompiledInstruction return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeCompiledInstruction(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { return obj, fmt.Errorf("Some input bytes were not read") @@ -56,14 +90,20 @@ func BincodeDeserializeCompiledInstruction(input []byte) (CompiledInstruction, e } type InnerInstructions struct { - Index uint8 + Index uint8 Instructions []CompiledInstruction } func (obj *InnerInstructions) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } - if err := serializer.SerializeU8(obj.Index); err != nil { return err } - if err := serialize_vector_CompiledInstruction(obj.Instructions, serializer); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } + if err := serializer.SerializeU8(obj.Index); err != nil { + return err + } + if err := serialize_vector_CompiledInstruction(obj.Instructions, serializer); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -72,16 +112,28 @@ func (obj *InnerInstructions) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func DeserializeInnerInstructions(deserializer serde.Deserializer) (InnerInstructions, error) { var obj InnerInstructions - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } - if val, err := deserializer.DeserializeU8(); err == nil { obj.Index = val } else { return obj, err } - if val, err := deserialize_vector_CompiledInstruction(deserializer); err == nil { obj.Instructions = val } else { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } + if val, err := deserializer.DeserializeU8(); err == nil { + obj.Index = val + } else { + return obj, err + } + if 
val, err := deserialize_vector_CompiledInstruction(deserializer); err == nil { + obj.Instructions = val + } else { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -91,7 +143,7 @@ func BincodeDeserializeInnerInstructions(input []byte) (InnerInstructions, error var obj InnerInstructions return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeInnerInstructions(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { return obj, fmt.Errorf("Some input bytes were not read") @@ -107,7 +159,9 @@ type InstructionError interface { func DeserializeInstructionError(deserializer serde.Deserializer) (InstructionError, error) { index, err := deserializer.DeserializeVariantIndex() - if err != nil { return nil, err } + if err != nil { + return nil, err + } switch index { case 0: @@ -386,7 +440,7 @@ func BincodeDeserializeInstructionError(input []byte) (InstructionError, error) var obj InstructionError return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeInstructionError(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { return obj, fmt.Errorf("Some input bytes were not read") @@ -394,13 +448,14 @@ func BincodeDeserializeInstructionError(input []byte) (InstructionError, error) return obj, err } -type InstructionError__GenericError struct { -} +type InstructionError__GenericError struct{} func (*InstructionError__GenericError) isInstructionError() {} func (obj *InstructionError__GenericError) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(0) serializer.DecreaseContainerDepth() return nil @@ -410,25 +465,30 @@ func (obj *InstructionError__GenericError) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__GenericError(deserializer serde.Deserializer) (InstructionError__GenericError, error) { var obj InstructionError__GenericError - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__InvalidArgument struct { -} +type InstructionError__InvalidArgument struct{} func (*InstructionError__InvalidArgument) isInstructionError() {} func (obj *InstructionError__InvalidArgument) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(1) serializer.DecreaseContainerDepth() return nil @@ -438,25 +498,30 @@ func (obj *InstructionError__InvalidArgument) BincodeSerialize() ([]byte, error) if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := 
bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__InvalidArgument(deserializer serde.Deserializer) (InstructionError__InvalidArgument, error) { var obj InstructionError__InvalidArgument - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__InvalidInstructionData struct { -} +type InstructionError__InvalidInstructionData struct{} func (*InstructionError__InvalidInstructionData) isInstructionError() {} func (obj *InstructionError__InvalidInstructionData) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(2) serializer.DecreaseContainerDepth() return nil @@ -466,25 +531,30 @@ func (obj *InstructionError__InvalidInstructionData) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__InvalidInstructionData(deserializer serde.Deserializer) (InstructionError__InvalidInstructionData, error) { var obj InstructionError__InvalidInstructionData - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__InvalidAccountData struct { -} +type InstructionError__InvalidAccountData struct{} func (*InstructionError__InvalidAccountData) isInstructionError() {} func (obj *InstructionError__InvalidAccountData) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(3) serializer.DecreaseContainerDepth() return nil @@ -494,25 +564,30 @@ func (obj *InstructionError__InvalidAccountData) BincodeSerialize() ([]byte, err if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__InvalidAccountData(deserializer serde.Deserializer) (InstructionError__InvalidAccountData, error) { var obj InstructionError__InvalidAccountData - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__AccountDataTooSmall struct { -} +type InstructionError__AccountDataTooSmall struct{} func (*InstructionError__AccountDataTooSmall) isInstructionError() {} func (obj 
@@ -522,25 +597,30 @@ func (obj *InstructionError__AccountDataTooSmall) BincodeSerialize() ([]byte, er
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__AccountDataTooSmall(deserializer serde.Deserializer) (InstructionError__AccountDataTooSmall, error) {
 	var obj InstructionError__AccountDataTooSmall
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__InsufficientFunds struct {
-}
+type InstructionError__InsufficientFunds struct{}
 
 func (*InstructionError__InsufficientFunds) isInstructionError() {}
 
 func (obj *InstructionError__InsufficientFunds) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(5)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -550,25 +630,30 @@ func (obj *InstructionError__InsufficientFunds) BincodeSerialize() ([]byte, erro
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__InsufficientFunds(deserializer serde.Deserializer) (InstructionError__InsufficientFunds, error) {
 	var obj InstructionError__InsufficientFunds
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__IncorrectProgramId struct {
-}
+type InstructionError__IncorrectProgramId struct{}
 
 func (*InstructionError__IncorrectProgramId) isInstructionError() {}
 
 func (obj *InstructionError__IncorrectProgramId) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(6)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -578,25 +663,30 @@ func (obj *InstructionError__IncorrectProgramId) BincodeSerialize() ([]byte, err
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__IncorrectProgramId(deserializer serde.Deserializer) (InstructionError__IncorrectProgramId, error) {
 	var obj InstructionError__IncorrectProgramId
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__MissingRequiredSignature struct {
-}
+type InstructionError__MissingRequiredSignature struct{}
 
 func (*InstructionError__MissingRequiredSignature) isInstructionError() {}
 
 func (obj *InstructionError__MissingRequiredSignature) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(7)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -606,25 +696,30 @@ func (obj *InstructionError__MissingRequiredSignature) BincodeSerialize() ([]byt
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__MissingRequiredSignature(deserializer serde.Deserializer) (InstructionError__MissingRequiredSignature, error) {
 	var obj InstructionError__MissingRequiredSignature
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__AccountAlreadyInitialized struct {
-}
+type InstructionError__AccountAlreadyInitialized struct{}
 
 func (*InstructionError__AccountAlreadyInitialized) isInstructionError() {}
 
 func (obj *InstructionError__AccountAlreadyInitialized) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(8)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -634,25 +729,30 @@ func (obj *InstructionError__AccountAlreadyInitialized) BincodeSerialize() ([]by
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__AccountAlreadyInitialized(deserializer serde.Deserializer) (InstructionError__AccountAlreadyInitialized, error) {
 	var obj InstructionError__AccountAlreadyInitialized
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__UninitializedAccount struct {
-}
+type InstructionError__UninitializedAccount struct{}
 
 func (*InstructionError__UninitializedAccount) isInstructionError() {}
 
 func (obj *InstructionError__UninitializedAccount) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(9)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -662,25 +762,30 @@ func (obj *InstructionError__UninitializedAccount) BincodeSerialize() ([]byte, e
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__UninitializedAccount(deserializer serde.Deserializer) (InstructionError__UninitializedAccount, error) {
 	var obj InstructionError__UninitializedAccount
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__UnbalancedInstruction struct {
-}
+type InstructionError__UnbalancedInstruction struct{}
 
 func (*InstructionError__UnbalancedInstruction) isInstructionError() {}
 
 func (obj *InstructionError__UnbalancedInstruction) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(10)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -690,25 +795,30 @@ func (obj *InstructionError__UnbalancedInstruction) BincodeSerialize() ([]byte,
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__UnbalancedInstruction(deserializer serde.Deserializer) (InstructionError__UnbalancedInstruction, error) {
 	var obj InstructionError__UnbalancedInstruction
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ModifiedProgramId struct {
-}
+type InstructionError__ModifiedProgramId struct{}
 
 func (*InstructionError__ModifiedProgramId) isInstructionError() {}
 
 func (obj *InstructionError__ModifiedProgramId) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(11)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -718,25 +828,30 @@ func (obj *InstructionError__ModifiedProgramId) BincodeSerialize() ([]byte, erro
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ModifiedProgramId(deserializer serde.Deserializer) (InstructionError__ModifiedProgramId, error) {
 	var obj InstructionError__ModifiedProgramId
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ExternalAccountLamportSpend struct {
-}
+type InstructionError__ExternalAccountLamportSpend struct{}
 
 func (*InstructionError__ExternalAccountLamportSpend) isInstructionError() {}
 
 func (obj *InstructionError__ExternalAccountLamportSpend) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(12)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -746,25 +861,30 @@ func (obj *InstructionError__ExternalAccountLamportSpend) BincodeSerialize() ([]
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ExternalAccountLamportSpend(deserializer serde.Deserializer) (InstructionError__ExternalAccountLamportSpend, error) {
 	var obj InstructionError__ExternalAccountLamportSpend
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ExternalAccountDataModified struct {
-}
+type InstructionError__ExternalAccountDataModified struct{}
 
 func (*InstructionError__ExternalAccountDataModified) isInstructionError() {}
 
 func (obj *InstructionError__ExternalAccountDataModified) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(13)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -774,25 +894,30 @@ func (obj *InstructionError__ExternalAccountDataModified) BincodeSerialize() ([]
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ExternalAccountDataModified(deserializer serde.Deserializer) (InstructionError__ExternalAccountDataModified, error) {
 	var obj InstructionError__ExternalAccountDataModified
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ReadonlyLamportChange struct {
-}
+type InstructionError__ReadonlyLamportChange struct{}
 
 func (*InstructionError__ReadonlyLamportChange) isInstructionError() {}
 
 func (obj *InstructionError__ReadonlyLamportChange) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(14)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -802,25 +927,30 @@ func (obj *InstructionError__ReadonlyLamportChange) BincodeSerialize() ([]byte,
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ReadonlyLamportChange(deserializer serde.Deserializer) (InstructionError__ReadonlyLamportChange, error) {
 	var obj InstructionError__ReadonlyLamportChange
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ReadonlyDataModified struct {
-}
+type InstructionError__ReadonlyDataModified struct{}
 
 func (*InstructionError__ReadonlyDataModified) isInstructionError() {}
 
 func (obj *InstructionError__ReadonlyDataModified) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(15)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -830,25 +960,30 @@ func (obj *InstructionError__ReadonlyDataModified) BincodeSerialize() ([]byte, e
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ReadonlyDataModified(deserializer serde.Deserializer) (InstructionError__ReadonlyDataModified, error) {
 	var obj InstructionError__ReadonlyDataModified
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__DuplicateAccountIndex struct {
-}
+type InstructionError__DuplicateAccountIndex struct{}
 
 func (*InstructionError__DuplicateAccountIndex) isInstructionError() {}
 
 func (obj *InstructionError__DuplicateAccountIndex) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(16)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -858,25 +993,30 @@ func (obj *InstructionError__DuplicateAccountIndex) BincodeSerialize() ([]byte,
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__DuplicateAccountIndex(deserializer serde.Deserializer) (InstructionError__DuplicateAccountIndex, error) {
 	var obj InstructionError__DuplicateAccountIndex
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ExecutableModified struct {
-}
+type InstructionError__ExecutableModified struct{}
 
 func (*InstructionError__ExecutableModified) isInstructionError() {}
 
 func (obj *InstructionError__ExecutableModified) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(17)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -886,25 +1026,30 @@ func (obj *InstructionError__ExecutableModified) BincodeSerialize() ([]byte, err
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ExecutableModified(deserializer serde.Deserializer) (InstructionError__ExecutableModified, error) {
 	var obj InstructionError__ExecutableModified
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__RentEpochModified struct {
-}
+type InstructionError__RentEpochModified struct{}
 
 func (*InstructionError__RentEpochModified) isInstructionError() {}
 
 func (obj *InstructionError__RentEpochModified) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(18)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -914,25 +1059,30 @@ func (obj *InstructionError__RentEpochModified) BincodeSerialize() ([]byte, erro
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__RentEpochModified(deserializer serde.Deserializer) (InstructionError__RentEpochModified, error) {
 	var obj InstructionError__RentEpochModified
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__NotEnoughAccountKeys struct {
-}
+type InstructionError__NotEnoughAccountKeys struct{}
 
 func (*InstructionError__NotEnoughAccountKeys) isInstructionError() {}
 
 func (obj *InstructionError__NotEnoughAccountKeys) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(19)
 	serializer.DecreaseContainerDepth()
 	return nil
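The one-line isInstructionError() receivers that keep repeating here are Go's sealed-interface idiom: because the marker method is unexported, only types declared in this generated package can implement InstructionError, which keeps the variant set closed the way the original Rust enum is. A compile-time sketch, assuming the InstructionError interface declared earlier in this file:

	// Hypothetical compile-time assertions: each generated variant satisfies
	// the sealed InstructionError interface via its unexported marker method.
	var _ InstructionError = (*InstructionError__RentEpochModified)(nil)
	var _ InstructionError = (*InstructionError__NotEnoughAccountKeys)(nil)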
@@ -942,25 +1092,30 @@ func (obj *InstructionError__NotEnoughAccountKeys) BincodeSerialize() ([]byte, e
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__NotEnoughAccountKeys(deserializer serde.Deserializer) (InstructionError__NotEnoughAccountKeys, error) {
 	var obj InstructionError__NotEnoughAccountKeys
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__AccountDataSizeChanged struct {
-}
+type InstructionError__AccountDataSizeChanged struct{}
 
 func (*InstructionError__AccountDataSizeChanged) isInstructionError() {}
 
 func (obj *InstructionError__AccountDataSizeChanged) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(20)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -970,25 +1125,30 @@ func (obj *InstructionError__AccountDataSizeChanged) BincodeSerialize() ([]byte,
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__AccountDataSizeChanged(deserializer serde.Deserializer) (InstructionError__AccountDataSizeChanged, error) {
 	var obj InstructionError__AccountDataSizeChanged
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__AccountNotExecutable struct {
-}
+type InstructionError__AccountNotExecutable struct{}
 
 func (*InstructionError__AccountNotExecutable) isInstructionError() {}
 
 func (obj *InstructionError__AccountNotExecutable) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(21)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -998,25 +1158,30 @@ func (obj *InstructionError__AccountNotExecutable) BincodeSerialize() ([]byte, e
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__AccountNotExecutable(deserializer serde.Deserializer) (InstructionError__AccountNotExecutable, error) {
 	var obj InstructionError__AccountNotExecutable
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__AccountBorrowFailed struct {
-}
+type InstructionError__AccountBorrowFailed struct{}
 
 func (*InstructionError__AccountBorrowFailed) isInstructionError() {}
 
 func (obj *InstructionError__AccountBorrowFailed) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(22)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1026,25 +1191,30 @@ func (obj *InstructionError__AccountBorrowFailed) BincodeSerialize() ([]byte, er
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__AccountBorrowFailed(deserializer serde.Deserializer) (InstructionError__AccountBorrowFailed, error) {
 	var obj InstructionError__AccountBorrowFailed
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__AccountBorrowOutstanding struct {
-}
+type InstructionError__AccountBorrowOutstanding struct{}
 
 func (*InstructionError__AccountBorrowOutstanding) isInstructionError() {}
 
 func (obj *InstructionError__AccountBorrowOutstanding) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(23)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1054,25 +1224,30 @@ func (obj *InstructionError__AccountBorrowOutstanding) BincodeSerialize() ([]byt
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__AccountBorrowOutstanding(deserializer serde.Deserializer) (InstructionError__AccountBorrowOutstanding, error) {
 	var obj InstructionError__AccountBorrowOutstanding
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__DuplicateAccountOutOfSync struct {
-}
+type InstructionError__DuplicateAccountOutOfSync struct{}
 
 func (*InstructionError__DuplicateAccountOutOfSync) isInstructionError() {}
 
 func (obj *InstructionError__DuplicateAccountOutOfSync) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(24)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1082,14 +1257,18 @@ func (obj *InstructionError__DuplicateAccountOutOfSync) BincodeSerialize() ([]by
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__DuplicateAccountOutOfSync(deserializer serde.Deserializer) (InstructionError__DuplicateAccountOutOfSync, error) {
 	var obj InstructionError__DuplicateAccountOutOfSync
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
@@ -1099,9 +1278,13 @@ type InstructionError__Custom uint32
 func (*InstructionError__Custom) isInstructionError() {}
 
 func (obj *InstructionError__Custom) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(25)
-	if err := serializer.SerializeU32(((uint32)(*obj))); err != nil { return err }
+	if err := serializer.SerializeU32(((uint32)(*obj))); err != nil {
+		return err
+	}
 	serializer.DecreaseContainerDepth()
 	return nil
 }
@@ -1110,26 +1293,35 @@ func (obj *InstructionError__Custom) BincodeSerialize() ([]byte, error) {
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__Custom(deserializer serde.Deserializer) (InstructionError__Custom, error) {
 	var obj uint32
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return (InstructionError__Custom)(obj), err }
-	if val, err := deserializer.DeserializeU32(); err == nil { obj = val } else { return ((InstructionError__Custom)(obj)), err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return (InstructionError__Custom)(obj), err
+	}
+	if val, err := deserializer.DeserializeU32(); err == nil {
+		obj = val
+	} else {
+		return ((InstructionError__Custom)(obj)), err
+	}
 	deserializer.DecreaseContainerDepth()
 	return (InstructionError__Custom)(obj), nil
 }
 
-type InstructionError__InvalidError struct {
-}
+type InstructionError__InvalidError struct{}
 
 func (*InstructionError__InvalidError) isInstructionError() {}
 
 func (obj *InstructionError__InvalidError) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(26)
 	serializer.DecreaseContainerDepth()
 	return nil
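InstructionError__Custom is the one variant in this stretch that carries data: it is a named uint32, so Serialize writes variant index 25 followed by the wrapped value via SerializeU32, and load_InstructionError__Custom reads it back with DeserializeU32. A sketch of the expected layout, assuming bincode's default little-endian encoding; the value 6000 is a hypothetical example, not taken from the diff:

	custom := InstructionError__Custom(6000)
	payload, err := custom.BincodeSerialize()
	if err != nil {
		panic(err)
	}
	// expected: [25 0 0 0 112 23 0 0], variant index 25 then 6000 as a little-endian u32
	fmt.Println(payload)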
@@ -1139,25 +1331,30 @@ func (obj *InstructionError__InvalidError) BincodeSerialize() ([]byte, error) {
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__InvalidError(deserializer serde.Deserializer) (InstructionError__InvalidError, error) {
 	var obj InstructionError__InvalidError
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ExecutableDataModified struct {
-}
+type InstructionError__ExecutableDataModified struct{}
 
 func (*InstructionError__ExecutableDataModified) isInstructionError() {}
 
 func (obj *InstructionError__ExecutableDataModified) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(27)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1167,25 +1364,30 @@ func (obj *InstructionError__ExecutableDataModified) BincodeSerialize() ([]byte,
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ExecutableDataModified(deserializer serde.Deserializer) (InstructionError__ExecutableDataModified, error) {
 	var obj InstructionError__ExecutableDataModified
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ExecutableLamportChange struct {
-}
+type InstructionError__ExecutableLamportChange struct{}
 
 func (*InstructionError__ExecutableLamportChange) isInstructionError() {}
 
 func (obj *InstructionError__ExecutableLamportChange) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(28)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1195,25 +1397,30 @@ func (obj *InstructionError__ExecutableLamportChange) BincodeSerialize() ([]byte
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ExecutableLamportChange(deserializer serde.Deserializer) (InstructionError__ExecutableLamportChange, error) {
 	var obj InstructionError__ExecutableLamportChange
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ExecutableAccountNotRentExempt struct {
-}
+type InstructionError__ExecutableAccountNotRentExempt struct{}
 
 func (*InstructionError__ExecutableAccountNotRentExempt) isInstructionError() {}
 
 func (obj *InstructionError__ExecutableAccountNotRentExempt) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(29)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1223,25 +1430,30 @@ func (obj *InstructionError__ExecutableAccountNotRentExempt) BincodeSerialize()
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ExecutableAccountNotRentExempt(deserializer serde.Deserializer) (InstructionError__ExecutableAccountNotRentExempt, error) {
 	var obj InstructionError__ExecutableAccountNotRentExempt
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__UnsupportedProgramId struct {
-}
+type InstructionError__UnsupportedProgramId struct{}
 
 func (*InstructionError__UnsupportedProgramId) isInstructionError() {}
 
 func (obj *InstructionError__UnsupportedProgramId) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(30)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1251,25 +1463,30 @@ func (obj *InstructionError__UnsupportedProgramId) BincodeSerialize() ([]byte, e
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__UnsupportedProgramId(deserializer serde.Deserializer) (InstructionError__UnsupportedProgramId, error) {
 	var obj InstructionError__UnsupportedProgramId
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__CallDepth struct {
-}
+type InstructionError__CallDepth struct{}
 
 func (*InstructionError__CallDepth) isInstructionError() {}
 
 func (obj *InstructionError__CallDepth) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(31)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1279,25 +1496,30 @@ func (obj *InstructionError__CallDepth) BincodeSerialize() ([]byte, error) {
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__CallDepth(deserializer serde.Deserializer) (InstructionError__CallDepth, error) {
 	var obj InstructionError__CallDepth
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__MissingAccount struct {
-}
+type InstructionError__MissingAccount struct{}
 
 func (*InstructionError__MissingAccount) isInstructionError() {}
 
 func (obj *InstructionError__MissingAccount) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(32)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1307,25 +1529,30 @@ func (obj *InstructionError__MissingAccount) BincodeSerialize() ([]byte, error)
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__MissingAccount(deserializer serde.Deserializer) (InstructionError__MissingAccount, error) {
 	var obj InstructionError__MissingAccount
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ReentrancyNotAllowed struct {
-}
+type InstructionError__ReentrancyNotAllowed struct{}
 
 func (*InstructionError__ReentrancyNotAllowed) isInstructionError() {}
 
 func (obj *InstructionError__ReentrancyNotAllowed) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(33)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1335,25 +1562,30 @@ func (obj *InstructionError__ReentrancyNotAllowed) BincodeSerialize() ([]byte, e
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ReentrancyNotAllowed(deserializer serde.Deserializer) (InstructionError__ReentrancyNotAllowed, error) {
 	var obj InstructionError__ReentrancyNotAllowed
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__MaxSeedLengthExceeded struct {
-}
+type InstructionError__MaxSeedLengthExceeded struct{}
 
 func (*InstructionError__MaxSeedLengthExceeded) isInstructionError() {}
 
 func (obj *InstructionError__MaxSeedLengthExceeded) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(34)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1363,25 +1595,30 @@ func (obj *InstructionError__MaxSeedLengthExceeded) BincodeSerialize() ([]byte,
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__MaxSeedLengthExceeded(deserializer serde.Deserializer) (InstructionError__MaxSeedLengthExceeded, error) {
 	var obj InstructionError__MaxSeedLengthExceeded
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__InvalidSeeds struct {
-}
+type InstructionError__InvalidSeeds struct{}
 
 func (*InstructionError__InvalidSeeds) isInstructionError() {}
 
 func (obj *InstructionError__InvalidSeeds) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(35)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1391,25 +1628,30 @@ func (obj *InstructionError__InvalidSeeds) BincodeSerialize() ([]byte, error) {
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__InvalidSeeds(deserializer serde.Deserializer) (InstructionError__InvalidSeeds, error) {
 	var obj InstructionError__InvalidSeeds
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__InvalidRealloc struct {
-}
+type InstructionError__InvalidRealloc struct{}
 
 func (*InstructionError__InvalidRealloc) isInstructionError() {}
 
 func (obj *InstructionError__InvalidRealloc) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(36)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1419,25 +1661,30 @@ func (obj *InstructionError__InvalidRealloc) BincodeSerialize() ([]byte, error)
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__InvalidRealloc(deserializer serde.Deserializer) (InstructionError__InvalidRealloc, error) {
 	var obj InstructionError__InvalidRealloc
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type InstructionError__ComputationalBudgetExceeded struct {
-}
+type InstructionError__ComputationalBudgetExceeded struct{}
 
 func (*InstructionError__ComputationalBudgetExceeded) isInstructionError() {}
 
 func (obj *InstructionError__ComputationalBudgetExceeded) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(37)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1447,14 +1694,18 @@ func (obj *InstructionError__ComputationalBudgetExceeded) BincodeSerialize() ([]
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_InstructionError__ComputationalBudgetExceeded(deserializer serde.Deserializer) (InstructionError__ComputationalBudgetExceeded, error) {
 	var obj InstructionError__ComputationalBudgetExceeded
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
@@ -1467,7 +1718,9 @@ type Result interface {
 
 func DeserializeResult(deserializer serde.Deserializer) (Result, error) {
 	index, err := deserializer.DeserializeVariantIndex()
-	if err != nil { return nil, err }
+	if err != nil {
+		return nil, err
+	}
 
 	switch index {
 	case 0:
@@ -1494,7 +1747,7 @@ func BincodeDeserializeResult(input []byte) (Result, error) {
 		var obj Result
 		return obj, fmt.Errorf("Cannot deserialize null array")
 	}
-	deserializer := bincode.NewDeserializer(input);
+	deserializer := bincode.NewDeserializer(input)
 	obj, err := DeserializeResult(deserializer)
 	if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) {
 		return obj, fmt.Errorf("Some input bytes were not read")
@@ -1502,14 +1755,18 @@ func BincodeDeserializeResult(input []byte) (Result, error) {
 	return obj, err
 }
 
-type Result__Ok struct {}
+type Result__Ok struct{}
 
 func (*Result__Ok) isResult() {}
 
 func (obj *Result__Ok) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(0)
-	if err := serializer.SerializeUnit(((struct {})(*obj))); err != nil { return err }
+	if err := serializer.SerializeUnit(((struct{})(*obj))); err != nil {
+		return err
+	}
 	serializer.DecreaseContainerDepth()
 	return nil
 }
@@ -1518,15 +1775,23 @@ func (obj *Result__Ok) BincodeSerialize() ([]byte, error) {
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_Result__Ok(deserializer serde.Deserializer) (Result__Ok, error) {
-	var obj struct {}
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return (Result__Ok)(obj), err }
-	if val, err := deserializer.DeserializeUnit(); err == nil { obj = val } else { return ((Result__Ok)(obj)), err }
+	var obj struct{}
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return (Result__Ok)(obj), err
+	}
+	if val, err := deserializer.DeserializeUnit(); err == nil {
+		obj = val
+	} else {
+		return ((Result__Ok)(obj)), err
+	}
 	deserializer.DecreaseContainerDepth()
 	return (Result__Ok)(obj), nil
 }
@@ -1538,9 +1803,13 @@ type Result__Err struct {
 func (*Result__Err) isResult() {}
 
 func (obj *Result__Err) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(1)
-	if err := obj.Value.Serialize(serializer); err != nil { return err }
+	if err := obj.Value.Serialize(serializer); err != nil {
+		return err
+	}
 	serializer.DecreaseContainerDepth()
 	return nil
 }
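Result mirrors the Rust Result<(), TransactionError> used in transaction metadata: Result__Ok wraps a unit struct{} under variant index 0, Result__Err wraps a TransactionError under variant index 1, and DeserializeResult dispatches on the decoded index. A round-trip sketch, assuming the generated declarations are in scope:

	// Hypothetical round trip through the generated helpers.
	payload, err := (&Result__Ok{}).BincodeSerialize()
	if err != nil {
		panic(err)
	}
	res, err := BincodeDeserializeResult(payload)
	if err != nil {
		panic(err)
	}
	_, isOk := res.(*Result__Ok) // variant index 0 dispatches back to Result__Ok
	fmt.Println(isOk)            // expected: true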
@@ -1549,15 +1818,23 @@ func (obj *Result__Err) BincodeSerialize() ([]byte, error) {
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_Result__Err(deserializer serde.Deserializer) (Result__Err, error) {
 	var obj Result__Err
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
-	if val, err := DeserializeTransactionError(deserializer); err == nil { obj.Value = val } else { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
+	if val, err := DeserializeTransactionError(deserializer); err == nil {
+		obj.Value = val
+	} else {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
@@ -1570,7 +1847,9 @@ type TransactionError interface {
 
 func DeserializeTransactionError(deserializer serde.Deserializer) (TransactionError, error) {
 	index, err := deserializer.DeserializeVariantIndex()
-	if err != nil { return nil, err }
+	if err != nil {
+		return nil, err
+	}
 
 	switch index {
 	case 0:
@@ -1695,7 +1974,7 @@ func BincodeDeserializeTransactionError(input []byte) (TransactionError, error)
 		var obj TransactionError
 		return obj, fmt.Errorf("Cannot deserialize null array")
 	}
-	deserializer := bincode.NewDeserializer(input);
+	deserializer := bincode.NewDeserializer(input)
 	obj, err := DeserializeTransactionError(deserializer)
 	if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) {
 		return obj, fmt.Errorf("Some input bytes were not read")
@@ -1703,13 +1982,14 @@ func BincodeDeserializeTransactionError(input []byte) (TransactionError, error)
 	return obj, err
 }
 
-type TransactionError__AccountInUse struct {
-}
+type TransactionError__AccountInUse struct{}
 
 func (*TransactionError__AccountInUse) isTransactionError() {}
 
 func (obj *TransactionError__AccountInUse) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(0)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1719,25 +1999,30 @@ func (obj *TransactionError__AccountInUse) BincodeSerialize() ([]byte, error) {
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__AccountInUse(deserializer serde.Deserializer) (TransactionError__AccountInUse, error) {
 	var obj TransactionError__AccountInUse
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type TransactionError__AccountLoadedTwice struct {
-}
+type TransactionError__AccountLoadedTwice struct{}
 
 func (*TransactionError__AccountLoadedTwice) isTransactionError() {}
 
 func (obj *TransactionError__AccountLoadedTwice) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(1)
 	serializer.DecreaseContainerDepth()
 	return nil
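Both Bincode* entry points follow the same defensive pattern: reject nil input, deserialize exactly one value, then fail if the buffer offset has not reached the end of the input. A sketch of the failure mode, with a hypothetical stray byte appended:

	payload, err := (&TransactionError__AccountInUse{}).BincodeSerialize()
	if err != nil {
		panic(err)
	}
	// One trailing byte makes deserialization fail even though the prefix decodes cleanly.
	_, err = BincodeDeserializeTransactionError(append(payload, 0x00))
	fmt.Println(err) // expected: "Some input bytes were not read"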
@@ -1747,25 +2032,30 @@ func (obj *TransactionError__AccountLoadedTwice) BincodeSerialize() ([]byte, err
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__AccountLoadedTwice(deserializer serde.Deserializer) (TransactionError__AccountLoadedTwice, error) {
 	var obj TransactionError__AccountLoadedTwice
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type TransactionError__AccountNotFound struct {
-}
+type TransactionError__AccountNotFound struct{}
 
 func (*TransactionError__AccountNotFound) isTransactionError() {}
 
 func (obj *TransactionError__AccountNotFound) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(2)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1775,25 +2065,30 @@ func (obj *TransactionError__AccountNotFound) BincodeSerialize() ([]byte, error)
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__AccountNotFound(deserializer serde.Deserializer) (TransactionError__AccountNotFound, error) {
 	var obj TransactionError__AccountNotFound
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type TransactionError__ProgramAccountNotFound struct {
-}
+type TransactionError__ProgramAccountNotFound struct{}
 
 func (*TransactionError__ProgramAccountNotFound) isTransactionError() {}
 
 func (obj *TransactionError__ProgramAccountNotFound) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(3)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1803,25 +2098,30 @@ func (obj *TransactionError__ProgramAccountNotFound) BincodeSerialize() ([]byte,
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__ProgramAccountNotFound(deserializer serde.Deserializer) (TransactionError__ProgramAccountNotFound, error) {
 	var obj TransactionError__ProgramAccountNotFound
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type TransactionError__InsufficientFundsForFee struct {
-}
+type TransactionError__InsufficientFundsForFee struct{}
 
 func (*TransactionError__InsufficientFundsForFee) isTransactionError() {}
 
 func (obj *TransactionError__InsufficientFundsForFee) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(4)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1831,25 +2131,30 @@ func (obj *TransactionError__InsufficientFundsForFee) BincodeSerialize() ([]byte
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__InsufficientFundsForFee(deserializer serde.Deserializer) (TransactionError__InsufficientFundsForFee, error) {
 	var obj TransactionError__InsufficientFundsForFee
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type TransactionError__InvalidAccountForFee struct {
-}
+type TransactionError__InvalidAccountForFee struct{}
 
 func (*TransactionError__InvalidAccountForFee) isTransactionError() {}
 
 func (obj *TransactionError__InvalidAccountForFee) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(5)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1859,25 +2164,30 @@ func (obj *TransactionError__InvalidAccountForFee) BincodeSerialize() ([]byte, e
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__InvalidAccountForFee(deserializer serde.Deserializer) (TransactionError__InvalidAccountForFee, error) {
 	var obj TransactionError__InvalidAccountForFee
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type TransactionError__DuplicateSignature struct {
-}
+type TransactionError__DuplicateSignature struct{}
 
 func (*TransactionError__DuplicateSignature) isTransactionError() {}
 
 func (obj *TransactionError__DuplicateSignature) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(6)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1887,25 +2197,30 @@ func (obj *TransactionError__DuplicateSignature) BincodeSerialize() ([]byte, err
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__DuplicateSignature(deserializer serde.Deserializer) (TransactionError__DuplicateSignature, error) {
 	var obj TransactionError__DuplicateSignature
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
 
-type TransactionError__BlockhashNotFound struct {
-}
+type TransactionError__BlockhashNotFound struct{}
 
 func (*TransactionError__BlockhashNotFound) isTransactionError() {}
 
 func (obj *TransactionError__BlockhashNotFound) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(7)
 	serializer.DecreaseContainerDepth()
 	return nil
@@ -1915,14 +2230,18 @@ func (obj *TransactionError__BlockhashNotFound) BincodeSerialize() ([]byte, erro
 	if obj == nil {
 		return nil, fmt.Errorf("Cannot serialize null object")
 	}
-	serializer := bincode.NewSerializer();
-	if err := obj.Serialize(serializer); err != nil { return nil, err }
+	serializer := bincode.NewSerializer()
+	if err := obj.Serialize(serializer); err != nil {
+		return nil, err
+	}
 	return serializer.GetBytes(), nil
 }
 
 func load_TransactionError__BlockhashNotFound(deserializer serde.Deserializer) (TransactionError__BlockhashNotFound, error) {
 	var obj TransactionError__BlockhashNotFound
-	if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err }
+	if err := deserializer.IncreaseContainerDepth(); err != nil {
+		return obj, err
+	}
 	deserializer.DecreaseContainerDepth()
 	return obj, nil
 }
@@ -1935,10 +2254,16 @@ type TransactionError__InstructionError struct {
 func (*TransactionError__InstructionError) isTransactionError() {}
 
 func (obj *TransactionError__InstructionError) Serialize(serializer serde.Serializer) error {
-	if err := serializer.IncreaseContainerDepth(); err != nil { return err }
+	if err := serializer.IncreaseContainerDepth(); err != nil {
+		return err
+	}
 	serializer.SerializeVariantIndex(8)
-	if err := serializer.SerializeU8(obj.Field0); err != nil { return err }
-	if err := obj.Field1.Serialize(serializer); err != nil { return err }
+	if err := serializer.SerializeU8(obj.Field0); err != nil {
+		return err
+	}
+	if err := obj.Field1.Serialize(serializer); err != nil {
+		return err
+	}
 	serializer.DecreaseContainerDepth()
 	return nil
 }
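TransactionError__InstructionError is the tuple variant (u8, InstructionError): Field0 is serialized with SerializeU8 and Field1, itself a full InstructionError, is serialized in place right after it. In Solana's own type the u8 is the index of the failing instruction; that reading is an assumption here, not something the diff states. A construction sketch with hypothetical values:

	// Hypothetical: instruction 3 failed with ComputationalBudgetExceeded (variant 37).
	terr := &TransactionError__InstructionError{
		Field0: 3,
		Field1: &InstructionError__ComputationalBudgetExceeded{},
	}
	payload, err := terr.BincodeSerialize()
	if err != nil {
		panic(err)
	}
	// expected: [8 0 0 0 3 37 0 0 0], outer variant 8, the u8 index, nested variant 37
	fmt.Println(payload)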
deserializer.DeserializeU8(); err == nil { + obj.Field0 = val + } else { + return obj, err + } + if val, err := DeserializeInstructionError(deserializer); err == nil { + obj.Field1 = val + } else { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__CallChainTooDeep struct { -} +type TransactionError__CallChainTooDeep struct{} func (*TransactionError__CallChainTooDeep) isTransactionError() {} func (obj *TransactionError__CallChainTooDeep) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(9) serializer.DecreaseContainerDepth() return nil @@ -1977,25 +2315,30 @@ func (obj *TransactionError__CallChainTooDeep) BincodeSerialize() ([]byte, error if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__CallChainTooDeep(deserializer serde.Deserializer) (TransactionError__CallChainTooDeep, error) { var obj TransactionError__CallChainTooDeep - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__MissingSignatureForFee struct { -} +type TransactionError__MissingSignatureForFee struct{} func (*TransactionError__MissingSignatureForFee) isTransactionError() {} func (obj *TransactionError__MissingSignatureForFee) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(10) serializer.DecreaseContainerDepth() return nil @@ -2005,25 +2348,30 @@ func (obj *TransactionError__MissingSignatureForFee) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__MissingSignatureForFee(deserializer serde.Deserializer) (TransactionError__MissingSignatureForFee, error) { var obj TransactionError__MissingSignatureForFee - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__InvalidAccountIndex struct { -} +type TransactionError__InvalidAccountIndex struct{} func (*TransactionError__InvalidAccountIndex) isTransactionError() {} func (obj *TransactionError__InvalidAccountIndex) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(11) serializer.DecreaseContainerDepth() return nil @@ -2033,25 +2381,30 @@ func (obj 
*TransactionError__InvalidAccountIndex) BincodeSerialize() ([]byte, er if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__InvalidAccountIndex(deserializer serde.Deserializer) (TransactionError__InvalidAccountIndex, error) { var obj TransactionError__InvalidAccountIndex - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__SignatureFailure struct { -} +type TransactionError__SignatureFailure struct{} func (*TransactionError__SignatureFailure) isTransactionError() {} func (obj *TransactionError__SignatureFailure) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(12) serializer.DecreaseContainerDepth() return nil @@ -2061,25 +2414,30 @@ func (obj *TransactionError__SignatureFailure) BincodeSerialize() ([]byte, error if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__SignatureFailure(deserializer serde.Deserializer) (TransactionError__SignatureFailure, error) { var obj TransactionError__SignatureFailure - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__InvalidProgramForExecution struct { -} +type TransactionError__InvalidProgramForExecution struct{} func (*TransactionError__InvalidProgramForExecution) isTransactionError() {} func (obj *TransactionError__InvalidProgramForExecution) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(13) serializer.DecreaseContainerDepth() return nil @@ -2089,25 +2447,30 @@ func (obj *TransactionError__InvalidProgramForExecution) BincodeSerialize() ([]b if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__InvalidProgramForExecution(deserializer serde.Deserializer) (TransactionError__InvalidProgramForExecution, error) { var obj TransactionError__InvalidProgramForExecution - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type 
TransactionError__SanitizeFailure struct { -} +type TransactionError__SanitizeFailure struct{} func (*TransactionError__SanitizeFailure) isTransactionError() {} func (obj *TransactionError__SanitizeFailure) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(14) serializer.DecreaseContainerDepth() return nil @@ -2117,25 +2480,30 @@ func (obj *TransactionError__SanitizeFailure) BincodeSerialize() ([]byte, error) if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__SanitizeFailure(deserializer serde.Deserializer) (TransactionError__SanitizeFailure, error) { var obj TransactionError__SanitizeFailure - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__ClusterMaintenance struct { -} +type TransactionError__ClusterMaintenance struct{} func (*TransactionError__ClusterMaintenance) isTransactionError() {} func (obj *TransactionError__ClusterMaintenance) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(15) serializer.DecreaseContainerDepth() return nil @@ -2145,33 +2513,49 @@ func (obj *TransactionError__ClusterMaintenance) BincodeSerialize() ([]byte, err if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__ClusterMaintenance(deserializer serde.Deserializer) (TransactionError__ClusterMaintenance, error) { var obj TransactionError__ClusterMaintenance - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } type TransactionStatusMeta struct { - Status Result - Fee uint64 - PreBalances []uint64 - PostBalances []uint64 + Status Result + Fee uint64 + PreBalances []uint64 + PostBalances []uint64 InnerInstructions *[]InnerInstructions } func (obj *TransactionStatusMeta) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } - if err := obj.Status.Serialize(serializer); err != nil { return err } - if err := serializer.SerializeU64(obj.Fee); err != nil { return err } - if err := serialize_vector_u64(obj.PreBalances, serializer); err != nil { return err } - if err := serialize_vector_u64(obj.PostBalances, serializer); err != nil { return err } - if err := serialize_option_vector_InnerInstructions(obj.InnerInstructions, serializer); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + 
return err + } + if err := obj.Status.Serialize(serializer); err != nil { + return err + } + if err := serializer.SerializeU64(obj.Fee); err != nil { + return err + } + if err := serialize_vector_u64(obj.PreBalances, serializer); err != nil { + return err + } + if err := serialize_vector_u64(obj.PostBalances, serializer); err != nil { + return err + } + if err := serialize_option_vector_InnerInstructions(obj.InnerInstructions, serializer); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -2180,19 +2564,43 @@ func (obj *TransactionStatusMeta) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func DeserializeTransactionStatusMeta(deserializer serde.Deserializer) (TransactionStatusMeta, error) { var obj TransactionStatusMeta - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } - if val, err := DeserializeResult(deserializer); err == nil { obj.Status = val } else { return obj, err } - if val, err := deserializer.DeserializeU64(); err == nil { obj.Fee = val } else { return obj, err } - if val, err := deserialize_vector_u64(deserializer); err == nil { obj.PreBalances = val } else { return obj, err } - if val, err := deserialize_vector_u64(deserializer); err == nil { obj.PostBalances = val } else { return obj, err } - if val, err := deserialize_option_vector_InnerInstructions(deserializer); err == nil { obj.InnerInstructions = val } else { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } + if val, err := DeserializeResult(deserializer); err == nil { + obj.Status = val + } else { + return obj, err + } + if val, err := deserializer.DeserializeU64(); err == nil { + obj.Fee = val + } else { + return obj, err + } + if val, err := deserialize_vector_u64(deserializer); err == nil { + obj.PreBalances = val + } else { + return obj, err + } + if val, err := deserialize_vector_u64(deserializer); err == nil { + obj.PostBalances = val + } else { + return obj, err + } + if val, err := deserialize_option_vector_InnerInstructions(deserializer); err == nil { + obj.InnerInstructions = val + } else { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -2202,114 +2610,211 @@ func BincodeDeserializeTransactionStatusMeta(input []byte) (TransactionStatusMet var obj TransactionStatusMeta return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeTransactionStatusMeta(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { - return obj, fmt.Errorf("Some input bytes were not read") + // return obj, fmt.Errorf("Some input bytes were not read") + // TODO: decide whether unread trailing bytes should be a hard error again; + // until then, log a warning and return the successfully parsed object. + klog.Warningf( + "Parsed %d bytes, but input was %d bytes (%d bytes not read)", + deserializer.GetBufferOffset(), + len(input), + len(input)-int(deserializer.GetBufferOffset()), + ) } return obj, err } + func serialize_option_vector_InnerInstructions(value *[]InnerInstructions, serializer serde.Serializer) error { if value != nil { - if err := serializer.SerializeOptionTag(true); err != nil { return err } - if err := 
serialize_vector_InnerInstructions((*value), serializer); err != nil { return err } + if err := serializer.SerializeOptionTag(true); err != nil { + return err + } + if err := serialize_vector_InnerInstructions((*value), serializer); err != nil { + return err + } } else { - if err := serializer.SerializeOptionTag(false); err != nil { return err } + if err := serializer.SerializeOptionTag(false); err != nil { + return err + } } return nil } func deserialize_option_vector_InnerInstructions(deserializer serde.Deserializer) (*[]InnerInstructions, error) { tag, err := deserializer.DeserializeOptionTag() - if err != nil { return nil, err } + if err != nil { + return nil, err + } if tag { value := new([]InnerInstructions) - if val, err := deserialize_vector_InnerInstructions(deserializer); err == nil { *value = val } else { return nil, err } - return value, nil + if val, err := deserialize_vector_InnerInstructions(deserializer); err == nil { + *value = val + } else { + return nil, err + } + return value, nil } else { return nil, nil } } -func serialize_tuple1_u8(value struct {Field0 uint8}, serializer serde.Serializer) error { - if err := serializer.SerializeU8(value.Field0); err != nil { return err } +func serialize_tuple1_u8(value struct{ Field0 uint8 }, serializer serde.Serializer) error { + if err := serializer.SerializeU8(value.Field0); err != nil { + return err + } return nil } -func deserialize_tuple1_u8(deserializer serde.Deserializer) (struct {Field0 uint8}, error) { - var obj struct {Field0 uint8} - if val, err := deserializer.DeserializeU8(); err == nil { obj.Field0 = val } else { return obj, err } +func deserialize_tuple1_u8(deserializer serde.Deserializer) (struct{ Field0 uint8 }, error) { + var obj struct{ Field0 uint8 } + if val, err := deserializer.DeserializeU8(); err == nil { + obj.Field0 = val + } else { + return obj, err + } return obj, nil } -func serialize_tuple4_tuple1_u8_u8_u8_u8(value struct {Field0 struct {Field0 uint8}; Field1 uint8; Field2 uint8; Field3 uint8}, serializer serde.Serializer) error { - if err := serialize_tuple1_u8(value.Field0, serializer); err != nil { return err } - if err := serializer.SerializeU8(value.Field1); err != nil { return err } - if err := serializer.SerializeU8(value.Field2); err != nil { return err } - if err := serializer.SerializeU8(value.Field3); err != nil { return err } +func serialize_tuple4_tuple1_u8_u8_u8_u8(value struct { + Field0 struct{ Field0 uint8 } + Field1 uint8 + Field2 uint8 + Field3 uint8 +}, serializer serde.Serializer, +) error { + if err := serialize_tuple1_u8(value.Field0, serializer); err != nil { + return err + } + if err := serializer.SerializeU8(value.Field1); err != nil { + return err + } + if err := serializer.SerializeU8(value.Field2); err != nil { + return err + } + if err := serializer.SerializeU8(value.Field3); err != nil { + return err + } return nil } -func deserialize_tuple4_tuple1_u8_u8_u8_u8(deserializer serde.Deserializer) (struct {Field0 struct {Field0 uint8}; Field1 uint8; Field2 uint8; Field3 uint8}, error) { - var obj struct {Field0 struct {Field0 uint8}; Field1 uint8; Field2 uint8; Field3 uint8} - if val, err := deserialize_tuple1_u8(deserializer); err == nil { obj.Field0 = val } else { return obj, err } - if val, err := deserializer.DeserializeU8(); err == nil { obj.Field1 = val } else { return obj, err } - if val, err := deserializer.DeserializeU8(); err == nil { obj.Field2 = val } else { return obj, err } - if val, err := deserializer.DeserializeU8(); err == nil { obj.Field3 = val } else { 
return obj, err } +func deserialize_tuple4_tuple1_u8_u8_u8_u8(deserializer serde.Deserializer) (struct { + Field0 struct{ Field0 uint8 } + Field1 uint8 + Field2 uint8 + Field3 uint8 +}, error, +) { + var obj struct { + Field0 struct{ Field0 uint8 } + Field1 uint8 + Field2 uint8 + Field3 uint8 + } + if val, err := deserialize_tuple1_u8(deserializer); err == nil { + obj.Field0 = val + } else { + return obj, err + } + if val, err := deserializer.DeserializeU8(); err == nil { + obj.Field1 = val + } else { + return obj, err + } + if val, err := deserializer.DeserializeU8(); err == nil { + obj.Field2 = val + } else { + return obj, err + } + if val, err := deserializer.DeserializeU8(); err == nil { + obj.Field3 = val + } else { + return obj, err + } return obj, nil } func serialize_vector_CompiledInstruction(value []CompiledInstruction, serializer serde.Serializer) error { - if err := serializer.SerializeLen(uint64(len(value))); err != nil { return err } - for _, item := range(value) { - if err := item.Serialize(serializer); err != nil { return err } + if err := serializer.SerializeLen(uint64(len(value))); err != nil { + return err + } + for _, item := range value { + if err := item.Serialize(serializer); err != nil { + return err + } } return nil } func deserialize_vector_CompiledInstruction(deserializer serde.Deserializer) ([]CompiledInstruction, error) { length, err := deserializer.DeserializeLen() - if err != nil { return nil, err } + if err != nil { + return nil, err + } obj := make([]CompiledInstruction, length) - for i := range(obj) { - if val, err := DeserializeCompiledInstruction(deserializer); err == nil { obj[i] = val } else { return nil, err } + for i := range obj { + if val, err := DeserializeCompiledInstruction(deserializer); err == nil { + obj[i] = val + } else { + return nil, err + } } return obj, nil } func serialize_vector_InnerInstructions(value []InnerInstructions, serializer serde.Serializer) error { - if err := serializer.SerializeLen(uint64(len(value))); err != nil { return err } - for _, item := range(value) { - if err := item.Serialize(serializer); err != nil { return err } + if err := serializer.SerializeLen(uint64(len(value))); err != nil { + return err + } + for _, item := range value { + if err := item.Serialize(serializer); err != nil { + return err + } } return nil } func deserialize_vector_InnerInstructions(deserializer serde.Deserializer) ([]InnerInstructions, error) { length, err := deserializer.DeserializeLen() - if err != nil { return nil, err } + if err != nil { + return nil, err + } obj := make([]InnerInstructions, length) - for i := range(obj) { - if val, err := DeserializeInnerInstructions(deserializer); err == nil { obj[i] = val } else { return nil, err } + for i := range obj { + if val, err := DeserializeInnerInstructions(deserializer); err == nil { + obj[i] = val + } else { + return nil, err + } } return obj, nil } func serialize_vector_u64(value []uint64, serializer serde.Serializer) error { - if err := serializer.SerializeLen(uint64(len(value))); err != nil { return err } - for _, item := range(value) { - if err := serializer.SerializeU64(item); err != nil { return err } + if err := serializer.SerializeLen(uint64(len(value))); err != nil { + return err + } + for _, item := range value { + if err := serializer.SerializeU64(item); err != nil { + return err + } } return nil } func deserialize_vector_u64(deserializer serde.Deserializer) ([]uint64, error) { length, err := deserializer.DeserializeLen() - if err != nil { return nil, err } + if err != nil { + 
return nil, err + } obj := make([]uint64, length) - for i := range(obj) { - if val, err := deserializer.DeserializeU64(); err == nil { obj[i] = val } else { return nil, err } + for i := range obj { + if val, err := deserializer.DeserializeU64(); err == nil { + obj[i] = val + } else { + return nil, err + } } return obj, nil } - diff --git a/parse_legacy_transaction_status_meta/v-oldest/parse_legacy_transaction_status_meta_b7b4aa5d4d34ebf3fd338a64f4f2a5257b047bb4.go b/parse_legacy_transaction_status_meta/v-oldest/parse_legacy_transaction_status_meta_b7b4aa5d4d34ebf3fd338a64f4f2a5257b047bb4.go index cedf667b..d28555ef 100644 --- a/parse_legacy_transaction_status_meta/v-oldest/parse_legacy_transaction_status_meta_b7b4aa5d4d34ebf3fd338a64f4f2a5257b047bb4.go +++ b/parse_legacy_transaction_status_meta/v-oldest/parse_legacy_transaction_status_meta_b7b4aa5d4d34ebf3fd338a64f4f2a5257b047bb4.go @@ -1,13 +1,13 @@ package parse_legacy_transaction_status_meta_b7b4aa5d4d34ebf3fd338a64f4f2a5257b047bb4 - import ( "fmt" - "github.com/novifinancial/serde-reflection/serde-generate/runtime/golang/serde" + "github.com/novifinancial/serde-reflection/serde-generate/runtime/golang/bincode" + "github.com/novifinancial/serde-reflection/serde-generate/runtime/golang/serde" + "k8s.io/klog/v2" ) - type InstructionError interface { isInstructionError() Serialize(serializer serde.Serializer) error @@ -16,7 +16,9 @@ type InstructionError interface { func DeserializeInstructionError(deserializer serde.Deserializer) (InstructionError, error) { index, err := deserializer.DeserializeVariantIndex() - if err != nil { return nil, err } + if err != nil { + return nil, err + } switch index { case 0: @@ -218,7 +220,7 @@ func BincodeDeserializeInstructionError(input []byte) (InstructionError, error) var obj InstructionError return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeInstructionError(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { return obj, fmt.Errorf("Some input bytes were not read") @@ -226,13 +228,14 @@ func BincodeDeserializeInstructionError(input []byte) (InstructionError, error) return obj, err } -type InstructionError__GenericError struct { -} +type InstructionError__GenericError struct{} func (*InstructionError__GenericError) isInstructionError() {} func (obj *InstructionError__GenericError) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(0) serializer.DecreaseContainerDepth() return nil @@ -242,25 +245,30 @@ func (obj *InstructionError__GenericError) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__GenericError(deserializer serde.Deserializer) (InstructionError__GenericError, error) { var obj InstructionError__GenericError - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() 
return obj, nil } -type InstructionError__InvalidArgument struct { -} +type InstructionError__InvalidArgument struct{} func (*InstructionError__InvalidArgument) isInstructionError() {} func (obj *InstructionError__InvalidArgument) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(1) serializer.DecreaseContainerDepth() return nil @@ -270,25 +278,30 @@ func (obj *InstructionError__InvalidArgument) BincodeSerialize() ([]byte, error) if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__InvalidArgument(deserializer serde.Deserializer) (InstructionError__InvalidArgument, error) { var obj InstructionError__InvalidArgument - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__InvalidInstructionData struct { -} +type InstructionError__InvalidInstructionData struct{} func (*InstructionError__InvalidInstructionData) isInstructionError() {} func (obj *InstructionError__InvalidInstructionData) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(2) serializer.DecreaseContainerDepth() return nil @@ -298,25 +311,30 @@ func (obj *InstructionError__InvalidInstructionData) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__InvalidInstructionData(deserializer serde.Deserializer) (InstructionError__InvalidInstructionData, error) { var obj InstructionError__InvalidInstructionData - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__InvalidAccountData struct { -} +type InstructionError__InvalidAccountData struct{} func (*InstructionError__InvalidAccountData) isInstructionError() {} func (obj *InstructionError__InvalidAccountData) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(3) serializer.DecreaseContainerDepth() return nil @@ -326,25 +344,30 @@ func (obj *InstructionError__InvalidAccountData) BincodeSerialize() ([]byte, err if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if 
err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__InvalidAccountData(deserializer serde.Deserializer) (InstructionError__InvalidAccountData, error) { var obj InstructionError__InvalidAccountData - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__AccountDataTooSmall struct { -} +type InstructionError__AccountDataTooSmall struct{} func (*InstructionError__AccountDataTooSmall) isInstructionError() {} func (obj *InstructionError__AccountDataTooSmall) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(4) serializer.DecreaseContainerDepth() return nil @@ -354,25 +377,30 @@ func (obj *InstructionError__AccountDataTooSmall) BincodeSerialize() ([]byte, er if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__AccountDataTooSmall(deserializer serde.Deserializer) (InstructionError__AccountDataTooSmall, error) { var obj InstructionError__AccountDataTooSmall - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__InsufficientFunds struct { -} +type InstructionError__InsufficientFunds struct{} func (*InstructionError__InsufficientFunds) isInstructionError() {} func (obj *InstructionError__InsufficientFunds) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(5) serializer.DecreaseContainerDepth() return nil @@ -382,25 +410,30 @@ func (obj *InstructionError__InsufficientFunds) BincodeSerialize() ([]byte, erro if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__InsufficientFunds(deserializer serde.Deserializer) (InstructionError__InsufficientFunds, error) { var obj InstructionError__InsufficientFunds - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__IncorrectProgramId struct { -} +type InstructionError__IncorrectProgramId struct{} func (*InstructionError__IncorrectProgramId) isInstructionError() {} func (obj *InstructionError__IncorrectProgramId) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := 
serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(6) serializer.DecreaseContainerDepth() return nil @@ -410,25 +443,30 @@ func (obj *InstructionError__IncorrectProgramId) BincodeSerialize() ([]byte, err if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__IncorrectProgramId(deserializer serde.Deserializer) (InstructionError__IncorrectProgramId, error) { var obj InstructionError__IncorrectProgramId - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__MissingRequiredSignature struct { -} +type InstructionError__MissingRequiredSignature struct{} func (*InstructionError__MissingRequiredSignature) isInstructionError() {} func (obj *InstructionError__MissingRequiredSignature) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(7) serializer.DecreaseContainerDepth() return nil @@ -438,25 +476,30 @@ func (obj *InstructionError__MissingRequiredSignature) BincodeSerialize() ([]byt if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__MissingRequiredSignature(deserializer serde.Deserializer) (InstructionError__MissingRequiredSignature, error) { var obj InstructionError__MissingRequiredSignature - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__AccountAlreadyInitialized struct { -} +type InstructionError__AccountAlreadyInitialized struct{} func (*InstructionError__AccountAlreadyInitialized) isInstructionError() {} func (obj *InstructionError__AccountAlreadyInitialized) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(8) serializer.DecreaseContainerDepth() return nil @@ -466,25 +509,30 @@ func (obj *InstructionError__AccountAlreadyInitialized) BincodeSerialize() ([]by if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__AccountAlreadyInitialized(deserializer serde.Deserializer) (InstructionError__AccountAlreadyInitialized, error) { var obj InstructionError__AccountAlreadyInitialized - if err := 
deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__UninitializedAccount struct { -} +type InstructionError__UninitializedAccount struct{} func (*InstructionError__UninitializedAccount) isInstructionError() {} func (obj *InstructionError__UninitializedAccount) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(9) serializer.DecreaseContainerDepth() return nil @@ -494,25 +542,30 @@ func (obj *InstructionError__UninitializedAccount) BincodeSerialize() ([]byte, e if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__UninitializedAccount(deserializer serde.Deserializer) (InstructionError__UninitializedAccount, error) { var obj InstructionError__UninitializedAccount - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__UnbalancedInstruction struct { -} +type InstructionError__UnbalancedInstruction struct{} func (*InstructionError__UnbalancedInstruction) isInstructionError() {} func (obj *InstructionError__UnbalancedInstruction) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(10) serializer.DecreaseContainerDepth() return nil @@ -522,25 +575,30 @@ func (obj *InstructionError__UnbalancedInstruction) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__UnbalancedInstruction(deserializer serde.Deserializer) (InstructionError__UnbalancedInstruction, error) { var obj InstructionError__UnbalancedInstruction - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__ModifiedProgramId struct { -} +type InstructionError__ModifiedProgramId struct{} func (*InstructionError__ModifiedProgramId) isInstructionError() {} func (obj *InstructionError__ModifiedProgramId) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(11) serializer.DecreaseContainerDepth() return nil @@ -550,25 +608,30 @@ func (obj *InstructionError__ModifiedProgramId) BincodeSerialize() ([]byte, erro if obj == 
nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__ModifiedProgramId(deserializer serde.Deserializer) (InstructionError__ModifiedProgramId, error) { var obj InstructionError__ModifiedProgramId - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__ExternalAccountLamportSpend struct { -} +type InstructionError__ExternalAccountLamportSpend struct{} func (*InstructionError__ExternalAccountLamportSpend) isInstructionError() {} func (obj *InstructionError__ExternalAccountLamportSpend) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(12) serializer.DecreaseContainerDepth() return nil @@ -578,25 +641,30 @@ func (obj *InstructionError__ExternalAccountLamportSpend) BincodeSerialize() ([] if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__ExternalAccountLamportSpend(deserializer serde.Deserializer) (InstructionError__ExternalAccountLamportSpend, error) { var obj InstructionError__ExternalAccountLamportSpend - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__ExternalAccountDataModified struct { -} +type InstructionError__ExternalAccountDataModified struct{} func (*InstructionError__ExternalAccountDataModified) isInstructionError() {} func (obj *InstructionError__ExternalAccountDataModified) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(13) serializer.DecreaseContainerDepth() return nil @@ -606,25 +674,30 @@ func (obj *InstructionError__ExternalAccountDataModified) BincodeSerialize() ([] if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__ExternalAccountDataModified(deserializer serde.Deserializer) (InstructionError__ExternalAccountDataModified, error) { var obj InstructionError__ExternalAccountDataModified - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type 
InstructionError__ReadonlyLamportChange struct { -} +type InstructionError__ReadonlyLamportChange struct{} func (*InstructionError__ReadonlyLamportChange) isInstructionError() {} func (obj *InstructionError__ReadonlyLamportChange) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(14) serializer.DecreaseContainerDepth() return nil @@ -634,25 +707,30 @@ func (obj *InstructionError__ReadonlyLamportChange) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__ReadonlyLamportChange(deserializer serde.Deserializer) (InstructionError__ReadonlyLamportChange, error) { var obj InstructionError__ReadonlyLamportChange - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__ReadonlyDataModified struct { -} +type InstructionError__ReadonlyDataModified struct{} func (*InstructionError__ReadonlyDataModified) isInstructionError() {} func (obj *InstructionError__ReadonlyDataModified) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(15) serializer.DecreaseContainerDepth() return nil @@ -662,25 +740,30 @@ func (obj *InstructionError__ReadonlyDataModified) BincodeSerialize() ([]byte, e if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__ReadonlyDataModified(deserializer serde.Deserializer) (InstructionError__ReadonlyDataModified, error) { var obj InstructionError__ReadonlyDataModified - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__DuplicateAccountIndex struct { -} +type InstructionError__DuplicateAccountIndex struct{} func (*InstructionError__DuplicateAccountIndex) isInstructionError() {} func (obj *InstructionError__DuplicateAccountIndex) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(16) serializer.DecreaseContainerDepth() return nil @@ -690,25 +773,30 @@ func (obj *InstructionError__DuplicateAccountIndex) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := 
bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__DuplicateAccountIndex(deserializer serde.Deserializer) (InstructionError__DuplicateAccountIndex, error) { var obj InstructionError__DuplicateAccountIndex - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__ExecutableModified struct { -} +type InstructionError__ExecutableModified struct{} func (*InstructionError__ExecutableModified) isInstructionError() {} func (obj *InstructionError__ExecutableModified) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(17) serializer.DecreaseContainerDepth() return nil @@ -718,25 +806,30 @@ func (obj *InstructionError__ExecutableModified) BincodeSerialize() ([]byte, err if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__ExecutableModified(deserializer serde.Deserializer) (InstructionError__ExecutableModified, error) { var obj InstructionError__ExecutableModified - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__RentEpochModified struct { -} +type InstructionError__RentEpochModified struct{} func (*InstructionError__RentEpochModified) isInstructionError() {} func (obj *InstructionError__RentEpochModified) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(18) serializer.DecreaseContainerDepth() return nil @@ -746,25 +839,30 @@ func (obj *InstructionError__RentEpochModified) BincodeSerialize() ([]byte, erro if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__RentEpochModified(deserializer serde.Deserializer) (InstructionError__RentEpochModified, error) { var obj InstructionError__RentEpochModified - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__NotEnoughAccountKeys struct { -} +type InstructionError__NotEnoughAccountKeys struct{} func (*InstructionError__NotEnoughAccountKeys) isInstructionError() {} func (obj *InstructionError__NotEnoughAccountKeys) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err 
!= nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(19) serializer.DecreaseContainerDepth() return nil @@ -774,25 +872,30 @@ func (obj *InstructionError__NotEnoughAccountKeys) BincodeSerialize() ([]byte, e if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__NotEnoughAccountKeys(deserializer serde.Deserializer) (InstructionError__NotEnoughAccountKeys, error) { var obj InstructionError__NotEnoughAccountKeys - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__AccountDataSizeChanged struct { -} +type InstructionError__AccountDataSizeChanged struct{} func (*InstructionError__AccountDataSizeChanged) isInstructionError() {} func (obj *InstructionError__AccountDataSizeChanged) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(20) serializer.DecreaseContainerDepth() return nil @@ -802,25 +905,30 @@ func (obj *InstructionError__AccountDataSizeChanged) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__AccountDataSizeChanged(deserializer serde.Deserializer) (InstructionError__AccountDataSizeChanged, error) { var obj InstructionError__AccountDataSizeChanged - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__AccountNotExecutable struct { -} +type InstructionError__AccountNotExecutable struct{} func (*InstructionError__AccountNotExecutable) isInstructionError() {} func (obj *InstructionError__AccountNotExecutable) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(21) serializer.DecreaseContainerDepth() return nil @@ -830,25 +938,30 @@ func (obj *InstructionError__AccountNotExecutable) BincodeSerialize() ([]byte, e if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__AccountNotExecutable(deserializer serde.Deserializer) (InstructionError__AccountNotExecutable, error) { var obj InstructionError__AccountNotExecutable - if err := 
deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__AccountBorrowFailed struct { -} +type InstructionError__AccountBorrowFailed struct{} func (*InstructionError__AccountBorrowFailed) isInstructionError() {} func (obj *InstructionError__AccountBorrowFailed) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(22) serializer.DecreaseContainerDepth() return nil @@ -858,25 +971,30 @@ func (obj *InstructionError__AccountBorrowFailed) BincodeSerialize() ([]byte, er if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__AccountBorrowFailed(deserializer serde.Deserializer) (InstructionError__AccountBorrowFailed, error) { var obj InstructionError__AccountBorrowFailed - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__AccountBorrowOutstanding struct { -} +type InstructionError__AccountBorrowOutstanding struct{} func (*InstructionError__AccountBorrowOutstanding) isInstructionError() {} func (obj *InstructionError__AccountBorrowOutstanding) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(23) serializer.DecreaseContainerDepth() return nil @@ -886,25 +1004,30 @@ func (obj *InstructionError__AccountBorrowOutstanding) BincodeSerialize() ([]byt if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__AccountBorrowOutstanding(deserializer serde.Deserializer) (InstructionError__AccountBorrowOutstanding, error) { var obj InstructionError__AccountBorrowOutstanding - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type InstructionError__DuplicateAccountOutOfSync struct { -} +type InstructionError__DuplicateAccountOutOfSync struct{} func (*InstructionError__DuplicateAccountOutOfSync) isInstructionError() {} func (obj *InstructionError__DuplicateAccountOutOfSync) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(24) serializer.DecreaseContainerDepth() return nil @@ -914,14 +1037,18 @@ func (obj 
*InstructionError__DuplicateAccountOutOfSync) BincodeSerialize() ([]by if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__DuplicateAccountOutOfSync(deserializer serde.Deserializer) (InstructionError__DuplicateAccountOutOfSync, error) { var obj InstructionError__DuplicateAccountOutOfSync - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -931,9 +1058,13 @@ type InstructionError__CustomError uint32 func (*InstructionError__CustomError) isInstructionError() {} func (obj *InstructionError__CustomError) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(25) - if err := serializer.SerializeU32(((uint32)(*obj))); err != nil { return err } + if err := serializer.SerializeU32(((uint32)(*obj))); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -942,26 +1073,35 @@ func (obj *InstructionError__CustomError) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_InstructionError__CustomError(deserializer serde.Deserializer) (InstructionError__CustomError, error) { var obj uint32 - if err := deserializer.IncreaseContainerDepth(); err != nil { return (InstructionError__CustomError)(obj), err } - if val, err := deserializer.DeserializeU32(); err == nil { obj = val } else { return ((InstructionError__CustomError)(obj)), err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return (InstructionError__CustomError)(obj), err + } + if val, err := deserializer.DeserializeU32(); err == nil { + obj = val + } else { + return ((InstructionError__CustomError)(obj)), err + } deserializer.DecreaseContainerDepth() return (InstructionError__CustomError)(obj), nil } -type InstructionError__InvalidError struct { -} +type InstructionError__InvalidError struct{} func (*InstructionError__InvalidError) isInstructionError() {} func (obj *InstructionError__InvalidError) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(26) serializer.DecreaseContainerDepth() return nil @@ -971,14 +1111,18 @@ func (obj *InstructionError__InvalidError) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func 
load_InstructionError__InvalidError(deserializer serde.Deserializer) (InstructionError__InvalidError, error) { var obj InstructionError__InvalidError - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -991,7 +1135,9 @@ type Result interface { func DeserializeResult(deserializer serde.Deserializer) (Result, error) { index, err := deserializer.DeserializeVariantIndex() - if err != nil { return nil, err } + if err != nil { + return nil, err + } switch index { case 0: @@ -1018,7 +1164,7 @@ func BincodeDeserializeResult(input []byte) (Result, error) { var obj Result return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeResult(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { return obj, fmt.Errorf("Some input bytes were not read") @@ -1026,14 +1172,18 @@ func BincodeDeserializeResult(input []byte) (Result, error) { return obj, err } -type Result__Ok struct {} +type Result__Ok struct{} func (*Result__Ok) isResult() {} func (obj *Result__Ok) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(0) - if err := serializer.SerializeUnit(((struct {})(*obj))); err != nil { return err } + if err := serializer.SerializeUnit(((struct{})(*obj))); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -1042,15 +1192,23 @@ func (obj *Result__Ok) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_Result__Ok(deserializer serde.Deserializer) (Result__Ok, error) { - var obj struct {} - if err := deserializer.IncreaseContainerDepth(); err != nil { return (Result__Ok)(obj), err } - if val, err := deserializer.DeserializeUnit(); err == nil { obj = val } else { return ((Result__Ok)(obj)), err } + var obj struct{} + if err := deserializer.IncreaseContainerDepth(); err != nil { + return (Result__Ok)(obj), err + } + if val, err := deserializer.DeserializeUnit(); err == nil { + obj = val + } else { + return ((Result__Ok)(obj)), err + } deserializer.DecreaseContainerDepth() return (Result__Ok)(obj), nil } @@ -1062,9 +1220,13 @@ type Result__Err struct { func (*Result__Err) isResult() {} func (obj *Result__Err) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(1) - if err := obj.Value.Serialize(serializer); err != nil { return err } + if err := obj.Value.Serialize(serializer); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -1073,15 +1235,23 @@ func (obj *Result__Err) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := 
obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_Result__Err(deserializer serde.Deserializer) (Result__Err, error) { var obj Result__Err - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } - if val, err := DeserializeTransactionError(deserializer); err == nil { obj.Value = val } else { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } + if val, err := DeserializeTransactionError(deserializer); err == nil { + obj.Value = val + } else { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -1094,7 +1264,9 @@ type TransactionError interface { func DeserializeTransactionError(deserializer serde.Deserializer) (TransactionError, error) { index, err := deserializer.DeserializeVariantIndex() - if err != nil { return nil, err } + if err != nil { + return nil, err + } switch index { case 0: @@ -1205,7 +1377,7 @@ func BincodeDeserializeTransactionError(input []byte) (TransactionError, error) var obj TransactionError return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeTransactionError(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { return obj, fmt.Errorf("Some input bytes were not read") @@ -1213,13 +1385,14 @@ func BincodeDeserializeTransactionError(input []byte) (TransactionError, error) return obj, err } -type TransactionError__AccountInUse struct { -} +type TransactionError__AccountInUse struct{} func (*TransactionError__AccountInUse) isTransactionError() {} func (obj *TransactionError__AccountInUse) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(0) serializer.DecreaseContainerDepth() return nil @@ -1229,25 +1402,30 @@ func (obj *TransactionError__AccountInUse) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__AccountInUse(deserializer serde.Deserializer) (TransactionError__AccountInUse, error) { var obj TransactionError__AccountInUse - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__AccountLoadedTwice struct { -} +type TransactionError__AccountLoadedTwice struct{} func (*TransactionError__AccountLoadedTwice) isTransactionError() {} func (obj *TransactionError__AccountLoadedTwice) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(1) serializer.DecreaseContainerDepth() return nil @@ -1257,25 +1435,30 @@ func (obj *TransactionError__AccountLoadedTwice) 
BincodeSerialize() ([]byte, err if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__AccountLoadedTwice(deserializer serde.Deserializer) (TransactionError__AccountLoadedTwice, error) { var obj TransactionError__AccountLoadedTwice - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__AccountNotFound struct { -} +type TransactionError__AccountNotFound struct{} func (*TransactionError__AccountNotFound) isTransactionError() {} func (obj *TransactionError__AccountNotFound) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(2) serializer.DecreaseContainerDepth() return nil @@ -1285,25 +1468,30 @@ func (obj *TransactionError__AccountNotFound) BincodeSerialize() ([]byte, error) if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__AccountNotFound(deserializer serde.Deserializer) (TransactionError__AccountNotFound, error) { var obj TransactionError__AccountNotFound - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__ProgramAccountNotFound struct { -} +type TransactionError__ProgramAccountNotFound struct{} func (*TransactionError__ProgramAccountNotFound) isTransactionError() {} func (obj *TransactionError__ProgramAccountNotFound) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(3) serializer.DecreaseContainerDepth() return nil @@ -1313,25 +1501,30 @@ func (obj *TransactionError__ProgramAccountNotFound) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__ProgramAccountNotFound(deserializer serde.Deserializer) (TransactionError__ProgramAccountNotFound, error) { var obj TransactionError__ProgramAccountNotFound - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__InsufficientFundsForFee struct { -} +type 
TransactionError__InsufficientFundsForFee struct{} func (*TransactionError__InsufficientFundsForFee) isTransactionError() {} func (obj *TransactionError__InsufficientFundsForFee) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(4) serializer.DecreaseContainerDepth() return nil @@ -1341,25 +1534,30 @@ func (obj *TransactionError__InsufficientFundsForFee) BincodeSerialize() ([]byte if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__InsufficientFundsForFee(deserializer serde.Deserializer) (TransactionError__InsufficientFundsForFee, error) { var obj TransactionError__InsufficientFundsForFee - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__InvalidAccountForFee struct { -} +type TransactionError__InvalidAccountForFee struct{} func (*TransactionError__InvalidAccountForFee) isTransactionError() {} func (obj *TransactionError__InvalidAccountForFee) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(5) serializer.DecreaseContainerDepth() return nil @@ -1369,25 +1567,30 @@ func (obj *TransactionError__InvalidAccountForFee) BincodeSerialize() ([]byte, e if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__InvalidAccountForFee(deserializer serde.Deserializer) (TransactionError__InvalidAccountForFee, error) { var obj TransactionError__InvalidAccountForFee - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__DuplicateSignature struct { -} +type TransactionError__DuplicateSignature struct{} func (*TransactionError__DuplicateSignature) isTransactionError() {} func (obj *TransactionError__DuplicateSignature) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(6) serializer.DecreaseContainerDepth() return nil @@ -1397,25 +1600,30 @@ func (obj *TransactionError__DuplicateSignature) BincodeSerialize() ([]byte, err if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err 
!= nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__DuplicateSignature(deserializer serde.Deserializer) (TransactionError__DuplicateSignature, error) { var obj TransactionError__DuplicateSignature - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__BlockhashNotFound struct { -} +type TransactionError__BlockhashNotFound struct{} func (*TransactionError__BlockhashNotFound) isTransactionError() {} func (obj *TransactionError__BlockhashNotFound) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(7) serializer.DecreaseContainerDepth() return nil @@ -1425,14 +1633,18 @@ func (obj *TransactionError__BlockhashNotFound) BincodeSerialize() ([]byte, erro if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__BlockhashNotFound(deserializer serde.Deserializer) (TransactionError__BlockhashNotFound, error) { var obj TransactionError__BlockhashNotFound - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -1445,10 +1657,16 @@ type TransactionError__InstructionError struct { func (*TransactionError__InstructionError) isTransactionError() {} func (obj *TransactionError__InstructionError) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(8) - if err := serializer.SerializeU8(obj.Field0); err != nil { return err } - if err := obj.Field1.Serialize(serializer); err != nil { return err } + if err := serializer.SerializeU8(obj.Field0); err != nil { + return err + } + if err := obj.Field1.Serialize(serializer); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -1457,27 +1675,40 @@ func (obj *TransactionError__InstructionError) BincodeSerialize() ([]byte, error if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__InstructionError(deserializer serde.Deserializer) (TransactionError__InstructionError, error) { var obj TransactionError__InstructionError - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } - if val, err := deserializer.DeserializeU8(); err == nil { obj.Field0 = val } else { return obj, err } - if val, err := DeserializeInstructionError(deserializer); err == nil { obj.Field1 = val } else { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + 
return obj, err + } + if val, err := deserializer.DeserializeU8(); err == nil { + obj.Field0 = val + } else { + return obj, err + } + if val, err := DeserializeInstructionError(deserializer); err == nil { + obj.Field1 = val + } else { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__CallChainTooDeep struct { -} +type TransactionError__CallChainTooDeep struct{} func (*TransactionError__CallChainTooDeep) isTransactionError() {} func (obj *TransactionError__CallChainTooDeep) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(9) serializer.DecreaseContainerDepth() return nil @@ -1487,25 +1718,30 @@ func (obj *TransactionError__CallChainTooDeep) BincodeSerialize() ([]byte, error if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__CallChainTooDeep(deserializer serde.Deserializer) (TransactionError__CallChainTooDeep, error) { var obj TransactionError__CallChainTooDeep - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__MissingSignatureForFee struct { -} +type TransactionError__MissingSignatureForFee struct{} func (*TransactionError__MissingSignatureForFee) isTransactionError() {} func (obj *TransactionError__MissingSignatureForFee) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(10) serializer.DecreaseContainerDepth() return nil @@ -1515,25 +1751,30 @@ func (obj *TransactionError__MissingSignatureForFee) BincodeSerialize() ([]byte, if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__MissingSignatureForFee(deserializer serde.Deserializer) (TransactionError__MissingSignatureForFee, error) { var obj TransactionError__MissingSignatureForFee - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__InvalidAccountIndex struct { -} +type TransactionError__InvalidAccountIndex struct{} func (*TransactionError__InvalidAccountIndex) isTransactionError() {} func (obj *TransactionError__InvalidAccountIndex) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(11) serializer.DecreaseContainerDepth() return nil @@ -1543,25 +1784,30 
@@ func (obj *TransactionError__InvalidAccountIndex) BincodeSerialize() ([]byte, er if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__InvalidAccountIndex(deserializer serde.Deserializer) (TransactionError__InvalidAccountIndex, error) { var obj TransactionError__InvalidAccountIndex - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__SignatureFailure struct { -} +type TransactionError__SignatureFailure struct{} func (*TransactionError__SignatureFailure) isTransactionError() {} func (obj *TransactionError__SignatureFailure) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(12) serializer.DecreaseContainerDepth() return nil @@ -1571,25 +1817,30 @@ func (obj *TransactionError__SignatureFailure) BincodeSerialize() ([]byte, error if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__SignatureFailure(deserializer serde.Deserializer) (TransactionError__SignatureFailure, error) { var obj TransactionError__SignatureFailure - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } -type TransactionError__InvalidProgramForExecution struct { -} +type TransactionError__InvalidProgramForExecution struct{} func (*TransactionError__InvalidProgramForExecution) isTransactionError() {} func (obj *TransactionError__InvalidProgramForExecution) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } serializer.SerializeVariantIndex(13) serializer.DecreaseContainerDepth() return nil @@ -1599,31 +1850,45 @@ func (obj *TransactionError__InvalidProgramForExecution) BincodeSerialize() ([]b if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func load_TransactionError__InvalidProgramForExecution(deserializer serde.Deserializer) (TransactionError__InvalidProgramForExecution, error) { var obj TransactionError__InvalidProgramForExecution - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } 
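The long run of near-identical variant types above is machine-generated bincode glue for the Rust TransactionError and InstructionError enums; every (de)serializer follows the same layout of a variant index followed by the variant's fields, if any. Below is a minimal stdlib-only sketch of that wire format for orientation; the u32 little-endian tag is an assumption inferred from the SerializeVariantIndex/SerializeU32 pairing above, and encodeInstructionErrorVariant is a hypothetical helper, not part of this patch:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeInstructionErrorVariant mirrors what the generated code emits for
// TransactionError__InstructionError wrapping InstructionError__CustomError:
// outer tag 8, the u8 instruction index, inner tag 25, then the u32 code.
func encodeInstructionErrorVariant(ixIndex uint8, customCode uint32) []byte {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.LittleEndian, uint32(8))  // TransactionError variant index (assumed u32 LE)
	buf.WriteByte(ixIndex)                             // Field0: which instruction failed
	binary.Write(buf, binary.LittleEndian, uint32(25)) // InstructionError__CustomError variant index
	binary.Write(buf, binary.LittleEndian, customCode) // the program's custom error code
	return buf.Bytes()
}

func main() {
	// Prints: 08 00 00 00 02 19 00 00 00 70 17 00 00 (instruction 2, custom error 6000).
	fmt.Printf("% x\n", encodeInstructionErrorVariant(2, 6000))
}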
type TransactionStatusMeta struct { - Status Result - Fee uint64 - PreBalances []uint64 + Status Result + Fee uint64 + PreBalances []uint64 PostBalances []uint64 } func (obj *TransactionStatusMeta) Serialize(serializer serde.Serializer) error { - if err := serializer.IncreaseContainerDepth(); err != nil { return err } - if err := obj.Status.Serialize(serializer); err != nil { return err } - if err := serializer.SerializeU64(obj.Fee); err != nil { return err } - if err := serialize_vector_u64(obj.PreBalances, serializer); err != nil { return err } - if err := serialize_vector_u64(obj.PostBalances, serializer); err != nil { return err } + if err := serializer.IncreaseContainerDepth(); err != nil { + return err + } + if err := obj.Status.Serialize(serializer); err != nil { + return err + } + if err := serializer.SerializeU64(obj.Fee); err != nil { + return err + } + if err := serialize_vector_u64(obj.PreBalances, serializer); err != nil { + return err + } + if err := serialize_vector_u64(obj.PostBalances, serializer); err != nil { + return err + } serializer.DecreaseContainerDepth() return nil } @@ -1632,18 +1897,38 @@ func (obj *TransactionStatusMeta) BincodeSerialize() ([]byte, error) { if obj == nil { return nil, fmt.Errorf("Cannot serialize null object") } - serializer := bincode.NewSerializer(); - if err := obj.Serialize(serializer); err != nil { return nil, err } + serializer := bincode.NewSerializer() + if err := obj.Serialize(serializer); err != nil { + return nil, err + } return serializer.GetBytes(), nil } func DeserializeTransactionStatusMeta(deserializer serde.Deserializer) (TransactionStatusMeta, error) { var obj TransactionStatusMeta - if err := deserializer.IncreaseContainerDepth(); err != nil { return obj, err } - if val, err := DeserializeResult(deserializer); err == nil { obj.Status = val } else { return obj, err } - if val, err := deserializer.DeserializeU64(); err == nil { obj.Fee = val } else { return obj, err } - if val, err := deserialize_vector_u64(deserializer); err == nil { obj.PreBalances = val } else { return obj, err } - if val, err := deserialize_vector_u64(deserializer); err == nil { obj.PostBalances = val } else { return obj, err } + if err := deserializer.IncreaseContainerDepth(); err != nil { + return obj, err + } + if val, err := DeserializeResult(deserializer); err == nil { + obj.Status = val + } else { + return obj, err + } + if val, err := deserializer.DeserializeU64(); err == nil { + obj.Fee = val + } else { + return obj, err + } + if val, err := deserialize_vector_u64(deserializer); err == nil { + obj.PreBalances = val + } else { + return obj, err + } + if val, err := deserialize_vector_u64(deserializer); err == nil { + obj.PostBalances = val + } else { + return obj, err + } deserializer.DecreaseContainerDepth() return obj, nil } @@ -1653,28 +1938,45 @@ func BincodeDeserializeTransactionStatusMeta(input []byte) (TransactionStatusMet var obj TransactionStatusMeta return obj, fmt.Errorf("Cannot deserialize null array") } - deserializer := bincode.NewDeserializer(input); + deserializer := bincode.NewDeserializer(input) obj, err := DeserializeTransactionStatusMeta(deserializer) if err == nil && deserializer.GetBufferOffset() < uint64(len(input)) { - return obj, fmt.Errorf("Some input bytes were not read") + // return obj, fmt.Errorf("Some input bytes were not read") + // TODO: fix this + klog.Warningf( + "Parsed %d bytes, but input was %d bytes (%d bytes not read)", + deserializer.GetBufferOffset(), + len(input), + 
len(input)-int(deserializer.GetBufferOffset()), + ) } return obj, err } + func serialize_vector_u64(value []uint64, serializer serde.Serializer) error { - if err := serializer.SerializeLen(uint64(len(value))); err != nil { return err } - for _, item := range(value) { - if err := serializer.SerializeU64(item); err != nil { return err } + if err := serializer.SerializeLen(uint64(len(value))); err != nil { + return err + } + for _, item := range value { + if err := serializer.SerializeU64(item); err != nil { + return err + } } return nil } func deserialize_vector_u64(deserializer serde.Deserializer) ([]uint64, error) { length, err := deserializer.DeserializeLen() - if err != nil { return nil, err } + if err != nil { + return nil, err + } obj := make([]uint64, length) - for i := range(obj) { - if val, err := deserializer.DeserializeU64(); err == nil { obj[i] = val } else { return nil, err } + for i := range obj { + if val, err := deserializer.DeserializeU64(); err == nil { + obj[i] = val + } else { + return nil, err + } } return obj, nil } - diff --git a/range-cache.go b/range-cache.go new file mode 100644 index 00000000..5e3bee2d --- /dev/null +++ b/range-cache.go @@ -0,0 +1,267 @@ +package main + +import ( + "context" + "fmt" + "os" + "sync" + "time" +) + +type RangeCache struct { + mu sync.RWMutex + // the size of the file. + size int64 + name string + + occupiedSpace uint64 + remoteFetcher func(p []byte, off int64) (n int, err error) + + cache map[Range]RangeCacheEntry +} + +type RangeCacheEntry struct { + Value []byte + LastRead time.Time +} + +type Range [2]int64 // [start, end) + +// contains returns true if the given range is contained in this range. +func (r Range) contains(r2 Range) bool { + return r[0] <= r2[0] && r[1] >= r2[1] +} + +func (r Range) isContainedIn(r2 Range) bool { + return r2.contains(r) +} + +func (r Range) isValidFor(size int64) bool { + return r[0] >= 0 && r[1] <= size && r[0] <= r[1] +} + +// NewRangeCache creates a new RangeCache. +func NewRangeCache( + size int64, + name string, + fetcher func(p []byte, off int64) (n int, err error), +) *RangeCache { + if fetcher == nil { + panic("fetcher must not be nil") + } + return &RangeCache{ + size: size, + name: name, + cache: make(map[Range]RangeCacheEntry), + remoteFetcher: fetcher, + } +} + +func (rc *RangeCache) Size() int64 { + return rc.size +} + +func (rc *RangeCache) OccupiedSpace() uint64 { + return rc.occupiedSpace +} + +func (rc *RangeCache) Close() error { + rc.mu.Lock() + defer rc.mu.Unlock() + rc.cache = nil + return nil +} + +// StartCacheGC starts a goroutine that will delete old cache entries. +func (rc *RangeCache) StartCacheGC(ctx context.Context, maxAge time.Duration) { + go func() { + t := time.NewTicker(maxAge) + defer t.Stop() + for { + select { + case <-t.C: + rc.DeleteOldEntries(ctx, maxAge) + case <-ctx.Done(): + return + } + } + }() +} + +func (rc *RangeCache) DeleteOldEntries(ctx context.Context, maxAge time.Duration) { + rc.mu.Lock() + defer rc.mu.Unlock() + for r, e := range rc.cache { + if ctx.Err() != nil { + return + } + if time.Since(e.LastRead) > maxAge { + delete(rc.cache, r) + rc.occupiedSpace -= uint64(len(e.Value)) + } + } +} + +// SetRange sets the range [start, end) to the given value. 
+func (rc *RangeCache) SetRange(ctx context.Context, start, ln int64, value []byte) error { + rc.mu.Lock() + defer rc.mu.Unlock() + return rc.setRange(ctx, start, ln, value) +} + +func (rc *RangeCache) setRange(ctx context.Context, start, ln int64, value []byte) error { + end := start + ln + if start < 0 || end > rc.size || start > end { + return fmt.Errorf("invalid range: [%d, %d)", start, end) + } + if len(value) != int(end-start) { + return fmt.Errorf("invalid value length: %d", len(value)) + } + { + for r, rv := range rc.cache { + if ctx.Err() != nil { + return ctx.Err() + } + // check if one of the ranges in the cache contains the requested range. + if r.contains(Range{start, end}) { + debugLn("there's already a cache entry for this or a superset of this range") + return nil + } + // check if the requested range contains one of the ranges in the cache. + if (Range{start, end}).contains(r) { + debugLn("deleting a subset of this range") + delete(rc.cache, r) + rc.occupiedSpace -= uint64(len(rv.Value)) + } + } + } + rc.cache[Range{start, end}] = RangeCacheEntry{ + Value: value, + LastRead: time.Now(), + } + rc.occupiedSpace += uint64(len(value)) + return nil +} + +// GetRange gets the range [start, end) from the given reader. +func (rc *RangeCache) GetRange(ctx context.Context, start, ln int64) ([]byte, error) { + end := start + ln + got, err := rc.getRange(ctx, start, end, func() ([]byte, error) { + v := make([]byte, end-start) + debugf( + orange("[cache-MISS] reading from source %s: start=%d end=%d len=%d\n"), + rc.name, + start, + end, + end-start, + ) + _, err := rc.remoteFetcher(v, start) + if err == nil { + cloned := clone(v) + rc.setRange(ctx, start, ln, cloned) + } + return v, err + }) + if err != nil { + return nil, err + } + if len(got) != int(end-start) { + return nil, fmt.Errorf("invalid length: %d", len(got)) + } + return got, nil +} + +func debugLn(a ...interface{}) { + if DebugMode { + fmt.Fprintln(os.Stderr, a...) + } +} + +func debugf(format string, a ...interface{}) { + if DebugMode { + fmt.Fprintf(os.Stderr, format, a...) + } +} + +var DebugMode = false + +func orange(s string) string { + return "\033[38;5;208m" + s + "\033[0m" +} + +func lime(s string) string { + return "\033[38;5;118m" + s + "\033[0m" +} + +func (rc *RangeCache) getRange(ctx context.Context, start, end int64, miss func() ([]byte, error)) ([]byte, error) { + if start < 0 || end > rc.size || start > end { + return nil, fmt.Errorf("invalid range: [%d, %d)", start, end) + } + if end-start > rc.size { + return nil, fmt.Errorf("range too large: %d", end-start) + } + v, ok, err := rc.getRangeFromCache(ctx, start, end) + if err != nil { + return nil, err + } + if ok { + return v, nil + } + rc.mu.Lock() + defer rc.mu.Unlock() + return miss() +} + +// getRangeFromCache gets the range [start, end) from the cache. +// It will look for an exact match first, then for a superset of the requested range. +func (rc *RangeCache) getRangeFromCache(ctx context.Context, start, end int64) ([]byte, bool, error) { + rc.mu.RLock() + defer rc.mu.RUnlock() + if len(rc.cache) == 0 { + return nil, false, nil + } + if v, ok := rc.cache[Range{start, end}]; ok { + debugf( + lime("[exact-cache-HIT] for %s: start=%d end=%d len=%d\n"), + rc.name, + start, + end, + end-start, + ) + return clone(v.Value), true, nil + } + { + // check if we have a cache entry that is a superset of the requested range. 
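+ // On a superset hit, the cached entry covers [r[0], r[1]) while the caller
+ // asked for [start, end), so the returned bytes are re-based against the
+ // entry's start offset: Value[start-r[0] : end-r[0]], cloned before return.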
+ for r := range rc.cache { + if ctx.Err() != nil { + return nil, false, ctx.Err() + } + if r.contains(Range{start, end}) { + debugf( + lime("[cache-HIT] range superset in %s: start=%d end=%d len=%d\n"), + rc.name, + start, + end, + end-start, + ) + return clone(rc.cache[r].Value[start-r[0] : end-r[0]]), true, nil + } + } + } + return nil, false, nil +} + +func clone(b []byte) []byte { + if b == nil { + return nil + } + c := make([]byte, len(b)) + copy(c, b) + return c +} + +// ReaderAtSeeker is the interface that groups the basic ReadAt and Seek methods. +type ReaderAtSeeker interface { + ReadAt(p []byte, off int64) (n int, err error) + Seek(offset int64, whence int) (int64, error) +} diff --git a/range-cache_test.go b/range-cache_test.go new file mode 100644 index 00000000..794e0a9e --- /dev/null +++ b/range-cache_test.go @@ -0,0 +1,43 @@ +package main + +import ( + "bytes" + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCache(t *testing.T) { + t.Run("basic", func(t *testing.T) { + v := []byte("hello") + full := append(v, []byte(" world")...) + rd := bytes.NewReader(full) + rc := NewRangeCache( + int64(len(full)), + "test", + func(p []byte, off int64) (n int, err error) { + return rd.ReadAt(p, off) + }) + + { + { + err := rc.SetRange(context.Background(), 0, int64(len(v)), v) + require.NoError(t, err) + err = rc.SetRange(context.Background(), 1, 1, []byte("e")) + require.NoError(t, err) + } + ///// + { + got, err := rc.GetRange(context.Background(), 1, 3) + require.NoError(t, err) + require.Equal(t, []byte("ell"), got) + } + { + got, err := rc.GetRange(context.Background(), 1, 7) + require.NoError(t, err) + require.Equal(t, []byte("ello wo"), got) + } + } + }) +} diff --git a/readahead/readahead.go b/readahead/readahead.go new file mode 100644 index 00000000..a804bb71 --- /dev/null +++ b/readahead/readahead.go @@ -0,0 +1,94 @@ +package readahead + +import ( + "bytes" + "fmt" + "io" + "os" +) + +const ( + KiB = 1024 + MiB = 1024 * KiB +) + +const DefaultChunkSize = 12 * MiB + +type CachingReader struct { + file io.ReadCloser + buffer *bytes.Buffer + chunkSize int +} + +// NewCachingReader returns a reader that reads from the given file, but caches +// the last chunkSize bytes in memory. This is useful for reading CAR files +// because the CAR format is optimized for sequential reads, but the CAR reader +// needs to first read the object size before reading the object data. 
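+// Reads smaller than chunkSize are served from the in-memory buffer, which is
+// refilled from the underlying reader in page-aligned chunks; reads larger
+// than chunkSize drain the buffer first and then go directly to the reader.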
+func NewCachingReader(filePath string, chunkSize int) (*CachingReader, error) { + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + chunkSize = alignValueToPageSize(chunkSize) + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + return &CachingReader{file: file, buffer: new(bytes.Buffer), chunkSize: chunkSize}, nil +} + +func NewCachingReaderFromReader(file io.ReadCloser, chunkSize int) (*CachingReader, error) { + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + chunkSize = alignValueToPageSize(chunkSize) + return &CachingReader{file: file, buffer: new(bytes.Buffer), chunkSize: chunkSize}, nil +} + +func alignValueToPageSize(value int) int { + pageSize := os.Getpagesize() + return (value + pageSize - 1) &^ (pageSize - 1) +} + +func (cr *CachingReader) Read(p []byte) (int, error) { + if cr.file == nil { + return 0, fmt.Errorf("file not open") + } + if len(p) == 0 { + return 0, nil + } + + if len(p) > cr.chunkSize { + // read what we can from the buffer + n := copy(p, cr.buffer.Next(cr.chunkSize)) + // read the rest directly from the file + n2, err := cr.file.Read(p[n:]) + if err != nil && err != io.EOF { + return 0, fmt.Errorf("failed to read from file: %w", err) + } + return n + n2, nil + } + + // Refill the buffer if needed + if cr.buffer.Len() < len(p) { + tmp := make([]byte, cr.chunkSize) + n, err := cr.file.Read(tmp) + if err != nil && err != io.EOF { + return 0, fmt.Errorf("failed to read from file: %w", err) + } + if n > 0 { + cr.buffer.Write(tmp[:n]) + } + if err == io.EOF && cr.buffer.Len() == 0 { + // If EOF is reached and buffer is empty, return EOF + return 0, io.EOF + } + } + + // Read and discard bytes from the buffer + n := copy(p, cr.buffer.Next(len(p))) + return n, nil +} + +func (cr *CachingReader) Close() error { + return cr.file.Close() +} diff --git a/readers.go b/readers.go index cf640417..5fb53e1e 100644 --- a/readers.go +++ b/readers.go @@ -13,6 +13,7 @@ import ( "github.com/ipfs/go-libipfs/blocks" carv1 "github.com/ipld/go-car" "github.com/ipld/go-car/util" + "github.com/rpcpool/yellowstone-faithful/readahead" ) func readHeader(br io.Reader) (*carv1.CarHeader, error) { @@ -34,8 +35,12 @@ type carReader struct { header *carv1.CarHeader } -func newCarReader(r io.Reader) (*carReader, error) { - br := bufio.NewReader(r) +func newCarReader(r io.ReadCloser) (*carReader, error) { + cachingReader, err := readahead.NewCachingReaderFromReader(r, readahead.DefaultChunkSize) + if err != nil { + return nil, fmt.Errorf("failed to create caching reader: %s", err) + } + br := bufio.NewReader(cachingReader) ch, err := readHeader(br) if err != nil { return nil, err @@ -81,12 +86,12 @@ func readNodeInfoWithoutData(br *bufio.Reader) (cid.Cid, uint64, error) { func readNodeInfoWithData(br *bufio.Reader) (cid.Cid, uint64, []byte, error) { sectionLen, ll, err := readSectionLength(br) if err != nil { - return cid.Cid{}, 0, nil, err + return cid.Cid{}, 0, nil, fmt.Errorf("failed to read section length: %w", err) } cidLen, c, err := cid.CidFromReader(br) if err != nil { - return cid.Cid{}, 0, nil, err + return cid.Cid{}, 0, nil, fmt.Errorf("failed to read cid: %w", err) } // Seek to the next section by skipping the block. 
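For context on what readNodeInfoWithData is parsing: a CARv1 data section is a uvarint total length, followed by the CID and then the raw block bytes, where the length covers the CID plus the data. The following stand-alone sketch reads one section under that assumption; readOneSection is a hypothetical helper, not part of this patch, but cid.CidFromReader is the same go-cid call used above:

package carsection

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"io"

	"github.com/ipfs/go-cid"
)

// readOneSection reads a single CAR data section: length prefix, CID, block bytes.
func readOneSection(br *bufio.Reader) (cid.Cid, []byte, error) {
	sectionLen, err := binary.ReadUvarint(br) // total length of CID + block data
	if err != nil {
		return cid.Cid{}, nil, err // io.EOF here is a clean end of sections
	}
	cidLen, c, err := cid.CidFromReader(br) // consumes exactly the CID's bytes
	if err != nil {
		return cid.Cid{}, nil, err
	}
	if uint64(cidLen) > sectionLen {
		return cid.Cid{}, nil, fmt.Errorf("malformed section: CID longer than section")
	}
	data := make([]byte, sectionLen-uint64(cidLen)) // what remains is the block itself
	if _, err := io.ReadFull(br, data); err != nil {
		return cid.Cid{}, nil, err
	}
	return c, data, nil
}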
@@ -96,7 +101,7 @@ func readNodeInfoWithData(br *bufio.Reader) (cid.Cid, uint64, []byte, error) { buf := make([]byte, remainingSectionLen) _, err = io.ReadFull(br, buf) if err != nil { - return cid.Cid{}, 0, nil, err + return cid.Cid{}, 0, nil, fmt.Errorf("failed to read block: %w", err) } return c, sectionLen + ll, buf, nil @@ -117,13 +122,16 @@ func (b *byteReaderWithCounter) ReadByte() (byte, error) { func readSectionLength(r *bufio.Reader) (uint64, uint64, error) { if _, err := r.Peek(1); err != nil { // no more blocks, likely clean io.EOF - return 0, 0, err + if errors.Is(err, io.ErrNoProgress) { + return 0, 0, io.EOF + } + return 0, 0, fmt.Errorf("failed to peek: %w", err) } br := byteReaderWithCounter{r, 0} l, err := binary.ReadUvarint(&br) if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return 0, 0, io.ErrUnexpectedEOF // don't silently pretend this is a clean EOF } return 0, 0, err @@ -147,11 +155,11 @@ func (cr *carReader) NextInfo() (cid.Cid, uint64, error) { func (cr *carReader) NextNode() (cid.Cid, uint64, *blocks.BasicBlock, error) { c, sectionLen, data, err := readNodeInfoWithData(cr.br) if err != nil { - return c, 0, nil, err + return c, 0, nil, fmt.Errorf("failed to read node info: %w", err) } bl, err := blocks.NewBlockWithCid(data, c) if err != nil { - return c, 0, nil, err + return c, 0, nil, fmt.Errorf("failed to create block: %w", err) } return c, sectionLen, bl, nil } diff --git a/request-response.go b/request-response.go new file mode 100644 index 00000000..561fc45f --- /dev/null +++ b/request-response.go @@ -0,0 +1,432 @@ +package main + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "strings" + + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + jsoniter "github.com/json-iterator/go" + "github.com/mostynb/zstdpool-freelist" + "github.com/mr-tron/base58" + "github.com/sourcegraph/jsonrpc2" + "github.com/valyala/fasthttp" +) + +type requestContext struct { + ctx *fasthttp.RequestCtx +} + +// ReplyWithError(ctx context.Context, id ID, respErr *Error) error { +func (c *requestContext) ReplyWithError(ctx context.Context, id jsonrpc2.ID, respErr *jsonrpc2.Error) error { + resp := &jsonrpc2.Response{ + ID: id, + Error: respErr, + } + replyJSON(c.ctx, http.StatusOK, resp) + return nil +} + +func toMapAny(v any) (map[string]any, error) { + b, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(v) + if err != nil { + return nil, err + } + var m map[string]any + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +// MapToCamelCase converts a map[string]interface{} to a map[string]interface{} with camelCase keys +func MapToCamelCase(m map[string]any) map[string]any { + newMap := make(map[string]any) + for k, v := range m { + newMap[toLowerCamelCase(k)] = MapToCamelCaseAny(v) + } + return newMap +} + +func MapToCamelCaseAny(m any) any { + if m == nil { + return nil + } + if m, ok := m.(map[string]any); ok { + return MapToCamelCase(m) + } + // if array, convert each element + if m, ok := m.([]any); ok { + for i, v := range m { + m[i] = MapToCamelCaseAny(v) + } + } + return m +} + +func toLowerCamelCase(v string) string { + pascal := bin.ToPascalCase(v) + if len(pascal) == 0 { + return "" + } + if len(pascal) == 1 { + return strings.ToLower(pascal) + } + return strings.ToLower(pascal[:1]) + pascal[1:] +} + +// Reply sends a response to the client with the given result. 
+// The result fields keys are converted to camelCase. +// If remapCallback is not nil, it is called with the result map[string]interface{}. +func (c *requestContext) Reply( + ctx context.Context, + id jsonrpc2.ID, + result interface{}, + remapCallback func(map[string]any) map[string]any, +) error { + mm, err := toMapAny(result) + if err != nil { + return err + } + result = MapToCamelCaseAny(mm) + if remapCallback != nil { + if mp, ok := result.(map[string]any); ok { + result = remapCallback(mp) + } + } + resRaw, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(result) + if err != nil { + return err + } + raw := json.RawMessage(resRaw) + resp := &jsonrpc2.Response{ + ID: id, + Result: &raw, + } + replyJSON(c.ctx, http.StatusOK, resp) + return err +} + +// ReplyRaw sends a raw response without any processing (no camelCase conversion, etc). +func (c *requestContext) ReplyRaw( + ctx context.Context, + id jsonrpc2.ID, + result interface{}, +) error { + resRaw, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(result) + if err != nil { + return err + } + raw := json.RawMessage(resRaw) + resp := &jsonrpc2.Response{ + ID: id, + Result: &raw, + } + replyJSON(c.ctx, http.StatusOK, resp) + return err +} + +func putValueIntoContext(ctx context.Context, key, value interface{}) context.Context { + return context.WithValue(ctx, key, value) +} + +func getValueFromContext(ctx context.Context, key interface{}) interface{} { + return ctx.Value(key) +} + +// WithSubrapghPrefetch sets the prefetch flag in the context +// to enable prefetching of subgraphs. +func WithSubrapghPrefetch(ctx context.Context, yesNo bool) context.Context { + return putValueIntoContext(ctx, "prefetch", yesNo) +} + +type GetBlockRequest struct { + Slot uint64 `json:"slot"` + Options struct { + Commitment *rpc.CommitmentType `json:"commitment,omitempty"` // default: "finalized" + Encoding *solana.EncodingType `json:"encoding,omitempty"` // default: "json" + MaxSupportedTransactionVersion *uint64 `json:"maxSupportedTransactionVersion,omitempty"` + TransactionDetails *string `json:"transactionDetails,omitempty"` // default: "full" + Rewards *bool `json:"rewards,omitempty"` + } `json:"options,omitempty"` +} + +// Validate validates the request. 
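+// Only the encoding option is currently checked: base58, base64, base64+zstd,
+// and json are accepted, while jsonParsed is rejected until support is added.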
+func (req *GetBlockRequest) Validate() error { + if req.Options.Encoding != nil && !isAnyEncodingOf( + *req.Options.Encoding, + solana.EncodingBase58, + solana.EncodingBase64, + solana.EncodingBase64Zstd, + solana.EncodingJSON, + // solana.EncodingJSONParsed, // TODO: add support for this + ) { + return fmt.Errorf("unsupported encoding") + } + return nil +} + +func parseGetBlockRequest(raw *json.RawMessage) (*GetBlockRequest, error) { + var params []any + if err := json.Unmarshal(*raw, ¶ms); err != nil { + return nil, fmt.Errorf("failed to unmarshal params: %w", err) + } + if len(params) < 1 { + return nil, fmt.Errorf("params must have at least one argument") + } + slotRaw, ok := params[0].(float64) + if !ok { + return nil, fmt.Errorf("first argument must be a number, got %T", params[0]) + } + + out := &GetBlockRequest{ + Slot: uint64(slotRaw), + } + + if len(params) > 1 { + optionsRaw, ok := params[1].(map[string]any) + if !ok { + return nil, fmt.Errorf("second argument must be an object, got %T", params[1]) + } + if commitmentRaw, ok := optionsRaw["commitment"]; ok { + commitment, ok := commitmentRaw.(string) + if !ok { + return nil, fmt.Errorf("commitment must be a string, got %T", commitmentRaw) + } + commitmentType := rpc.CommitmentType(commitment) + out.Options.Commitment = &commitmentType + } else { + commitmentType := defaultCommitment() + out.Options.Commitment = &commitmentType + } + if encodingRaw, ok := optionsRaw["encoding"]; ok { + encoding, ok := encodingRaw.(string) + if !ok { + return nil, fmt.Errorf("encoding must be a string, got %T", encodingRaw) + } + encodingType := solana.EncodingType(encoding) + out.Options.Encoding = &encodingType + } else { + encodingType := defaultEncoding() + out.Options.Encoding = &encodingType + } + if maxSupportedTransactionVersionRaw, ok := optionsRaw["maxSupportedTransactionVersion"]; ok { + // TODO: add support for this, and validate the value. + maxSupportedTransactionVersion, ok := maxSupportedTransactionVersionRaw.(float64) + if !ok { + return nil, fmt.Errorf("maxSupportedTransactionVersion must be a number, got %T", maxSupportedTransactionVersionRaw) + } + maxSupportedTransactionVersionUint64 := uint64(maxSupportedTransactionVersion) + out.Options.MaxSupportedTransactionVersion = &maxSupportedTransactionVersionUint64 + } + if transactionDetailsRaw, ok := optionsRaw["transactionDetails"]; ok { + // TODO: add support for this, and validate the value. 
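+ // (Solana RPC defines "full", "accounts", "signatures", and "none" as the
+ // valid values here; for now the string is stored without validation.)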
+ transactionDetails, ok := transactionDetailsRaw.(string) + if !ok { + return nil, fmt.Errorf("transactionDetails must be a string, got %T", transactionDetailsRaw) + } + out.Options.TransactionDetails = &transactionDetails + } else { + transactionDetails := defaultTransactionDetails() + out.Options.TransactionDetails = &transactionDetails + } + if rewardsRaw, ok := optionsRaw["rewards"]; ok { + rewards, ok := rewardsRaw.(bool) + if !ok { + return nil, fmt.Errorf("rewards must be a boolean, got %T", rewardsRaw) + } + out.Options.Rewards = &rewards + } else { + rewards := true + out.Options.Rewards = &rewards + } + } else { + // set defaults: + commitmentType := defaultCommitment() + out.Options.Commitment = &commitmentType + encodingType := defaultEncoding() + out.Options.Encoding = &encodingType + transactionDetails := defaultTransactionDetails() + out.Options.TransactionDetails = &transactionDetails + rewards := true + out.Options.Rewards = &rewards + } + + return out, nil +} + +func defaultCommitment() rpc.CommitmentType { + return rpc.CommitmentFinalized +} + +func defaultEncoding() solana.EncodingType { + return solana.EncodingJSON +} + +func defaultTransactionDetails() string { + return "full" +} + +type GetTransactionRequest struct { + Signature solana.Signature `json:"signature"` + Options struct { + Encoding *solana.EncodingType `json:"encoding,omitempty"` // default: "json" + MaxSupportedTransactionVersion *uint64 `json:"maxSupportedTransactionVersion,omitempty"` + Commitment *rpc.CommitmentType `json:"commitment,omitempty"` + } `json:"options,omitempty"` +} + +// Validate validates the request. +func (req *GetTransactionRequest) Validate() error { + if req.Signature.IsZero() { + return fmt.Errorf("signature is required") + } + if req.Options.Encoding != nil && !isAnyEncodingOf( + *req.Options.Encoding, + solana.EncodingBase58, + solana.EncodingBase64, + solana.EncodingBase64Zstd, + solana.EncodingJSON, + // solana.EncodingJSONParsed, // TODO: add support for this + ) { + return fmt.Errorf("unsupported encoding") + } + return nil +} + +func isAnyEncodingOf(s solana.EncodingType, anyOf ...solana.EncodingType) bool { + for _, v := range anyOf { + if s == v { + return true + } + } + return false +} + +func parseGetTransactionRequest(raw *json.RawMessage) (*GetTransactionRequest, error) { + var params []any + if err := json.Unmarshal(*raw, ¶ms); err != nil { + return nil, fmt.Errorf("failed to unmarshal params: %w", err) + } + if len(params) < 1 { + return nil, fmt.Errorf("params must have at least one argument") + } + sigRaw, ok := params[0].(string) + if !ok { + return nil, fmt.Errorf("first argument must be a string, got %T", params[0]) + } + + sig, err := solana.SignatureFromBase58(sigRaw) + if err != nil { + return nil, fmt.Errorf("failed to parse signature from base58: %w", err) + } + + out := &GetTransactionRequest{ + Signature: sig, + } + + if len(params) > 1 { + optionsRaw, ok := params[1].(map[string]any) + if !ok { + return nil, fmt.Errorf("second argument must be an object, got %T", params[1]) + } + if encodingRaw, ok := optionsRaw["encoding"]; ok { + encoding, ok := encodingRaw.(string) + if !ok { + return nil, fmt.Errorf("encoding must be a string, got %T", encodingRaw) + } + encodingType := solana.EncodingType(encoding) + out.Options.Encoding = &encodingType + } else { + encodingType := defaultEncoding() + out.Options.Encoding = &encodingType + } + if maxSupportedTransactionVersionRaw, ok := optionsRaw["maxSupportedTransactionVersion"]; ok { + // TODO: add support for 
this, and validate the value. + maxSupportedTransactionVersion, ok := maxSupportedTransactionVersionRaw.(float64) + if !ok { + return nil, fmt.Errorf("maxSupportedTransactionVersion must be a number, got %T", maxSupportedTransactionVersionRaw) + } + maxSupportedTransactionVersionUint64 := uint64(maxSupportedTransactionVersion) + out.Options.MaxSupportedTransactionVersion = &maxSupportedTransactionVersionUint64 + } + if commitmentRaw, ok := optionsRaw["commitment"]; ok { + commitment, ok := commitmentRaw.(string) + if !ok { + return nil, fmt.Errorf("commitment must be a string, got %T", commitmentRaw) + } + commitmentType := rpc.CommitmentType(commitment) + out.Options.Commitment = &commitmentType + } + } else { + // set defaults: + encodingType := defaultEncoding() + out.Options.Encoding = &encodingType + } + + return out, nil +} + +var zstdEncoderPool = zstdpool.NewEncoderPool() + +func encodeTransactionResponseBasedOnWantedEncoding( + encoding solana.EncodingType, + tx solana.Transaction, +) (any, error) { + switch encoding { + case solana.EncodingBase58, solana.EncodingBase64, solana.EncodingBase64Zstd: + txBuf, err := tx.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to marshal transaction: %w", err) + } + return encodeBytesResponseBasedOnWantedEncoding(encoding, txBuf) + case solana.EncodingJSONParsed: + return nil, fmt.Errorf("unsupported encoding") + case solana.EncodingJSON: + // TODO: add support for this + return tx, nil + default: + return nil, fmt.Errorf("unsupported encoding") + } +} + +func encodeBytesResponseBasedOnWantedEncoding( + encoding solana.EncodingType, + buf []byte, +) ([]any, error) { + switch encoding { + case solana.EncodingBase58: + return []any{base58.Encode(buf), encoding}, nil + case solana.EncodingBase64: + return []any{base64.StdEncoding.EncodeToString(buf), encoding}, nil + case solana.EncodingBase64Zstd: + enc, err := zstdEncoderPool.Get(nil) + if err != nil { + return nil, fmt.Errorf("failed to get zstd encoder: %w", err) + } + defer zstdEncoderPool.Put(enc) + return []any{base64.StdEncoding.EncodeToString(enc.EncodeAll(buf, nil)), encoding}, nil + default: + return nil, fmt.Errorf("unsupported encoding %q", encoding) + } +} + +func parseGetBlockTimeRequest(raw *json.RawMessage) (uint64, error) { + var params []any + if err := json.Unmarshal(*raw, ¶ms); err != nil { + return 0, fmt.Errorf("failed to unmarshal params: %w", err) + } + if len(params) < 1 { + return 0, fmt.Errorf("params must have at least one argument") + } + blockRaw, ok := params[0].(float64) + if !ok { + return 0, fmt.Errorf("first argument must be a number, got %T", params[0]) + } + return uint64(blockRaw), nil +} diff --git a/solana-block-rewards/rewards.go b/solana-block-rewards/rewards.go index 846d53aa..8bbd2024 100644 --- a/solana-block-rewards/rewards.go +++ b/solana-block-rewards/rewards.go @@ -1,8 +1,8 @@ package solanablockrewards import ( - "github.com/golang/protobuf/proto" "github.com/rpcpool/yellowstone-faithful/third_party/solana_proto/confirmed_block" + "google.golang.org/protobuf/proto" ) func ParseRewards(buf []byte) (*confirmed_block.Rewards, error) { diff --git a/storage.go b/storage.go new file mode 100644 index 00000000..4474d3c4 --- /dev/null +++ b/storage.go @@ -0,0 +1,233 @@ +package main + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "strings" + + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" + "github.com/ipfs/go-cid" + "github.com/ipld/go-car/util" + carv2 "github.com/ipld/go-car/v2" 
+ cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/rpcpool/yellowstone-faithful/ipld/ipldbindcode" + solanatxmetaparsers "github.com/rpcpool/yellowstone-faithful/solana-tx-meta-parsers" + "golang.org/x/exp/mmap" + "k8s.io/klog/v2" +) + +// openIndexStorage open a compactindex from a local file, or from a remote URL. +// Supported protocols are: +// - http:// +// - https:// +func openIndexStorage( + ctx context.Context, + where string, + debug bool, +) (ReaderAtCloser, error) { + where = strings.TrimSpace(where) + if strings.HasPrefix(where, "http://") || strings.HasPrefix(where, "https://") { + klog.Infof("opening index file from %q as HTTP remote file", where) + rac, err := remoteHTTPFileAsIoReaderAt(ctx, where) + if err != nil { + return nil, fmt.Errorf("failed to open remote index file: %w", err) + } + if !debug { + return rac, nil + } + return &readCloserWrapper{ + rac: rac, + name: where, + isRemote: true, + }, nil + } + // TODO: add support for IPFS gateways. + // TODO: add support for Filecoin gateways. + rac, err := mmap.Open(where) + if err != nil { + return nil, fmt.Errorf("failed to open local index file: %w", err) + } + if !debug { + return rac, nil + } + return &readCloserWrapper{ + rac: rac, + name: where, + isRemote: false, + }, nil +} + +func openCarStorage(ctx context.Context, where string) (*carv2.Reader, ReaderAtCloser, error) { + where = strings.TrimSpace(where) + if strings.HasPrefix(where, "http://") || strings.HasPrefix(where, "https://") { + klog.Infof("opening CAR file from %q as HTTP remote file", where) + rem, err := remoteHTTPFileAsIoReaderAt(ctx, where) + if err != nil { + return nil, nil, fmt.Errorf("failed to open remote CAR file: %w", err) + } + return nil, &readCloserWrapper{ + rac: rem, + name: where, + }, nil + } + // TODO: add support for IPFS gateways. + // TODO: add support for Filecoin gateways. + + carReader, err := carv2.OpenReader(where) + if err != nil { + return nil, nil, fmt.Errorf("failed to open CAR file: %w", err) + } + return carReader, nil, nil +} + +func readSectionFromReaderAt(reader ReaderAtCloser, offset uint64, length uint64) ([]byte, error) { + data := make([]byte, length) + _, err := reader.ReadAt(data, int64(offset)) + if err != nil { + return nil, err + } + return data, nil +} + +func readNodeFromReaderAt(reader ReaderAtCloser, wantedCid cid.Cid, offset uint64) ([]byte, error) { + // read MaxVarintLen64 bytes + lenBuf := make([]byte, binary.MaxVarintLen64) + _, err := reader.ReadAt(lenBuf, int64(offset)) + if err != nil { + return nil, err + } + // read uvarint + dataLen, n := binary.Uvarint(lenBuf) + offset += uint64(n) + if dataLen > uint64(util.MaxAllowedSectionSize) { // Don't OOM + return nil, errors.New("malformed car; header is bigger than util.MaxAllowedSectionSize") + } + data := make([]byte, dataLen) + _, err = reader.ReadAt(data, int64(offset)) + if err != nil { + return nil, err + } + + n, gotCid, err := cid.CidFromReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + // verify that the CID we read matches the one we expected. 
+ if !gotCid.Equals(wantedCid) { + return nil, fmt.Errorf("CID mismatch: expected %s, got %s", wantedCid, gotCid) + } + return data[n:], nil +} + +type GetBlockResponse struct { + BlockHeight *uint64 `json:"blockHeight"` + BlockTime *uint64 `json:"blockTime"` + Blockhash string `json:"blockhash"` + ParentSlot uint64 `json:"parentSlot"` + PreviousBlockhash *string `json:"previousBlockhash"` + Rewards any `json:"rewards"` // TODO: use same format as solana + Transactions []GetTransactionResponse `json:"transactions"` +} + +type GetTransactionResponse struct { + // TODO: use same format as solana + Blocktime *uint64 `json:"blockTime,omitempty"` + Meta any `json:"meta"` + Slot *uint64 `json:"slot,omitempty"` + Transaction any `json:"transaction"` + Version any `json:"version"` + Position uint64 `json:"-"` // TODO: enable this + Signatures []solana.Signature `json:"-"` // TODO: enable this +} + +func loadDataFromDataFrames( + firstDataFrame *ipldbindcode.DataFrame, + dataFrameGetter func(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error), +) ([]byte, error) { + dataBuffer := new(bytes.Buffer) + allFrames, err := getAllFramesFromDataFrame(firstDataFrame, dataFrameGetter) + if err != nil { + return nil, err + } + for _, frame := range allFrames { + dataBuffer.Write(frame.Bytes()) + } + // verify the data hash (if present) + bufHash, ok := firstDataFrame.GetHash() + if !ok { + return dataBuffer.Bytes(), nil + } + err = ipldbindcode.VerifyHash(dataBuffer.Bytes(), bufHash) + if err != nil { + return nil, err + } + return dataBuffer.Bytes(), nil +} + +func getAllFramesFromDataFrame( + firstDataFrame *ipldbindcode.DataFrame, + dataFrameGetter func(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error), +) ([]*ipldbindcode.DataFrame, error) { + frames := []*ipldbindcode.DataFrame{firstDataFrame} + // get the next data frames + next, ok := firstDataFrame.GetNext() + if !ok || len(next) == 0 { + return frames, nil + } + for _, cid := range next { + nextDataFrame, err := dataFrameGetter(context.Background(), cid.(cidlink.Link).Cid) + if err != nil { + return nil, err + } + nextFrames, err := getAllFramesFromDataFrame(nextDataFrame, dataFrameGetter) + if err != nil { + return nil, err + } + frames = append(frames, nextFrames...) 
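// Note (added for clarity, not part of the patch): this recursion terminates
// because leaf frames carry an empty "next" list; frames are collected
// depth-first in the order the links appear, which is assumed to match the
// original byte order that loadDataFromDataFrames reassembles above.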
+ } + return frames, nil +} + +func parseTransactionAndMetaFromNode( + transactionNode *ipldbindcode.Transaction, + dataFrameGetter func(ctx context.Context, wantedCid cid.Cid) (*ipldbindcode.DataFrame, error), +) (tx solana.Transaction, meta any, _ error) { + { + transactionBuffer, err := loadDataFromDataFrames(&transactionNode.Data, dataFrameGetter) + if err != nil { + return solana.Transaction{}, nil, err + } + if err := bin.UnmarshalBin(&tx, transactionBuffer); err != nil { + klog.Errorf("failed to unmarshal transaction: %v", err) + return solana.Transaction{}, nil, err + } else if len(tx.Signatures) == 0 { + klog.Errorf("transaction has no signatures") + return solana.Transaction{}, nil, fmt.Errorf("transaction has no signatures") + } + } + + { + metaBuffer, err := loadDataFromDataFrames(&transactionNode.Metadata, dataFrameGetter) + if err != nil { + return solana.Transaction{}, nil, err + } + if len(metaBuffer) > 0 { + uncompressedMeta, err := decompressZstd(metaBuffer) + if err != nil { + klog.Errorf("failed to decompress metadata: %v", err) + return solana.Transaction{}, nil, err + } + status, err := solanatxmetaparsers.ParseAnyTransactionStatusMeta(uncompressedMeta) + if err != nil { + klog.Errorf("failed to parse metadata: %v", err) + return solana.Transaction{}, nil, err + } + meta = status + } + } + return +} diff --git a/gsfa/store/LICENSE.md b/store/LICENSE.md similarity index 100% rename from gsfa/store/LICENSE.md rename to store/LICENSE.md diff --git a/gsfa/store/filecache/filecache.go b/store/filecache/filecache.go similarity index 100% rename from gsfa/store/filecache/filecache.go rename to store/filecache/filecache.go diff --git a/gsfa/store/filecache/filecache_test.go b/store/filecache/filecache_test.go similarity index 100% rename from gsfa/store/filecache/filecache_test.go rename to store/filecache/filecache_test.go diff --git a/gsfa/store/freelist/freelist.go b/store/freelist/freelist.go similarity index 98% rename from gsfa/store/freelist/freelist.go rename to store/freelist/freelist.go index 5f8872ab..78661311 100644 --- a/gsfa/store/freelist/freelist.go +++ b/store/freelist/freelist.go @@ -13,7 +13,7 @@ import ( "os" "sync" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/types" ) const CIDSizePrefix = 4 diff --git a/gsfa/store/freelist/freelist_test.go b/store/freelist/freelist_test.go similarity index 94% rename from gsfa/store/freelist/freelist_test.go rename to store/freelist/freelist_test.go index 03a8cb77..50410f84 100644 --- a/gsfa/store/freelist/freelist_test.go +++ b/store/freelist/freelist_test.go @@ -12,8 +12,8 @@ import ( "path/filepath" "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/freelist" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/freelist" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/index/buckets.go b/store/index/buckets.go similarity index 93% rename from gsfa/store/index/buckets.go rename to store/index/buckets.go index 783915d3..c3a22d90 100644 --- a/gsfa/store/index/buckets.go +++ b/store/index/buckets.go @@ -1,6 +1,6 @@ package index -import "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" +import "github.com/rpcpool/yellowstone-faithful/store/types" // BucketIndex is an index to a bucket type BucketIndex uint32 diff --git a/gsfa/store/index/buckets_test.go b/store/index/buckets_test.go similarity index 92% rename from gsfa/store/index/buckets_test.go rename to store/index/buckets_test.go index 039a442b..9c1ce162
100644 --- a/gsfa/store/index/buckets_test.go +++ b/store/index/buckets_test.go @@ -3,8 +3,8 @@ package index_test import ( "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/index" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/index" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/index/gc.go b/store/index/gc.go similarity index 99% rename from gsfa/store/index/gc.go rename to store/index/gc.go index a0863ac4..d9e6c8b2 100644 --- a/gsfa/store/index/gc.go +++ b/store/index/gc.go @@ -15,7 +15,7 @@ import ( "time" logging "github.com/ipfs/go-log/v2" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/types" ) var log = logging.Logger("storethehash/index") diff --git a/gsfa/store/index/gc_test.go b/store/index/gc_test.go similarity index 98% rename from gsfa/store/index/gc_test.go rename to store/index/gc_test.go index f126cf67..12e56858 100644 --- a/gsfa/store/index/gc_test.go +++ b/store/index/gc_test.go @@ -8,7 +8,7 @@ import ( "path/filepath" "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/filecache" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/index/header.go b/store/index/header.go similarity index 100% rename from gsfa/store/index/header.go rename to store/index/header.go diff --git a/gsfa/store/index/index.go b/store/index/index.go similarity index 99% rename from gsfa/store/index/index.go rename to store/index/index.go index f63fb7ad..83a68779 100644 --- a/gsfa/store/index/index.go +++ b/store/index/index.go @@ -17,10 +17,10 @@ import ( "sync" "time" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/filecache" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/primary" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/primary/gsfaprimary" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/primary" + "github.com/rpcpool/yellowstone-faithful/store/primary/gsfaprimary" + "github.com/rpcpool/yellowstone-faithful/store/types" ) /* An append-only log [`recordlist`]s. 
diff --git a/gsfa/store/index/index_test.go b/store/index/index_test.go similarity index 99% rename from gsfa/store/index/index_test.go rename to store/index/index_test.go index 59e1c2bc..341932d1 100644 --- a/gsfa/store/index/index_test.go +++ b/store/index/index_test.go @@ -5,9 +5,9 @@ import ( "path/filepath" "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/filecache" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/primary/inmemory" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/primary/inmemory" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/index/recordlist.go b/store/index/recordlist.go similarity index 99% rename from gsfa/store/index/recordlist.go rename to store/index/recordlist.go index 0f23da72..66ecf9b9 100644 --- a/gsfa/store/index/recordlist.go +++ b/store/index/recordlist.go @@ -10,7 +10,7 @@ import ( "encoding/binary" "io" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/types" ) // BucketPrefixSize is how many bytes of bucket prefixes are stored. diff --git a/gsfa/store/index/recordlist_test.go b/store/index/recordlist_test.go similarity index 98% rename from gsfa/store/index/recordlist_test.go rename to store/index/recordlist_test.go index d89b7e0c..35f4f576 100644 --- a/gsfa/store/index/recordlist_test.go +++ b/store/index/recordlist_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/index" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/index" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/index/upgrade.go b/store/index/upgrade.go similarity index 98% rename from gsfa/store/index/upgrade.go rename to store/index/upgrade.go index 905a0de5..5d6ce441 100644 --- a/gsfa/store/index/upgrade.go +++ b/store/index/upgrade.go @@ -13,7 +13,7 @@ import ( "io" "os" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/types" ) func upgradeIndex(ctx context.Context, name, headerPath string, maxFileSize uint32) error { diff --git a/gsfa/store/index/upgrade_test.go b/store/index/upgrade_test.go similarity index 98% rename from gsfa/store/index/upgrade_test.go rename to store/index/upgrade_test.go index 5d343c9f..b00baf36 100644 --- a/gsfa/store/index/upgrade_test.go +++ b/store/index/upgrade_test.go @@ -10,7 +10,7 @@ import ( "path/filepath" "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/index/valuestore_test/storethehash.index b/store/index/valuestore_test/storethehash.index similarity index 100% rename from gsfa/store/index/valuestore_test/storethehash.index rename to store/index/valuestore_test/storethehash.index diff --git a/gsfa/store/index/valuestore_test/storethehash.index.free b/store/index/valuestore_test/storethehash.index.free similarity index 100% rename from gsfa/store/index/valuestore_test/storethehash.index.free rename to store/index/valuestore_test/storethehash.index.free diff --git a/gsfa/store/iterator.go b/store/iterator.go similarity index 95% rename from gsfa/store/iterator.go rename to store/iterator.go index 
7b9e64c2..10d458cb 100644 --- a/gsfa/store/iterator.go +++ b/store/iterator.go @@ -8,7 +8,7 @@ package store import ( "io" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/index" + "github.com/rpcpool/yellowstone-faithful/store/index" ) // Iterator iterates keys and values. Any write to the store potentially diff --git a/gsfa/store/option.go b/store/option.go similarity index 97% rename from gsfa/store/option.go rename to store/option.go index a2011394..99a3c5af 100644 --- a/gsfa/store/option.go +++ b/store/option.go @@ -8,7 +8,7 @@ package store import ( "time" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/types" ) const ( diff --git a/gsfa/store/primary/gsfaprimary/gsfaprimary.go b/store/primary/gsfaprimary/gsfaprimary.go similarity index 96% rename from gsfa/store/primary/gsfaprimary/gsfaprimary.go rename to store/primary/gsfaprimary/gsfaprimary.go index e009f625..5779d404 100644 --- a/gsfa/store/primary/gsfaprimary/gsfaprimary.go +++ b/store/primary/gsfaprimary/gsfaprimary.go @@ -17,10 +17,10 @@ import ( "github.com/gagliardetto/solana-go" logging "github.com/ipfs/go-log/v2" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/filecache" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/freelist" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/primary" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/freelist" + "github.com/rpcpool/yellowstone-faithful/store/primary" + "github.com/rpcpool/yellowstone-faithful/store/types" ) var log = logging.Logger("storethehash/gsfaprimary") @@ -46,6 +46,7 @@ const ( // TODO: replace deletedBit with a byte? or do the same thing deletedBit = uint32(1 << 31) ) +const primaryRecordSize = 32 + 8 // A primary storage that is multihash aware. type GsfaPrimary struct { @@ -88,6 +89,13 @@ func newBlockPool() blockPool { } } +func _clone(b []byte) []byte { + if b == nil { + return nil + } + return append(b[:0:0], b...) +} + // Open opens the gsfa primary storage file. The primary is created if // there is no existing primary at the specified path. If there is an older // version primary, then it is automatically upgraded. @@ -255,7 +263,7 @@ func readPubkey(buf []byte) (Pubkey, int, error) { // files. 
func (cp *GsfaPrimary) Put(key []byte, value []byte) (types.Block, error) { recSize := int64(len(key) + len(value)) - dataSize := 32 + 8 + dataSize := primaryRecordSize if recSize != int64(dataSize) { return types.Block{}, fmt.Errorf("expected record size %d, got %d", dataSize, recSize) } @@ -275,7 +283,7 @@ func (cp *GsfaPrimary) Put(key []byte, value []byte) (types.Block, error) { cp.recPos += types.Position(dataSize) cp.nextPool.refs[blk] = len(cp.nextPool.blocks) - cp.nextPool.blocks = append(cp.nextPool.blocks, blockRecord{key, value}) + cp.nextPool.blocks = append(cp.nextPool.blocks, blockRecord{_clone(key), _clone(value)}) cp.outstandingWork += types.Work(dataSize) return blk, nil } @@ -302,7 +310,7 @@ func (cp *GsfaPrimary) Overwrite(blk types.Block, key []byte, value []byte) erro if _, err = fi.WriteAt(payload, int64(localPos)); err != nil { return fmt.Errorf("error writing data to gsfa primary: %w", err) } - cp.upgradeCachedValue(blk, key, value) + cp.upgradeCachedValue(blk, _clone(key), _clone(value)) return nil } @@ -482,7 +490,7 @@ func (iter *Iterator) Next() ([]byte, []byte, error) { iter.pos = 0 } - size := 32 + 8 + size := primaryRecordSize pos := iter.pos data := make([]byte, size) _, err := iter.file.ReadAt(data, pos) diff --git a/gsfa/store/primary/gsfaprimary/gsfaprimary_test.go b/store/primary/gsfaprimary/gsfaprimary_test.go similarity index 88% rename from gsfa/store/primary/gsfaprimary/gsfaprimary_test.go rename to store/primary/gsfaprimary/gsfaprimary_test.go index d3264649..b4df5c42 100644 --- a/gsfa/store/primary/gsfaprimary/gsfaprimary_test.go +++ b/store/primary/gsfaprimary/gsfaprimary_test.go @@ -11,10 +11,10 @@ import ( "testing" "github.com/gagliardetto/solana-go" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/filecache" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/primary/gsfaprimary" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/testutil" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/primary/gsfaprimary" + "github.com/rpcpool/yellowstone-faithful/store/testutil" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) @@ -30,8 +30,8 @@ func TestIndexPut(t *testing.T) { blks := testutil.GenerateEntries(5) expectedOffset := types.Position(0) for _, blk := range blks { - expectedSize := len(blk.Key.Bytes()) + len(blk.RawValue()) - loc, err := primaryStorage.Put(blk.Key.Bytes(), blk.RawValue()) + expectedSize := len(blk.Key[:]) + len(blk.Value) + loc, err := primaryStorage.Put(blk.Key[:], blk.Value) require.NoError(t, err) require.Equal(t, expectedOffset, loc.Offset) require.Equal(t, types.Size(expectedSize), loc.Size) @@ -90,7 +90,7 @@ func TestIndexGet(t *testing.T) { blks := testutil.GenerateEntries(5) var locs []types.Block for _, blk := range blks { - loc, err := primaryStorage.Put(blk.Key.Bytes(), blk.Value) + loc, err := primaryStorage.Put(blk.Key[:], blk.Value) require.NoError(t, err) locs = append(locs, loc) } @@ -136,7 +136,7 @@ func TestFlushRace(t *testing.T) { // load blocks blks := testutil.GenerateEntries(5) for _, blk := range blks { - _, err := primaryStorage.Put(blk.Key.Bytes(), blk.Value) + _, err := primaryStorage.Put(blk.Key[:], blk.Value) require.NoError(t, err) } @@ -167,7 +167,7 @@ func TestFlushExcess(t *testing.T) { // load blocks blks := testutil.GenerateEntries(5) for _, blk := range blks { - _, err := primaryStorage.Put(blk.Key.Bytes(), blk.Value) 
+ _, err := primaryStorage.Put(blk.Key[:], blk.Value) require.NoError(t, err) } @@ -177,7 +177,7 @@ func TestFlushExcess(t *testing.T) { blks = testutil.GenerateEntries(5) for _, blk := range blks { - _, err := primaryStorage.Put(blk.Key.Bytes(), blk.Value) + _, err := primaryStorage.Put(blk.Key[:], blk.Value) require.NoError(t, err) } diff --git a/gsfa/store/primary/gsfaprimary/header.go b/store/primary/gsfaprimary/header.go similarity index 100% rename from gsfa/store/primary/gsfaprimary/header.go rename to store/primary/gsfaprimary/header.go diff --git a/gsfa/store/primary/gsfaprimary/upgrade.go b/store/primary/gsfaprimary/upgrade.go similarity index 98% rename from gsfa/store/primary/gsfaprimary/upgrade.go rename to store/primary/gsfaprimary/upgrade.go index 39f9bf85..9fea0086 100644 --- a/gsfa/store/primary/gsfaprimary/upgrade.go +++ b/store/primary/gsfaprimary/upgrade.go @@ -14,8 +14,8 @@ import ( "os" "path/filepath" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/freelist" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/freelist" + "github.com/rpcpool/yellowstone-faithful/store/types" ) type IndexRemapper struct { diff --git a/gsfa/store/primary/gsfaprimary/upgrade_test.go b/store/primary/gsfaprimary/upgrade_test.go similarity index 95% rename from gsfa/store/primary/gsfaprimary/upgrade_test.go rename to store/primary/gsfaprimary/upgrade_test.go index f8060ee8..0b99202b 100644 --- a/gsfa/store/primary/gsfaprimary/upgrade_test.go +++ b/store/primary/gsfaprimary/upgrade_test.go @@ -15,9 +15,9 @@ import ( "path/filepath" "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/filecache" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/freelist" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/freelist" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/primary/gsfaprimary/valuestore_test/storethehash.data b/store/primary/gsfaprimary/valuestore_test/storethehash.data similarity index 100% rename from gsfa/store/primary/gsfaprimary/valuestore_test/storethehash.data rename to store/primary/gsfaprimary/valuestore_test/storethehash.data diff --git a/gsfa/store/primary/inmemory/inmemory.go b/store/primary/inmemory/inmemory.go similarity index 94% rename from gsfa/store/primary/inmemory/inmemory.go rename to store/primary/inmemory/inmemory.go index f3b6111f..b61fda8c 100644 --- a/gsfa/store/primary/inmemory/inmemory.go +++ b/store/primary/inmemory/inmemory.go @@ -8,8 +8,8 @@ package inmemory import ( "io" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/primary" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/primary" + "github.com/rpcpool/yellowstone-faithful/store/types" ) //! In-memory primary storage implementation. 
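// Illustrative sketch, not part of the patch: the _clone helper added above
// copies key/value before they are cached in the block pool. Without the copy
// the pool would alias the caller's buffer, so a caller that reuses its buffer
// before the next Flush would silently corrupt the pending record. A minimal,
// self-contained demonstration of the aliasing hazard and the
// append(b[:0:0], b...) idiom:
package main

import "fmt"

func main() {
	buf := []byte("hello")
	aliased := buf                      // shares buf's backing array
	cloned := append(buf[:0:0], buf...) // forces a fresh allocation and copy
	copy(buf, "HELLO")                  // caller reuses its buffer
	fmt.Println(string(aliased), string(cloned)) // prints "HELLO hello"
}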
diff --git a/gsfa/store/primary/inmemory/inmemory_test.go b/store/primary/inmemory/inmemory_test.go similarity index 93% rename from gsfa/store/primary/inmemory/inmemory_test.go rename to store/primary/inmemory/inmemory_test.go index f77e60bc..1e656072 100644 --- a/gsfa/store/primary/inmemory/inmemory_test.go +++ b/store/primary/inmemory/inmemory_test.go @@ -8,8 +8,8 @@ package inmemory_test import ( "testing" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/primary/inmemory" - "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" + "github.com/rpcpool/yellowstone-faithful/store/primary/inmemory" + "github.com/rpcpool/yellowstone-faithful/store/types" "github.com/stretchr/testify/require" ) diff --git a/gsfa/store/primary/primary.go b/store/primary/primary.go similarity index 95% rename from gsfa/store/primary/primary.go rename to store/primary/primary.go index 87f95ab5..c3fb7d0d 100644 --- a/gsfa/store/primary/primary.go +++ b/store/primary/primary.go @@ -5,7 +5,7 @@ package primary // // Copyright 2020 IPLD Team and various authors and contributors // See LICENSE for details. -import "github.com/rpcpool/yellowstone-faithful/gsfa/store/types" +import "github.com/rpcpool/yellowstone-faithful/store/types" // PrimaryStorage is an interface for storing and retrieving key value pairs on disk type PrimaryStorage interface { diff --git a/store/primary/sig2epochprimary/header.go b/store/primary/sig2epochprimary/header.go new file mode 100644 index 00000000..54ad7287 --- /dev/null +++ b/store/primary/sig2epochprimary/header.go @@ -0,0 +1,53 @@ +package sig2epochprimary + +// Copyright 2023 rpcpool +// This file has been modified by github.com/gagliardetto +// +// Copyright 2020 IPLD Team and various authors and contributors +// See LICENSE for details. +import ( + "encoding/json" + "os" +) + +// Header contains information about the primary. This is actually stored in a +// separate ".info" file, but is the first file read when the index is opened. +type Header struct { + // A version number in case we change the header + Version int + // MaxFileSize is the size limit of each index file. This cannot be greater + // than 4GiB. + MaxFileSize uint32 + // First index file number + FirstFile uint32 +} + +func newHeader(maxFileSize uint32) Header { + return Header{ + Version: PrimaryVersion, + MaxFileSize: maxFileSize, + } +} + +func readHeader(filePath string) (Header, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return Header{}, err + } + + var header Header + err = json.Unmarshal(data, &header) + if err != nil { + return Header{}, err + } + + return header, nil +} + +func writeHeader(headerPath string, header Header) error { + data, err := json.Marshal(&header) + if err != nil { + return err + } + return os.WriteFile(headerPath, data, 0o666) +} diff --git a/store/primary/sig2epochprimary/sig2epochprimary.go b/store/primary/sig2epochprimary/sig2epochprimary.go new file mode 100644 index 00000000..e2c20c1b --- /dev/null +++ b/store/primary/sig2epochprimary/sig2epochprimary.go @@ -0,0 +1,594 @@ +package sig2epochprimary + +// Copyright 2023 rpcpool +// This file has been modified by github.com/gagliardetto +// +// Copyright 2020 IPLD Team and various authors and contributors +// See LICENSE for details. 
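// Illustrative sketch, not part of the patch: each record in this primary is
// fixed-size (primaryRecordSize = 64 + 2, defined below): a 64-byte
// transaction signature as the key followed by a 2-byte value. The value
// encoding is decided by the caller, not by this package; a little-endian
// uint16 epoch number is one plausible layout:
//
//	var epoch uint16 = 455 // hypothetical epoch number
//	val := make([]byte, 2)
//	binary.LittleEndian.PutUint16(val, epoch)
//	blk, err := p.Put(sig[:], val) // p is a *SigToEpochPrimary; any other total size is rejected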
+import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + "sync" + + "github.com/gagliardetto/solana-go" + logging "github.com/ipfs/go-log/v2" + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/freelist" + "github.com/rpcpool/yellowstone-faithful/store/primary" + "github.com/rpcpool/yellowstone-faithful/store/types" +) + +var log = logging.Logger("storethehash/sig2epochprimary") + +const ( + // PrimaryVersion is stored in the header data to indicate how to interpret + // primary data. + PrimaryVersion = 1 + + // defaultMaxFileSize is the largest the max file size is allowed to be. + defaultMaxFileSize = uint32(1024 * 1024 * 1024) + + // blockBufferSize is the size of primary I/O buffers. It has the same size + // as the Linux pipe size. + blockBufferSize = 16 * 4096 + // blockPoolSize is the size of the primary cache. + blockPoolSize = 1024 + + // TODO: remove sizePrefixSize anywhere it is used. + sizePrefixSize = 1 + + // TODO: remove deletedBit anywhere it is used. + // TODO: replace deletedBit with a byte? or do the same thing + deletedBit = uint32(1 << 31) +) +const primaryRecordSize = 64 + 2 + +type SigToEpochPrimary struct { + basePath string + file *os.File + headerPath string + maxFileSize uint32 + writer *bufio.Writer + outstandingWork types.Work + curPool, nextPool blockPool + poolLk sync.RWMutex + flushLock sync.Mutex + fileCache *filecache.FileCache + + // fileNum and length track flushed data. + fileNum uint32 + length types.Position + + // recFileNum and recPos track where each record will be written when they + // are flushed to disk. + recFileNum uint32 + recPos types.Position + + closed bool +} + +type blockRecord struct { + key []byte + value []byte +} +type blockPool struct { + refs map[types.Block]int + blocks []blockRecord +} + +func newBlockPool() blockPool { + return blockPool{ + refs: make(map[types.Block]int, blockPoolSize), + blocks: make([]blockRecord, 0, blockPoolSize), + } +} + +// Open opens the sig2epoch primary storage file. The primary is created if +// there is no existing primary at the specified path. If there is an older +// version primary, then it is automatically upgraded. +func Open(path string, freeList *freelist.FreeList, fileCache *filecache.FileCache, maxFileSize uint32) (*SigToEpochPrimary, error) { + headerPath := filepath.Clean(path) + ".info" + + if maxFileSize == 0 { + maxFileSize = defaultMaxFileSize + } else if maxFileSize > defaultMaxFileSize { + return nil, fmt.Errorf("maximum primary file size cannot exceed %d", defaultMaxFileSize) + } + + var lastPrimaryNum uint32 + header, err := readHeader(headerPath) + if os.IsNotExist(err) { + // If header does not exist, then upgrade primary. + lastPrimaryNum, err = upgradePrimary(context.Background(), path, headerPath, maxFileSize, freeList) + if err != nil { + return nil, fmt.Errorf("error upgrading primary: %w", err) + } + + // Header does not exist, so create new one. + header = newHeader(maxFileSize) + if err = writeHeader(headerPath, header); err != nil { + return nil, err + } + } else { + if err != nil { + return nil, err + } + + if header.MaxFileSize != maxFileSize { + return nil, types.ErrPrimaryWrongFileSize{header.MaxFileSize, maxFileSize} + } + + // Find last primary file.
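// Note (added for clarity, not part of the patch): primary data is split
// across files named "<path>.0", "<path>.1", and so on (see primaryFileName
// further down); findLastPrimary below walks forward from header.FirstFile
// until a file is missing, so writes resume in the highest-numbered file.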
+ lastPrimaryNum, err = findLastPrimary(path, header.FirstFile) + if err != nil { + return nil, err + } + } + + file, err := os.OpenFile(primaryFileName(path, lastPrimaryNum), os.O_RDWR|os.O_APPEND|os.O_CREATE, 0o644) + if err != nil { + return nil, err + } + length, err := file.Seek(0, io.SeekEnd) + if err != nil { + return nil, err + } + + mp := &SigToEpochPrimary{ + basePath: path, + file: file, + fileCache: fileCache, + headerPath: headerPath, + maxFileSize: maxFileSize, + writer: bufio.NewWriterSize(file, blockBufferSize), + curPool: newBlockPool(), + nextPool: newBlockPool(), + + fileNum: lastPrimaryNum, + length: types.Position(length), + + recFileNum: lastPrimaryNum, + recPos: types.Position(length), + } + + return mp, nil +} + +func (cp *SigToEpochPrimary) FileSize() uint32 { + return cp.maxFileSize +} + +// upgradeCachedValue updates the cached value for the given key if it exists. +// This is used to make sure that the cached value is updated when a new value +// is written to the primary (overwriting the old value), otherwise the cached +// value will be stale. +func (cp *SigToEpochPrimary) upgradeCachedValue(blk types.Block, key []byte, value []byte) { + idx, ok := cp.nextPool.refs[blk] + if ok { + if !bytes.Equal(cp.nextPool.blocks[idx].key, key) { + return + } + cp.nextPool.blocks[idx].value = value + } + idx, ok = cp.curPool.refs[blk] + if ok { + if !bytes.Equal(cp.curPool.blocks[idx].key, key) { + return + } + cp.curPool.blocks[idx].value = value + } +} + +func (cp *SigToEpochPrimary) getCached(blk types.Block) ([]byte, []byte, error) { + cp.poolLk.RLock() + defer cp.poolLk.RUnlock() + idx, ok := cp.nextPool.refs[blk] + if ok { + br := cp.nextPool.blocks[idx] + return br.key, br.value, nil + } + idx, ok = cp.curPool.refs[blk] + if ok { + br := cp.curPool.blocks[idx] + return br.key, br.value, nil + } + if blk.Offset >= absolutePrimaryPos(cp.recPos, cp.recFileNum, cp.maxFileSize) { + return nil, nil, fmt.Errorf("error getting cached multihashed primary: %w", types.ErrOutOfBounds) + } + return nil, nil, nil +} + +func (cp *SigToEpochPrimary) Get(blk types.Block) ([]byte, []byte, error) { + key, value, err := cp.getCached(blk) + if err != nil { + return nil, nil, err + } + if key != nil && value != nil { + return key, value, nil + } + + localPos, fileNum := localizePrimaryPos(blk.Offset, cp.maxFileSize) + + file, err := cp.fileCache.Open(primaryFileName(cp.basePath, fileNum)) + if err != nil { + return nil, nil, err + } + defer cp.fileCache.Close(file) + + read := make([]byte, int(blk.Size)) + if _, err = file.ReadAt(read, int64(localPos)); err != nil { + return nil, nil, fmt.Errorf("error reading data from gsfa primary: %w", err) + } + + return readNode(read) +} + +type Signature []byte + +// readNode extracts the signature from the data read and splits key and value. +func readNode(data []byte) (Signature, []byte, error) { + c, n, err := readSignature(data) + if err != nil { + return Signature{}, nil, err + } + + return c, data[n:], nil +} + +func readSignature(buf []byte) (Signature, int, error) { + // the signature is the first 64 bytes + if len(buf) < 64 { + return Signature{}, 0, fmt.Errorf("error reading signature from primary: expected at least 64 bytes, got %d", len(buf)) + } + sig := buf[:64] + return sig, 64, nil +} + +func _clone(b []byte) []byte { + if b == nil { + return nil + } + return append(b[:0:0], b...) +} + +// Put adds a new pending blockRecord to the pool and returns a Block that +// contains the location that the block will occupy in the primary. 
The +// returned primary location must be an absolute position across all primary +// files. +func (cp *SigToEpochPrimary) Put(key []byte, value []byte) (types.Block, error) { + recSize := int64(len(key) + len(value)) + dataSize := primaryRecordSize + if recSize != int64(dataSize) { + return types.Block{}, fmt.Errorf("expected record size %d, got %d", dataSize, recSize) + } + + cp.poolLk.Lock() + defer cp.poolLk.Unlock() + + if cp.recPos >= types.Position(cp.maxFileSize) { + cp.recFileNum++ + cp.recPos = 0 + } + + // Tell the index the location that this record will be written. + absRecPos := absolutePrimaryPos(cp.recPos, cp.recFileNum, cp.maxFileSize) + blk := types.Block{Offset: absRecPos, Size: types.Size(recSize)} + + cp.recPos += types.Position(dataSize) + + cp.nextPool.refs[blk] = len(cp.nextPool.blocks) + cp.nextPool.blocks = append(cp.nextPool.blocks, blockRecord{_clone(key), _clone(value)}) + cp.outstandingWork += types.Work(dataSize) + return blk, nil +} + +func (cp *SigToEpochPrimary) Overwrite(blk types.Block, key []byte, value []byte) error { + recSize := int64(len(key) + len(value)) + + if recSize != int64(blk.Size) { + return fmt.Errorf("expected record size %d, got %d", blk.Size, recSize) + } + cp.poolLk.Lock() + defer cp.poolLk.Unlock() + + localPos, fileNum := localizePrimaryPos(blk.Offset, cp.maxFileSize) + + fi, err := os.OpenFile(primaryFileName(cp.basePath, fileNum), os.O_WRONLY, 0o666) + if err != nil { + return err + } + defer fi.Close() + payload := append(key, value...) + + // overwrite the record + if _, err = fi.WriteAt(payload, int64(localPos)); err != nil { + return fmt.Errorf("error writing data to gsfa primary: %w", err) + } + cp.upgradeCachedValue(blk, _clone(key), _clone(value)) + return nil +} + +func (cp *SigToEpochPrimary) flushBlock(key []byte, value []byte) (types.Work, error) { + if cp.length >= types.Position(cp.maxFileSize) { + fileNum := cp.fileNum + 1 + primaryPath := primaryFileName(cp.basePath, fileNum) + // If the primary file being opened already exists then fileNum has + // wrapped and there are max uint32 of index files. This means that + // maxFileSize is set far too small or GC is disabled. + if _, err := os.Stat(primaryPath); !os.IsNotExist(err) { + return 0, fmt.Errorf("creating primary file overwrites existing, check file size, gc and path (maxFileSize=%d) (path=%s)", cp.maxFileSize, primaryPath) + } + + file, err := os.OpenFile(primaryPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0o644) + if err != nil { + return 0, fmt.Errorf("cannot open new primary file %s: %w", primaryPath, err) + } + if err = cp.writer.Flush(); err != nil { + return 0, fmt.Errorf("cannot write to primary file %s: %w", cp.file.Name(), err) + } + + cp.file.Close() + cp.writer.Reset(file) + cp.file = file + cp.fileNum = fileNum + cp.length = 0 + } + + size := len(key) + len(value) + if _, err := cp.writer.Write(append(key, value...)); err != nil { + return 0, err + } + + writeSize := size + cp.length += types.Position(writeSize) + + return types.Work(writeSize), nil +} + +func (cp *SigToEpochPrimary) IndexKey(key []byte) ([]byte, error) { + if len(key) != 64 { + return nil, fmt.Errorf("invalid key length: %d", len(key)) + } + // This is a sanity-check to see if it really is a solana signature.
+ decoded := solana.SignatureFromBytes(key) + return decoded[:], nil +} + +func (cp *SigToEpochPrimary) GetIndexKey(blk types.Block) ([]byte, error) { + key, _, err := cp.Get(blk) + if err != nil { + return nil, err + } + if key == nil { + return nil, nil + } + return cp.IndexKey(key) +} + +// Flush writes outstanding work and buffered data to the primary file. +func (cp *SigToEpochPrimary) Flush() (types.Work, error) { + // Only one Flush at a time, otherwise the 2nd Flush can swap the pools + // while the 1st Flush is still reading the pool being flushed. That could + // cause the pool being read by the 1st Flush to be written to + // concurrently. + cp.flushLock.Lock() + defer cp.flushLock.Unlock() + + cp.poolLk.Lock() + // If no new data, then nothing to do. + if len(cp.nextPool.blocks) == 0 { + cp.poolLk.Unlock() + return 0, nil + } + cp.curPool = cp.nextPool + cp.nextPool = newBlockPool() + cp.outstandingWork = 0 + cp.poolLk.Unlock() + + // The pool lock is released allowing Put to write to nextPool. The + // flushLock is still held, preventing concurrent flushes from changing the + // pools or accessing writer. + + var work types.Work + for _, record := range cp.curPool.blocks { + blockWork, err := cp.flushBlock(record.key, record.value) + if err != nil { + return 0, err + } + work += blockWork + } + err := cp.writer.Flush() + if err != nil { + return 0, fmt.Errorf("cannot flush data to primary file %s: %w", cp.file.Name(), err) + } + + return work, nil +} + +// Sync commits the contents of the primary file to disk. Flush should be +// called before calling Sync. +func (mp *SigToEpochPrimary) Sync() error { + mp.flushLock.Lock() + defer mp.flushLock.Unlock() + return mp.file.Sync() +} + +// Close calls Flush to write work and data to the primary file, and then +// closes the file. 
+func (mp *SigToEpochPrimary) Close() error { + if mp.closed { + return nil + } + + mp.fileCache.Clear() + + _, err := mp.Flush() + if err != nil { + mp.file.Close() + return err + } + + return mp.file.Close() +} + +func (cp *SigToEpochPrimary) OutstandingWork() types.Work { + cp.poolLk.RLock() + defer cp.poolLk.RUnlock() + return cp.outstandingWork +} + +type Iterator struct { + // The primary file being iterated over + file *os.File + // The current position within the primary file + pos int64 + // The base primary file path + base string + // The current primary file number + fileNum uint32 +} + +func (cp *SigToEpochPrimary) Iter() (primary.PrimaryStorageIter, error) { + header, err := readHeader(cp.headerPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + return NewIterator(cp.basePath, header.FirstFile), nil +} + +func NewIterator(basePath string, fileNum uint32) *Iterator { + return &Iterator{ + base: basePath, + fileNum: fileNum, + } +} + +func (iter *Iterator) Next() ([]byte, []byte, error) { + if iter == nil { + return nil, nil, nil + } + + if iter.file == nil { + file, err := os.OpenFile(primaryFileName(iter.base, iter.fileNum), os.O_RDONLY, 0o644) + if err != nil { + if os.IsNotExist(err) { + return nil, nil, io.EOF + } + return nil, nil, err + } + iter.file = file + iter.pos = 0 + } + + size := primaryRecordSize + pos := iter.pos + data := make([]byte, size) + _, err := iter.file.ReadAt(data, pos) + if err != nil { + iter.file.Close() + // if err == io.EOF { + // err = io.ErrUnexpectedEOF + // } + return nil, nil, err + } + + iter.pos += int64(size) + return readNode(data) +} + +func (iter *Iterator) Close() error { + if iter.file == nil { + return nil + } + return iter.file.Close() +} + +// StorageSize returns bytes of storage used by the primary files. +func (cp *SigToEpochPrimary) StorageSize() (int64, error) { + header, err := readHeader(cp.headerPath) + if err != nil { + if os.IsNotExist(err) { + return 0, nil + } + return 0, err + } + fi, err := os.Stat(cp.headerPath) + if err != nil { + return 0, err + } + size := fi.Size() + + fileNum := header.FirstFile + for { + primaryName := primaryFileName(cp.basePath, fileNum) + + // Get size of primary file. + fi, err = os.Stat(primaryName) + if err != nil { + if os.IsNotExist(err) { + break + } + return 0, err + } + size += fi.Size() + + fileNum++ + } + return size, nil +} + +func primaryFileName(basePath string, fileNum uint32) string { + return fmt.Sprintf("%s.%d", basePath, fileNum) +} + +func primaryPosToFileNum(pos types.Position, maxFileSize uint32) (bool, uint32) { + // Primary pos 0 means there is no data in the primary, so indicate empty. + if pos == 0 { + return false, 0 + } + // The start of the entry determines which file is used. + return true, uint32(pos / types.Position(maxFileSize)) +} + +// localizePrimaryPos decodes a position into a local primary offset and file number. +func localizePrimaryPos(pos types.Position, maxFileSize uint32) (types.Position, uint32) { + ok, fileNum := primaryPosToFileNum(pos, maxFileSize) + if !ok { + // Return 0 local pos to indicate empty bucket. + return 0, 0 + } + // Subtract file offset to get pos within its local file.
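// Worked example (added for clarity, not part of the patch): with
// maxFileSize = 1024 and absolute pos = 2500, primaryPosToFileNum above
// yields fileNum = 2500/1024 = 2, and the subtraction below gives
// localPos = 2500 - 2*1024 = 452, i.e. byte 452 of file "<basePath>.2".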
+ localPos := pos - (types.Position(fileNum) * types.Position(maxFileSize)) + return localPos, fileNum +} + +func absolutePrimaryPos(localPos types.Position, fileNum, maxFileSize uint32) types.Position { + return types.Position(maxFileSize)*types.Position(fileNum) + localPos +} + +func findLastPrimary(basePath string, fileNum uint32) (uint32, error) { + var lastFound uint32 + for { + _, err := os.Stat(primaryFileName(basePath, fileNum)) + if err != nil { + if os.IsNotExist(err) { + break + } + return 0, err + } + lastFound = fileNum + fileNum++ + } + return lastFound, nil +} + +var _ primary.PrimaryStorage = &SigToEpochPrimary{} diff --git a/store/primary/sig2epochprimary/sig2epochprimary_test.go b/store/primary/sig2epochprimary/sig2epochprimary_test.go new file mode 100644 index 00000000..5aed8fa8 --- /dev/null +++ b/store/primary/sig2epochprimary/sig2epochprimary_test.go @@ -0,0 +1,198 @@ +package sig2epochprimary_test + +// Copyright 2023 rpcpool +// This file has been modified by github.com/gagliardetto +// +// Copyright 2020 IPLD Team and various authors and contributors +// See LICENSE for details. +import ( + "io" + "path/filepath" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/gagliardetto/solana-go" + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/primary/sig2epochprimary" + "github.com/rpcpool/yellowstone-faithful/store/testutil" + "github.com/rpcpool/yellowstone-faithful/store/types" + "github.com/stretchr/testify/require" +) + +// These tests exercise basic Put/Get/Flush and iteration behavior of the +// sig2epoch primary storage. + +func TestIndexPut(t *testing.T) { + tempDir := t.TempDir() + primaryPath := filepath.Join(tempDir, "storethehash.primary") + primaryStorage, err := sig2epochprimary.Open(primaryPath, nil, filecache.New(1), 0) + require.NoError(t, err) + + blks := testutil.GenerateEpochs(5) + expectedOffset := types.Position(0) + for _, blk := range blks { + expectedSize := len(blk.Key[:]) + len(blk.Value) + loc, err := primaryStorage.Put(blk.Key[:], blk.Value) + require.NoError(t, err) + require.Equal(t, expectedOffset, loc.Offset) + require.Equal(t, types.Size(expectedSize), loc.Size) + expectedOffset += types.Position(expectedSize) + } + + outstandingWork := primaryStorage.OutstandingWork() + require.Equal(t, types.Work(expectedOffset), outstandingWork) + work, err := primaryStorage.Flush() + require.NoError(t, err) + require.Equal(t, types.Work(expectedOffset), work) + err = primaryStorage.Sync() + require.NoError(t, err) + + iter := sig2epochprimary.NewIterator(primaryPath, 0) + t.Cleanup(func() { iter.Close() }) + + gotBlocks := make([]testutil.Epoch, 0, len(blks)) + for range blks { + key, value, err := iter.Next() + require.NoError(t, err) + blk := testutil.Epoch{Key: solana.SignatureFromBytes(key), Value: value} + require.NoError(t, err) + gotBlocks = append(gotBlocks, blk) + } + require.Equal(t, blks, gotBlocks) + _, _, err = iter.Next() + require.EqualError(t, err, io.EOF.Error()) + + err = primaryStorage.Close() + require.NoError(t, err) +} + +func TestIndexGetEmptyIndex(t *testing.T) { + tempDir := t.TempDir() + primaryPath := filepath.Join(tempDir, "storethehash.primary") + primaryStorage, err := sig2epochprimary.Open(primaryPath, nil, filecache.New(1), 0) + require.NoError(t, err) + defer primaryStorage.Close() + + key, value, err := primaryStorage.Get(types.Block{ + Offset: 0, + Size: 50, + }) + require.Nil(t, key) + require.Nil(t, value) + require.Error(t, 
err) +} + +func TestIndexGet(t *testing.T) { + tempDir := t.TempDir() + primaryPath := filepath.Join(tempDir, "storethehash.primary") + primaryStorage, err := sig2epochprimary.Open(primaryPath, nil, filecache.New(1), 0) + require.NoError(t, err) + + // load blocks + blks := testutil.GenerateEpochs(5) + var locs []types.Block + for _, blk := range blks { + loc, err := primaryStorage.Put(blk.Key[:], blk.Value) + require.NoError(t, err) + locs = append(locs, loc) + } + + // should fetch from memory before flush + spew.Dump(blks) + for i, loc := range locs { + expectedBlk := blks[i] + key, value, err := primaryStorage.Get(loc) + require.NoError(t, err) + blk := testutil.Epoch{Key: solana.SignatureFromBytes(key), Value: value} + require.NoError(t, err) + spew.Dump(i, expectedBlk, blk) + require.True(t, expectedBlk.Key.Equals(blk.Key)) + require.Equal(t, expectedBlk.Value, blk.Value) + } + + // should fetch from disk after flush + _, err = primaryStorage.Flush() + require.NoError(t, err) + err = primaryStorage.Sync() + require.NoError(t, err) + + for i, loc := range locs { + expectedBlk := blks[i] + key, value, err := primaryStorage.Get(loc) + require.NoError(t, err) + blk := testutil.Epoch{Key: solana.SignatureFromBytes(key), Value: value} + require.NoError(t, err) + require.True(t, expectedBlk.Key.Equals(blk.Key)) + require.Equal(t, expectedBlk.Value, blk.Value) + } + + err = primaryStorage.Close() + require.NoError(t, err) +} + +func TestFlushRace(t *testing.T) { + const goroutines = 64 + tempDir := t.TempDir() + primaryPath := filepath.Join(tempDir, "storethehash.primary") + primaryStorage, err := sig2epochprimary.Open(primaryPath, nil, filecache.New(1), 0) + require.NoError(t, err) + + // load blocks + blks := testutil.GenerateEpochs(5) + for _, blk := range blks { + _, err := primaryStorage.Put(blk.Key[:], blk.Value) + require.NoError(t, err) + } + + start := make(chan struct{}) + errs := make(chan error) + for n := 0; n < goroutines; n++ { + go func() { + <-start + _, err := primaryStorage.Flush() + errs <- err + }() + } + close(start) + for n := 0; n < goroutines; n++ { + err := <-errs + require.NoError(t, err) + } + + require.NoError(t, primaryStorage.Close()) +} + +func TestFlushExcess(t *testing.T) { + tempDir := t.TempDir() + primaryPath := filepath.Join(tempDir, "storethehash.primary") + primaryStorage, err := sig2epochprimary.Open(primaryPath, nil, filecache.New(1), 0) + require.NoError(t, err) + + // load blocks + blks := testutil.GenerateEpochs(5) + for _, blk := range blks { + _, err := primaryStorage.Put(blk.Key[:], blk.Value) + require.NoError(t, err) + } + + work, err := primaryStorage.Flush() + require.NoError(t, err) + require.NotZero(t, work) + + blks = testutil.GenerateEpochs(5) + for _, blk := range blks { + _, err := primaryStorage.Put(blk.Key[:], blk.Value) + require.NoError(t, err) + } + + work, err = primaryStorage.Flush() + require.NoError(t, err) + require.NotZero(t, work) + + // Another flush with no new data should not do work. 
+ work, err = primaryStorage.Flush() + require.NoError(t, err) + require.Zero(t, work) + + require.NoError(t, primaryStorage.Close()) +} diff --git a/store/primary/sig2epochprimary/upgrade.go b/store/primary/sig2epochprimary/upgrade.go new file mode 100644 index 00000000..a0f7750f --- /dev/null +++ b/store/primary/sig2epochprimary/upgrade.go @@ -0,0 +1,326 @@ +package sig2epochprimary + +// Copyright 2023 rpcpool +// This file has been modified by github.com/gagliardetto +// +// Copyright 2020 IPLD Team and various authors and contributors +// See LICENSE for details. +import ( + "bufio" + "context" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/rpcpool/yellowstone-faithful/store/freelist" + "github.com/rpcpool/yellowstone-faithful/store/types" +) + +type IndexRemapper struct { + firstFile uint32 + maxFileSize uint32 + sizes []int64 +} + +func (mp *SigToEpochPrimary) NewIndexRemapper() (*IndexRemapper, error) { + header, err := readHeader(mp.headerPath) + if err != nil { + return nil, err + } + + var sizes []int64 + for fileNum := header.FirstFile; fileNum <= mp.fileNum; fileNum++ { + fi, err := os.Stat(primaryFileName(mp.basePath, fileNum)) + if err != nil { + if os.IsNotExist(err) { + break + } + return nil, err + } + sizes = append(sizes, fi.Size()) + } + + // If there are no primary files, or the only primary file is small enough + // that no remapping is needed, return a nil remapper. + if len(sizes) == 0 || (len(sizes) == 1 && sizes[0] < int64(mp.maxFileSize)) { + return nil, nil + } + + return &IndexRemapper{ + firstFile: header.FirstFile, + maxFileSize: mp.maxFileSize, + sizes: sizes, + }, nil +} + +func (ir *IndexRemapper) RemapOffset(pos types.Position) (types.Position, error) { + fileNum := ir.firstFile + newPos := int64(pos) + for _, size := range ir.sizes { + if newPos < size { + return absolutePrimaryPos(types.Position(newPos), fileNum, ir.maxFileSize), nil + } + newPos -= size + fileNum++ + } + return 0, fmt.Errorf("cannot convert out-of-range primary position: %d", pos) +} + +func (ir *IndexRemapper) FileSize() uint32 { + return ir.maxFileSize +} + +func upgradePrimary(ctx context.Context, filePath, headerPath string, maxFileSize uint32, freeList *freelist.FreeList) (uint32, error) { + // If header already exists, or old primary does not exist, then no upgrade. + _, err := os.Stat(headerPath) + if !os.IsNotExist(err) { + // Header already exists, do nothing. + return 0, nil + } + if _, err = os.Stat(filePath); err != nil { + if os.IsNotExist(err) { + // No primary to upgrade. + return 0, nil + } + return 0, err + } + + if ctx.Err() != nil { + return 0, ctx.Err() + } + + log.Infow("Upgrading primary storage and splitting into separate files", "newVersion", PrimaryVersion, "fileSize", maxFileSize) + if freeList != nil { + // Instead of remapping all the primary offsets in the freelist, call + // the garbage collector function to process the freelist and make the + // primary records deleted. This is safer because it can be re-applied + // if there is a failure during this phase. 
+ err := applyFreeList(ctx, freeList, filePath) + if err != nil { + return 0, fmt.Errorf("could not apply freelist to primary: %w", err) + } + } + + fileNum, err := chunkOldPrimary(ctx, filePath, int64(maxFileSize)) + if err != nil { + return 0, fmt.Errorf("error chunking primary: %w", err) + } + + if err = writeHeader(headerPath, newHeader(maxFileSize)); err != nil { + return 0, fmt.Errorf("error writing primary info file: %w", err) + } + + if err = os.Remove(filePath); err != nil { + return 0, fmt.Errorf("cannot remove old primary: %w", err) + } + + log.Infow("Replaced old primary with multiple files", "replaced", filePath, "files", fileNum+1) + log.Infof("Upgraded primary from version 0 to %d", PrimaryVersion) + return fileNum, nil +} + +func chunkOldPrimary(ctx context.Context, name string, fileSizeLimit int64) (uint32, error) { + file, err := os.Open(name) + if err != nil { + return 0, err + } + defer file.Close() + + fi, err := file.Stat() + if err != nil { + return 0, err + } + if fi.Size() == 0 { + return 0, nil + } + + total := fi.Size() + var fileNum uint32 + outName := primaryFileName(name, fileNum) + outFile, err := createFileAppend(outName) + if err != nil { + return 0, err + } + log.Infow("Upgrade created primary file", "file", filepath.Base(outName)) + writer := bufio.NewWriterSize(outFile, blockBufferSize) + + sizeBuf := make([]byte, sizePrefixSize) + var written int64 + var count int + var pos int64 + scratch := make([]byte, 1024) + + for { + _, err = file.ReadAt(sizeBuf, pos) + if err != nil { + if err != io.EOF { + log.Errorw("Error reading primary", "err", err) + } + break + } + size := binary.LittleEndian.Uint32(sizeBuf) + if _, err = writer.Write(sizeBuf); err != nil { + outFile.Close() + return 0, err + } + pos += sizePrefixSize + + del := false + if size&deletedBit != 0 { + size ^= deletedBit + del = true + } + + if int(size) > len(scratch) { + scratch = make([]byte, size) + } + data := scratch[:size] + + if !del { + if _, err = file.ReadAt(data, pos); err != nil { + log.Errorw("Error reading primary", "err", err) + break + } + } + _, err := writer.Write(data) + if err != nil { + outFile.Close() + return 0, err + } + pos += int64(size) + + written += sizePrefixSize + int64(size) + if written >= fileSizeLimit { + if err = writer.Flush(); err != nil { + return 0, err + } + outFile.Close() + if ctx.Err() != nil { + return 0, ctx.Err() + } + fileNum++ + outName = primaryFileName(name, fileNum) + outFile, err = createFileAppend(outName) + if err != nil { + return 0, err + } + log.Infof("Upgrade created primary file %s: %.1f%% done", filepath.Base(outName), float64(1000*pos/total)/10) + writer.Reset(outFile) + written = 0 + } + count++ + } + if written != 0 { + if err = writer.Flush(); err != nil { + return 0, err + } + } + outFile.Close() + return fileNum, nil +} + +func createFileAppend(name string) (*os.File, error) { + return os.OpenFile(name, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0o644) +} + +// applyFreeList reads the freelist and marks the locations in the old primary file +// as dead by setting the deleted bit in the record size field. +func applyFreeList(ctx context.Context, freeList *freelist.FreeList, filePath string) error { + flPath, err := freeList.ToGC() + if err != nil { + return fmt.Errorf("cannot get freelist gc file: %w", err) + } + + fi, err := os.Stat(flPath) + if err != nil { + return fmt.Errorf("cannot stat freelist gc file: %w", err) + } + flSize := fi.Size() + + // If the freelist size is non-zero, then process its records. 
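// Worked example (added for clarity, not part of the patch): the tombstone is
// the high bit of the little-endian uint32 size prefix (deletedBit = 1<<31).
// A live record of size 100 stores 0x00000064; marking it deleted stores
// 0x00000064 | 0x80000000 = 0x80000064. This is why record sizes must stay
// below 2^31, as the comment further down notes.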
+ var count int + if flSize != 0 { + log.Infof("Applying freelist to primary storage") + + flFile, err := os.OpenFile(flPath, os.O_RDONLY, 0o644) + if err != nil { + return fmt.Errorf("error opening freelist gc file: %w", err) + } + defer flFile.Close() + + primaryFile, err := os.OpenFile(filePath, os.O_RDWR, 0o644) + if err != nil { + return fmt.Errorf("cannot open primary file %s: %w", filePath, err) + } + defer primaryFile.Close() + + fi, err = primaryFile.Stat() + if err != nil { + return fmt.Errorf("cannot stat primary file %s: %w", primaryFile.Name(), err) + } + primarySize := fi.Size() + + total := int(flSize / (types.OffBytesLen + types.SizeBytesLen)) + flIter := freelist.NewIterator(bufio.NewReader(flFile)) + sizeBuf := make([]byte, sizePrefixSize) + percentIncr := 1 + nextPercent := percentIncr + + for { + free, err := flIter.Next() + if err != nil { + // Done reading freelist; log if error. + if err != io.EOF { + log.Errorw("Error reading freelist", "err", err) + } + break + } + + offset := int64(free.Offset) + + if offset > primarySize { + log.Errorw("freelist record has out-of-range primary offset", "offset", offset, "fileSize", primarySize) + continue // skip bad freelist entry + } + + if _, err = primaryFile.ReadAt(sizeBuf, offset); err != nil { + return err + } + recSize := binary.LittleEndian.Uint32(sizeBuf) + if recSize&deletedBit != 0 { + // Already deleted. + continue + } + if recSize != uint32(free.Size) { + log.Errorw("Record size in primary does not match size in freelist", "primaryRecordSize", recSize, "freelistRecordSize", free.Size, "file", flFile.Name(), "offset", offset) + } + + // Mark the record as deleted by setting the highest bit in the + // size. This assumes that the record size is < 2^31. + binary.LittleEndian.PutUint32(sizeBuf, recSize|deletedBit) + _, err = primaryFile.WriteAt(sizeBuf, int64(offset)) + if err != nil { + return fmt.Errorf("cannot write to primary file %s: %w", primaryFile.Name(), err) + } + + count++ + + // Log at every percent increment. + percent := 100 * count / total + if percent >= nextPercent { + log.Infof("Processed %d of %d freelist records: %d%% done", count, total, percent) + nextPercent += percentIncr + } + } + log.Infow("Marked primary records from freelist as deleted", "count", count) + flFile.Close() + } + + if err = os.Remove(flPath); err != nil { + return fmt.Errorf("error removing freelist: %w", err) + } + + return nil +} diff --git a/store/primary/sig2epochprimary/upgrade_test.go b/store/primary/sig2epochprimary/upgrade_test.go new file mode 100644 index 00000000..4ea0bdfe --- /dev/null +++ b/store/primary/sig2epochprimary/upgrade_test.go @@ -0,0 +1,175 @@ +package sig2epochprimary + +// Copyright 2023 rpcpool +// This file has been modified by github.com/gagliardetto +// +// Copyright 2020 IPLD Team and various authors and contributors +// See LICENSE for details. +import ( + "bufio" + "context" + "encoding/binary" + "errors" + "io" + "os" + "path/filepath" + "testing" + + "github.com/rpcpool/yellowstone-faithful/store/filecache" + "github.com/rpcpool/yellowstone-faithful/store/freelist" + "github.com/rpcpool/yellowstone-faithful/store/types" + "github.com/stretchr/testify/require" +) + +const testPrimaryPath = "valuestore_test/storethehash.data" + +// testFileSizeLimit is the maximum size for new primary files. Using a small +// file size for testing so that the test primary gets split into multiple files.
+const testFileSizeLimit = 1024 + +func TestUpgradePrimary(t *testing.T) { + t.Skip("Skipping upgrade test because upgrade is not supported yet.") + oldFile, err := os.OpenFile(testPrimaryPath, os.O_RDONLY, 0o644) + require.NoError(t, err) + defer oldFile.Close() + + // Scan the old file. + t.Log("Scanning old primary") + oldRecs, err := testScanPrimaryFile(oldFile) + require.NoError(t, err) + + // Return to beginning of old file. + _, err = oldFile.Seek(0, io.SeekStart) + require.NoError(t, err) + + newPrimaryPath := filepath.Join(t.TempDir(), "storethehash.data") + + // Copy test file to new location. + err = copyFile(testPrimaryPath, newPrimaryPath) + require.NoError(t, err) + + newFreeListPath := filepath.Join(t.TempDir(), "storethehash.index.free") + freeList, err := freelist.Open(newFreeListPath) + require.NoError(t, err) + defer freeList.Close() + + // Do the upgrade to split the primary into multiple files. + headerPath := newPrimaryPath + ".info" + updated, err := upgradePrimary(context.Background(), newPrimaryPath, headerPath, testFileSizeLimit, freeList) + require.NoError(t, err) + require.NotZero(t, updated) + + lastChunkNum, err := findLastPrimary(newPrimaryPath, 0) + require.NoError(t, err) + + t.Logf("Split old primary into %d files", lastChunkNum) + require.Equal(t, int(lastChunkNum), 198) + + // Make sure original file was removed. + _, err = os.Stat(newPrimaryPath) + require.True(t, os.IsNotExist(err)) + + var newRecs [][]byte + var fileNum, lastFileNum uint32 + for { + fileName := primaryFileName(newPrimaryPath, fileNum) + newFile, err := os.OpenFile(fileName, os.O_RDONLY, 0o644) + if os.IsNotExist(err) { + break + } + require.NoError(t, err) + + _, err = newFile.Stat() + require.NoError(t, err) + + recs, err := testScanPrimaryFile(newFile) + newFile.Close() + require.NoError(t, err) + + newRecs = append(newRecs, recs...) + + lastFileNum = fileNum + fileNum++ + } + require.Equal(t, lastFileNum, lastChunkNum) + + t.Log("Compare old to new records") + require.Equal(t, len(oldRecs), len(newRecs)) + for i := 0; i < len(oldRecs); i++ { + require.Equal(t, len(oldRecs[i]), len(newRecs[i])) + require.Equal(t, oldRecs[i], newRecs[i]) + } + + // Check that header was created + header, err := readHeader(headerPath) + require.NoError(t, err) + require.Equal(t, header.Version, 1) + require.Equal(t, header.MaxFileSize, uint32(testFileSizeLimit)) + require.Equal(t, header.FirstFile, uint32(0)) + + fc := filecache.New(16) + _, err = Open(newPrimaryPath, nil, fc, 0) + require.Equal(t, err, types.ErrPrimaryWrongFileSize{testFileSizeLimit, defaultMaxFileSize}) + + mp, err := Open(newPrimaryPath, nil, fc, testFileSizeLimit) + require.NoError(t, err) + require.NoError(t, mp.Close()) + + // Run upgrade again to make sure it does nothing.
+	updated, err = upgradePrimary(context.Background(), newPrimaryPath, headerPath, testFileSizeLimit, freeList)
+	require.NoError(t, err)
+	require.Zero(t, updated)
+}
+
+func testScanPrimaryFile(file *os.File) ([][]byte, error) {
+	var recs [][]byte
+
+	buffered := bufio.NewReader(file)
+	sizeBuffer := make([]byte, sizePrefixSize)
+	scratch := make([]byte, 256)
+	for {
+		_, err := io.ReadFull(buffered, sizeBuffer)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, err
+		}
+		size := binary.LittleEndian.Uint32(sizeBuffer)
+
+		if int(size) > len(scratch) {
+			scratch = make([]byte, size)
+		}
+		data := scratch[:size]
+		_, err = io.ReadFull(buffered, data)
+		if err != nil {
+			if err == io.EOF {
+				return nil, errors.New("unexpected EOF")
+			}
+			return nil, err
+		}
+
+		rec := make([]byte, len(sizeBuffer)+len(data))
+		copy(rec, sizeBuffer)
+		copy(rec[len(sizeBuffer):], data)
+		recs = append(recs, rec)
+	}
+	return recs, nil
+}
+
+func copyFile(src, dst string) error {
+	fin, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer fin.Close()
+
+	fout, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer fout.Close()
+
+	_, err = io.Copy(fout, fin)
+	return err
+}
diff --git a/store/primary/sig2epochprimary/valuestore_test/storethehash.data b/store/primary/sig2epochprimary/valuestore_test/storethehash.data
new file mode 100644
index 00000000..4c2d77c6
Binary files /dev/null and b/store/primary/sig2epochprimary/valuestore_test/storethehash.data differ
diff --git a/gsfa/store/store.go b/store/store.go
similarity index 92%
rename from gsfa/store/store.go
rename to store/store.go
index db8867d8..15afe49c 100644
--- a/gsfa/store/store.go
+++ b/store/store.go
@@ -17,19 +17,21 @@ import (
 	"time"

 	logging "github.com/ipfs/go-log/v2"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/filecache"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/freelist"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/index"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/primary"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/primary/gsfaprimary"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/types"
+	"github.com/rpcpool/yellowstone-faithful/store/filecache"
+	"github.com/rpcpool/yellowstone-faithful/store/freelist"
+	"github.com/rpcpool/yellowstone-faithful/store/index"
+	"github.com/rpcpool/yellowstone-faithful/store/primary"
+	"github.com/rpcpool/yellowstone-faithful/store/primary/gsfaprimary"
+	"github.com/rpcpool/yellowstone-faithful/store/primary/sig2epochprimary"
+	"github.com/rpcpool/yellowstone-faithful/store/types"
 )

 var log = logging.Logger("storethehash")

 const (
 	// Primary types
-	GsfaPrimary = "gsfaprimary"
+	GsfaPrimary       = "gsfaprimary"
+	SigToEpochPrimary = "sig2epochprimary"
 )

 type Store struct {
@@ -53,6 +55,13 @@ type Store struct {
 	flushNow     chan struct{}
 	syncInterval time.Duration
 	syncOnFlush  bool
+	immutable    bool
+}
+
+// SetReturnErrorOnDuplicatePut sets whether to return an error when a duplicate key is
+// inserted.
+func (s *Store) SetReturnErrorOnDuplicatePut(yes bool) {
+	s.immutable = yes
 }

 // OpenStore opens the index and returns a Store with the specified primary type.
@@ -82,6 +91,8 @@ func OpenStore(ctx context.Context, primaryType string, dataPath, indexPath stri
 	switch primaryType {
 	case GsfaPrimary:
 		primary, err = gsfaprimary.Open(dataPath, freeList, fileCache, c.primaryFileSize)
+	case SigToEpochPrimary:
+		primary, err = sig2epochprimary.Open(dataPath, freeList, fileCache, c.primaryFileSize)
 	default:
 		err = fmt.Errorf("unsupported primary type: %s", primaryType)
 	}
@@ -106,11 +117,6 @@ func OpenStore(ctx context.Context, primaryType string, dataPath, indexPath stri
 		return nil, err
 	}

-	_, ok := primary.(*gsfaprimary.GsfaPrimary)
-	if !ok {
-		return nil, fmt.Errorf("unsupported primary type: %T", primary)
-	}
-
 	store := &Store{
 		lastFlush: time.Now(),
 		index:     idx,
@@ -344,6 +350,23 @@ func (s *Store) setErr(err error) {
 	s.stateLk.Unlock()
 }

+type ErrDuplicate struct {
+	Key         []byte
+	StoredKey   []byte
+	Value       []byte
+	StoredValue []byte
+}
+
+func (e *ErrDuplicate) Error() string {
+	return fmt.Sprintf("duplicate key: %x", e.Key)
+}
+
+// Is reports whether err is an *ErrDuplicate, which lets errors.Is match this error type.
+func (e *ErrDuplicate) Is(err error) bool {
+	_, ok := err.(*ErrDuplicate)
+	return ok
+}
+
 func (s *Store) Put(key []byte, newValue []byte) error {
 	err := s.Err()
 	if err != nil {
@@ -377,6 +400,15 @@ func (s *Store) Put(key []byte, newValue []byte) error {
 	if storedKey != nil {
 		// if we're not accepting updates, this is the point we bail --
 		// the identical key is in primary storage, we don't do update operations
+
+		if s.immutable {
+			return &ErrDuplicate{
+				Key:         key,
+				StoredKey:   storedKey,
+				Value:       newValue,
+				StoredValue: storedVal,
+			}
+		}
 		cmpKey = true
 	}
 	// TODO: the key-value that we got here might be from the cache of primary storage,
@@ -386,15 +418,17 @@ func (s *Store) Put(key []byte, newValue []byte) error {
 		// directly return.
 		return nil
 	}
-	// overwrite in primary storage:
-	err = s.index.Primary.Overwrite(prevOffset, key, newValue)
-	if err != nil {
-		return err
-	}
-	// TODO: remove?
-	s.flushTick()
+	if storedKey != nil && bytes.Equal(indexKey, storedKey) {
+		// overwrite in primary storage:
+		err = s.index.Primary.Overwrite(prevOffset, key, newValue)
+		if err != nil {
+			return err
+		}
+		// TODO: remove?
+		s.flushTick()

-	return nil
+		return nil
+	}

 	// We are ready now to start putting/updating the value in the key.
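A minimal caller-side sketch of the new opt-in duplicate handling (not part of this diff; the names sig and epochBytes are hypothetical, and OpenStore's trailing options are elided since its full signature is truncated above):

	st, err := store.OpenStore(ctx, store.SigToEpochPrimary, dataPath, indexPath /* ...remaining options... */)
	if err != nil {
		return err
	}
	st.SetReturnErrorOnDuplicatePut(true)
	if err := st.Put(sig[:], epochBytes); err != nil {
		var dup *store.ErrDuplicate
		if errors.As(err, &dup) {
			// Key already present; the previously stored value is dup.StoredValue.
		} else {
			return err
		}
	}

errors.As works here because Put returns the *ErrDuplicate directly; the custom Is method additionally lets errors.Is(err, &store.ErrDuplicate{}) match any such error.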
diff --git a/gsfa/store/store_test.go b/store/store_test.go
similarity index 82%
rename from gsfa/store/store_test.go
rename to store/store_test.go
index 8239dc57..faba3c2d 100644
--- a/gsfa/store/store_test.go
+++ b/store/store_test.go
@@ -8,10 +8,10 @@ import (
 	"testing"

 	"github.com/davecgh/go-spew/spew"
-	store "github.com/rpcpool/yellowstone-faithful/gsfa/store"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/freelist"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/testutil"
-	"github.com/rpcpool/yellowstone-faithful/gsfa/store/types"
+	store "github.com/rpcpool/yellowstone-faithful/store"
+	"github.com/rpcpool/yellowstone-faithful/store/freelist"
+	"github.com/rpcpool/yellowstone-faithful/store/testutil"
+	"github.com/rpcpool/yellowstone-faithful/store/types"
 	"github.com/stretchr/testify/require"
 )

@@ -34,12 +34,12 @@ func TestUpdate(t *testing.T) {
 	blks := testutil.GenerateEntries(2)

 	t.Logf("Putting a new block")
-	err = s.Put(blks[0].Key.Bytes(), blks[0].RawValue())
+	err = s.Put(blks[0].Key.Bytes(), blks[0].Value)
 	require.NoError(t, err)
 	value, found, err := s.Get(blks[0].Key.Bytes())
 	require.NoError(t, err)
 	require.True(t, found)
-	require.Equal(t, value, blks[0].RawValue())
+	require.Equal(t, value, blks[0].Value)

 	{
 		_, err = s.Primary().Flush()
@@ -50,7 +50,7 @@ func TestUpdate(t *testing.T) {

 	t.Logf("Overwrite same key with different value")
 	spew.Dump(blks)
-	err = s.Put(blks[0].Key.Bytes(), blks[1].RawValue())
+	err = s.Put(blks[0].Key.Bytes(), blks[1].Value)
 	require.NoError(t, err)

 	{
@@ -63,25 +63,25 @@ func TestUpdate(t *testing.T) {
 	value, found, err = s.Get(blks[0].Key.Bytes())
 	require.NoError(t, err)
 	require.True(t, found)
-	require.Equal(t, blks[1].RawValue()[0:8], value[0:8], "value should be overwritten")
-	require.Equal(t, blks[1].RawValue(), value, "value should be overwritten")
-	require.NotEqual(t, blks[0].RawValue(), value, "value should be overwritten")
+	require.Equal(t, blks[1].Value[0:8], value[0:8], "value should be overwritten")
+	require.Equal(t, blks[1].Value, value, "value should be overwritten")
+	require.NotEqual(t, blks[0].Value, value, "value should be overwritten")
 	{
 		it, err := s.Primary().Iter()
 		require.NoError(t, err)
 		key, value, err := it.Next()
 		require.NoError(t, err)
 		require.Equal(t, blks[0].Key.Bytes(), key)
-		require.Equal(t, blks[1].RawValue(), value)
+		require.Equal(t, blks[1].Value, value)
 	}

 	t.Logf("Overwrite same key with same value")
-	err = s.Put(blks[0].Key.Bytes(), blks[1].RawValue())
+	err = s.Put(blks[0].Key.Bytes(), blks[1].Value)
 	require.NoError(t, err)
 	value, found, err = s.Get(blks[0].Key.Bytes())
 	require.NoError(t, err)
 	require.True(t, found)
-	require.Equal(t, value, blks[1].RawValue())
+	require.Equal(t, value, blks[1].Value)

 	s.Flush()

@@ -95,7 +95,7 @@ func TestUpdate(t *testing.T) {
 			require.Zero(t, count)
 			require.NoError(t, err)
 			require.Equal(t, blks[0].Key.Bytes(), key)
-			require.Equal(t, blks[1].RawValue(), val)
+			require.Equal(t, blks[1].Value, val)
 			count++
 		}
 	})
@@ -108,9 +108,9 @@ func TestRemove(t *testing.T) {
 	blks := testutil.GenerateEntries(2)

 	t.Logf("Putting blocks")
-	err = s.Put(blks[0].Key.Bytes(), blks[0].RawValue())
+	err = s.Put(blks[0].Key.Bytes(), blks[0].Value)
 	require.NoError(t, err)
-	err = s.Put(blks[1].Key.Bytes(), blks[1].RawValue())
+	err = s.Put(blks[1].Key.Bytes(), blks[1].Value)
 	require.NoError(t, err)

 	t.Logf("Removing the first block")
@@ -122,7 +122,7 @@ func TestRemove(t *testing.T) {
 	value, found, err := s.Get(blks[1].Key.Bytes())
 	require.NoError(t, err)
 	require.True(t, found)
-	require.Equal(t, value, blks[1].RawValue())
+	require.Equal(t, value, blks[1].Value)
 	_, found, err = s.Get(blks[0].Key.Bytes())
 	require.NoError(t, err)
 	require.False(t, found)
@@ -164,7 +164,7 @@ func TestTranslate(t *testing.T) {
 	// Store blocks.
 	blks := testutil.GenerateEntries(5)
 	for i := range blks {
-		err = s1.Put(blks[i].Key.Bytes(), blks[i].RawValue())
+		err = s1.Put(blks[i].Key.Bytes(), blks[i].Value)
 		require.NoError(t, err)
 	}
 	// Remove one block.
@@ -185,7 +185,7 @@ func TestTranslate(t *testing.T) {
 		value, found, err := s2.Get(blks[i].Key.Bytes())
 		require.NoError(t, err)
 		require.True(t, found)
-		require.Equal(t, value, blks[i].RawValue())
+		require.Equal(t, value, blks[i].Value)
 	}

 	// Check that removed block was not found.
@@ -206,7 +206,7 @@ func TestTranslate(t *testing.T) {
 		value, found, err := s3.Get(blks[i].Key.Bytes())
 		require.NoError(t, err)
 		require.True(t, found)
-		require.Equal(t, value, blks[i].RawValue())
+		require.Equal(t, value, blks[i].Value)
 	}

 	// Check that removed block was not found.
diff --git a/gsfa/store/testutil/testutil.go b/store/testutil/testutil.go
similarity index 62%
rename from gsfa/store/testutil/testutil.go
rename to store/testutil/testutil.go
index d98f8b14..92e2102e 100644
--- a/gsfa/store/testutil/testutil.go
+++ b/store/testutil/testutil.go
@@ -14,7 +14,10 @@ import (
 // RandomBytes returns a byte array of the given size with random values.
 func RandomBytes(n int64) []byte {
 	data := make([]byte, n)
-	_, _ = rand.Read(data)
+	_, err := rand.Read(data)
+	if err != nil {
+		panic(err)
+	}
 	return data
 }

@@ -23,11 +26,6 @@ type Entry struct {
 	Value []byte // 8 bytes
 }

-// RawValue returns the Value of the Entry.
-func (e *Entry) RawValue() []byte {
-	return e.Value
-}
-
 func GenerateEntries(n int) []Entry {
 	generatedEntries := make([]Entry, 0, n)
 	for i := 0; i < n; i++ {
@@ -40,3 +38,21 @@ func GenerateEntries(n int) []Entry {
 	}
 	return generatedEntries
 }
+
+func GenerateEpochs(n int) []Epoch {
+	generatedEntries := make([]Epoch, 0, n)
+	for i := 0; i < n; i++ {
+		key := solana.SignatureFromBytes(RandomBytes(64))
+		value := RandomBytes(2) // The value is 2 bytes long (uint16 little-endian).
+		generatedEntries = append(generatedEntries, Epoch{
+			Key:   key,
+			Value: value,
+		})
+	}
+	return generatedEntries
+}
+
+type Epoch struct {
+	Key   solana.Signature // 64 bytes
+	Value []byte           // 2 bytes
+}
diff --git a/gsfa/store/types/errors.go b/store/types/errors.go
similarity index 100%
rename from gsfa/store/types/errors.go
rename to store/types/errors.go
diff --git a/gsfa/store/types/types.go b/store/types/types.go
similarity index 100%
rename from gsfa/store/types/types.go
rename to store/types/types.go
diff --git a/tools.go b/tools.go
new file mode 100644
index 00000000..623091a4
--- /dev/null
+++ b/tools.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"gopkg.in/yaml.v3"
+)
+
+func isDirectory(path string) (bool, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	return info.IsDir(), nil
+}
+
+// exists checks whether a file or directory exists.
+func exists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		// file does not exist
+		return false, nil
+	}
+	// other error
+	return false, err
+}
+
+// isFile checks whether a path is a file.
+func isFile(path string) (bool, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	return !info.IsDir(), nil
+}
+
+// isJSONFile checks whether a path has a .json extension.
+func isJSONFile(filepath string) bool {
+	// Guard the length so short paths don't cause a slice out-of-range panic.
+	return len(filepath) >= 5 && filepath[len(filepath)-5:] == ".json"
+}
+
+// isYAMLFile checks whether a path has a .yaml or .yml extension.
+func isYAMLFile(filepath string) bool {
+	return (len(filepath) >= 5 && filepath[len(filepath)-5:] == ".yaml") ||
+		(len(filepath) >= 4 && filepath[len(filepath)-4:] == ".yml")
+}
+
+// loadFromJSON loads a JSON file into dst (which must be a pointer).
+func loadFromJSON(configFilepath string, dst any) error {
+	file, err := os.Open(configFilepath)
+	if err != nil {
+		return fmt.Errorf("failed to open config file: %w", err)
+	}
+	defer file.Close()
+	return json.NewDecoder(file).Decode(dst)
+}
+
+// loadFromYAML loads a YAML file into dst (which must be a pointer).
+func loadFromYAML(configFilepath string, dst any) error {
+	file, err := os.Open(configFilepath)
+	if err != nil {
+		return fmt.Errorf("failed to open config file: %w", err)
+	}
+	defer file.Close()

+	return yaml.NewDecoder(file).Decode(dst)
+}
diff --git a/tools/compress-gsfa.sh b/tools/compress-gsfa.sh
new file mode 100755
index 00000000..1d623c85
--- /dev/null
+++ b/tools/compress-gsfa.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -o pipefail
+set -e
+
+# The provided index folder must exist.
+if [ ! -d "$1" ]; then
+	echo "The provided index folder does not exist"
+	exit 1
+fi
+# It must end with -gsfa-index or -gsfa-index/ .
+if [ "${1: -11}" != "-gsfa-index" ] && [ "${1: -12}" != "-gsfa-index/" ]; then
+	echo "The provided index folder must end with -gsfa-index or -gsfa-index/"
+	exit 1
+fi
+# Declare source and destination folders, trimming any trailing slash.
+source_folder="${1%/}"
+destination_folder="${2%/}"
+destination_file="$destination_folder/$(basename "$source_folder").tar.zst"
+# Check that the destination folder exists.
+if [ ! -d "$destination_folder" ]; then
+	echo "The provided destination folder does not exist"
+	exit 1
+fi
+# Refuse to overwrite an existing archive.
+if [ -f "$destination_file" ]; then
+	echo "The destination file already exists: $destination_file"
+	exit 1
+fi
+# Report the size of the index folder and the space available at the destination.
+index_size=$(du -sh "$source_folder" | cut -f1)
+echo "Index folder size: $index_size"
+available_space=$(df -h "$destination_folder" | tail -1 | awk '{print $4}')
+echo "Available space in destination folder: $available_space"
+echo "Compressing $source_folder to $destination_file ..."
+tar -I zstd -cf "$destination_file" -C "$(dirname "$source_folder")" "$(basename "$source_folder")"
+echo "Done"
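A minimal usage sketch for the script (the paths shown are hypothetical); the resulting archive can be restored later with plain tar and zstd:

	./tools/compress-gsfa.sh /data/epoch-455-gsfa-index /backups
	# produces /backups/epoch-455-gsfa-index.tar.zst
	tar -I zstd -xf /backups/epoch-455-gsfa-index.tar.zst -C /data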