Merge pull request #339 from onflow/fxamacker/remove-empty-next-slab-id-in-encoded-array-data

Omit empty next slab ID in encoded array data slab
fxamacker authored Sep 12, 2023
2 parents a5c7323 + cf56a4c commit 79e86d9
Showing 7 changed files with 86 additions and 78 deletions.
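
A note on the test-vector changes below: wherever a data slab actually has a next slab ID, the first head byte changes from 0x10 to 0x12, while slabs without a next slab ID keep 0x10 and drop the 16 zero bytes that previously encoded an empty ID. A minimal sketch of one possible reading, inferred only from these vectors and not from any documented head layout; the mask names and bit positions are assumptions for illustration:

package main

import "fmt"

// Hypothetical masks inferred from the test vectors in this commit
// (0x10 = version 1, 0x12 = version 1 + next slab ID present); they are
// illustrative only and may not match atree's real head constants.
const (
    maskVersion       byte = 0xf0
    maskHasNextSlabID byte = 0x02
)

func describeHeadByte(b byte) string {
    version := (b & maskVersion) >> 4
    if b&maskHasNextSlabID != 0 {
        return fmt.Sprintf("version %d, next slab ID follows", version)
    }
    return fmt.Sprintf("version %d, no next slab ID", version)
}

func main() {
    fmt.Println(describeHeadByte(0x10)) // version 1, no next slab ID
    fmt.Println(describeHeadByte(0x12)) // version 1, next slab ID follows
}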
19 changes: 13 additions & 6 deletions array.go
@@ -417,16 +417,17 @@ func newArrayDataSlabFromDataV1(
var extraData *ArrayExtraData
var next SlabID

// Decode header
// Decode extra data
if h.isRoot() {
// Decode extra data
extraData, data, err = newArrayExtraDataFromData(data, decMode, decodeTypeInfo)
if err != nil {
// err is categorized already by newArrayExtraDataFromData.
return nil, err
}
} else {
// Decode next slab ID
}

// Decode next slab ID
if h.hasNextSlabID() {
next, err = NewSlabIDFromRawBytes(data)
if err != nil {
// error returned from NewSlabIDFromRawBytes is categorized already.
@@ -516,6 +517,10 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
h.setHasPointers()
}

if a.next != SlabIDUndefined {
h.setHasNextSlabID()
}

if a.extraData != nil {
h.setRoot()
}
@@ -534,8 +539,10 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
// err is already categorized by ArrayExtraData.Encode().
return err
}
} else {
// Encode next slab ID to scratch
}

// Encode next slab ID
if a.next != SlabIDUndefined {
n, err := a.next.ToRawBytes(enc.Scratch[:])
if err != nil {
// Don't need to wrap because err is already categorized by SlabID.ToRawBytes().
75 changes: 49 additions & 26 deletions array_debug.go
@@ -446,17 +446,15 @@ func validArraySlabSerialization(
}

// Extra check: encoded data size == header.size
encodedExtraDataSize, err := getEncodedArrayExtraDataSize(slab.ExtraData(), cborEncMode)
encodedSlabSize, err := computeSlabSize(data)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by getEncodedArrayExtraDataSize().
// Don't need to wrap error as external error because err is already categorized by computeSlabSize().
return err
}

// Need to exclude extra data size from encoded data size.
encodedSlabSize := uint32(len(data) - encodedExtraDataSize)
if slab.Header().size != encodedSlabSize {
return NewFatalError(fmt.Errorf("slab %d encoded size %d != header.size %d (encoded extra data size %d)",
id, encodedSlabSize, slab.Header().size, encodedExtraDataSize))
if slab.Header().size != uint32(encodedSlabSize) {
return NewFatalError(fmt.Errorf("slab %d encoded size %d != header.size %d",
id, encodedSlabSize, slab.Header().size))
}

// Compare encoded data of original slab with encoded data of decoded slab
@@ -640,25 +638,6 @@ func arrayExtraDataEqual(expected, actual *ArrayExtraData) error {
return nil
}

func getEncodedArrayExtraDataSize(extraData *ArrayExtraData, cborEncMode cbor.EncMode) (int, error) {
if extraData == nil {
return 0, nil
}

var buf bytes.Buffer
enc := NewEncoder(&buf, cborEncMode)

// Normally the flag shouldn't be 0. But in this case we just need the encoded data size
// so the content of the flag doesn't matter.
err := extraData.Encode(enc)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by ArrayExtraData.Encode().
return 0, err
}

return len(buf.Bytes()), nil
}

func ValidValueSerialization(
value Value,
cborDecMode cbor.DecMode,
@@ -690,3 +669,47 @@ func ValidValueSerialization(
}
return nil
}

func computeSlabSize(data []byte) (int, error) {
if len(data) < versionAndFlagSize {
return 0, NewDecodingError(fmt.Errorf("data is too short"))
}

h, err := newHeadFromData(data[:versionAndFlagSize])
if err != nil {
return 0, NewDecodingError(err)
}

slabExtraDataSize, err := getExtraDataSize(h, data[versionAndFlagSize:])
if err != nil {
return 0, err
}

// Computed slab size (slab header size):
// - excludes slab extra data size
// - adds next slab ID for non-root data slab if not encoded
size := len(data) - slabExtraDataSize

isDataSlab := h.getSlabArrayType() == slabArrayData ||
h.getSlabMapType() == slabMapData ||
h.getSlabMapType() == slabMapCollisionGroup

if !h.isRoot() && isDataSlab && !h.hasNextSlabID() {
size += slabIDSize
}

return size, nil
}

func getExtraDataSize(h head, data []byte) (int, error) {
if h.isRoot() {
dec := cbor.NewStreamDecoder(bytes.NewBuffer(data))
b, err := dec.DecodeRawBytes()
if err != nil {
return 0, NewDecodingError(err)
}
return len(b), nil
}

return 0, nil
}
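
The computeSlabSize helper above compares the stored byte count against header.size, which still budgets 16 bytes for a next slab ID even when the ID is SlabIDUndefined and therefore omitted from the encoding (the same reason the map metadata test later in this commit adds slabIDSize to len(stored[id3])). A minimal sketch of that accounting with made-up byte counts; only the 16-byte ID size is taken from the zero bytes removed from the test vectors:

package main

import "fmt"

func main() {
    // Example values for a non-root array data slab whose next slab ID is
    // undefined and is therefore not written to storage (assumed numbers).
    encodedLen := 100     // bytes actually stored for the slab
    extraDataSize := 0    // non-root slabs carry no extra data
    const slabIDSize = 16 // matches the 16 zero bytes dropped from the test vectors

    isRoot := false
    isDataSlab := true
    hasNextSlabID := false

    // Mirrors the logic of computeSlabSize above: exclude extra data, then
    // add the omitted next slab ID back so the result stays comparable with
    // header.size, which still accounts for it.
    size := encodedLen - extraDataSize
    if !isRoot && isDataSlab && !hasNextSlabID {
        size += slabIDSize
    }

    fmt.Println(size) // 116, expected to equal header.size
}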
4 changes: 1 addition & 3 deletions array_test.go
@@ -1899,7 +1899,7 @@ func TestArrayEncodeDecode(t *testing.T) {
// (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa]
id2: {
// version
0x10,
0x12,
// array data slab flag
0x00,
// next slab id
@@ -1924,8 +1924,6 @@ func TestArrayEncodeDecode(t *testing.T) {
0x10,
// array data slab flag
0x40,
// next slab id
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// CBOR encoded array head (fixed size 3 byte)
0x99, 0x00, 0x0b,
// CBOR encoded array elements
22 changes: 14 additions & 8 deletions map.go
@@ -2200,20 +2200,21 @@ func newMapDataSlabFromDataV1(
var extraData *MapExtraData
var next SlabID

// Decode header
// Decode extra data
if h.isRoot() {
// Decode extra data
extraData, data, err = newMapExtraDataFromData(data, decMode, decodeTypeInfo)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by newMapExtraDataFromData().
return nil, err
}
} else {
}

// Decode next slab ID
if h.hasNextSlabID() {
if len(data) < slabIDSize {
return nil, NewDecodingErrorf("data is too short for map data slab")
}

// Decode next slab ID
next, err = NewSlabIDFromRawBytes(data)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by NewSlabIDFromRawBytes().
@@ -2291,6 +2292,10 @@ func (m *MapDataSlab) Encode(enc *Encoder) error {
h.setHasPointers()
}

if m.next != SlabIDUndefined {
h.setHasNextSlabID()
}

if m.anySize {
h.setNoSizeLimit()
}
@@ -2305,16 +2310,17 @@ func (m *MapDataSlab) Encode(enc *Encoder) error {
return NewEncodingError(err)
}

// Encode header
// Encode extra data
if m.extraData != nil {
// Encode extra data
err = m.extraData.Encode(enc)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by MapExtraData.Encode().
return err
}
} else {
// Encode next slab ID to scratch
}

// Encode next slab ID
if m.next != SlabIDUndefined {
n, err := m.next.ToRawBytes(enc.Scratch[:])
if err != nil {
// Don't need to wrap error as external error because err is already categorized by SlabID.ToRawBytes().
29 changes: 5 additions & 24 deletions map_debug.go
@@ -849,18 +849,16 @@ func validMapSlabSerialization(
}

// Extra check: encoded data size == header.size
encodedExtraDataSize, err := getEncodedMapExtraDataSize(slab.ExtraData(), cborEncMode)
encodedSlabSize, err := computeSlabSize(data)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by getEncodedMapExtraDataSize().
// Don't need to wrap error as external error because err is already categorized by computeSlabSize().
return err
}

// Need to exclude extra data size from encoded data size.
encodedSlabSize := uint32(len(data) - encodedExtraDataSize)
if slab.Header().size != encodedSlabSize {
if slab.Header().size != uint32(encodedSlabSize) {
return NewFatalError(
fmt.Errorf("slab %d encoded size %d != header.size %d (encoded extra data size %d)",
id, encodedSlabSize, slab.Header().size, encodedExtraDataSize))
fmt.Errorf("slab %d encoded size %d != header.size %d",
id, encodedSlabSize, slab.Header().size))
}

// Compare encoded data of original slab with encoded data of decoded slab
@@ -1357,20 +1355,3 @@ func mapExtraDataEqual(expected, actual *MapExtraData) error {

return nil
}

func getEncodedMapExtraDataSize(extraData *MapExtraData, cborEncMode cbor.EncMode) (int, error) {
if extraData == nil {
return 0, nil
}

var buf bytes.Buffer
enc := NewEncoder(&buf, cborEncMode)

err := extraData.Encode(enc)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by MapExtraData.Encode().
return 0, err
}

return len(buf.Bytes()), nil
}
11 changes: 3 additions & 8 deletions map_test.go
@@ -2737,7 +2737,7 @@ func TestMapEncodeDecode(t *testing.T) {
// data slab
id2: {
// version
0x10,
0x12,
// flag: map data
0x08,
// next slab id
@@ -2789,8 +2789,6 @@ func TestMapEncodeDecode(t *testing.T) {
0x10,
// flag: has pointer + map data
0x48,
// next slab id
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

// the following encoded data is valid CBOR

@@ -2864,7 +2862,8 @@ func TestMapEncodeDecode(t *testing.T) {
require.True(t, ok)
require.Equal(t, 2, len(meta.childrenHeaders))
require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size)
require.Equal(t, uint32(len(stored[id3])), meta.childrenHeaders[1].size)
// Need to add slabIDSize to encoded data slab here because empty slab ID is omitted during encoding.
require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size)

// Decode data to new storage
storage2 := newTestPersistentStorageWithData(t, stored)
@@ -3392,8 +3391,6 @@ func TestMapEncodeDecode(t *testing.T) {
0x10,
// flag: any size + collision group
0x2b,
// next slab id
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

// the following encoded data is valid CBOR

@@ -3457,8 +3454,6 @@ func TestMapEncodeDecode(t *testing.T) {
0x10,
// flag: any size + collision group
0x2b,
// next slab id
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

// the following encoded data is valid CBOR

4 changes: 1 addition & 3 deletions storage_test.go
@@ -927,7 +927,7 @@ func TestPersistentStorageSlabIterator(t *testing.T) {
// (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa]
id2: {
// version
0x10,
0x12,
// array data slab flag
0x00,
// next slab id
@@ -952,8 +952,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) {
0x10,
// array data slab flag
0x40,
// next slab id
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// CBOR encoded array head (fixed size 3 byte)
0x99, 0x00, 0x0b,
// CBOR encoded array elements
