Skip to content

Commit

Permalink
Merge pull request #525 from lightninglabs/fuzz-fixes
Browse files Browse the repository at this point in the history
decoding: fix panics by limiting max decode sizes in proofs, commitments and assets
  • Loading branch information
guggero authored Oct 6, 2023
2 parents 5373ccc + 668416e commit afece9e
Show file tree
Hide file tree
Showing 25 changed files with 514 additions and 64 deletions.
8 changes: 8 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -206,6 +206,14 @@ flake-unit-race:
@$(call print, "Flake hunting races in unit tests.")
while [ $$? -eq 0 ]; do env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOLIST) | $(XARGS) env $(GOTEST) -race -test.timeout=20m -count=1; done

# =============
# FUZZING
# =============

fuzz:
@$(call print, "Fuzzing packages '$(FUZZPKG)'.")
scripts/fuzz.sh run "$(FUZZPKG)" "$(FUZZ_TEST_RUN_TIME)" "$(FUZZ_NUM_PROCESSES)" "$(FUZZ_TEST_TIMEOUT)"

# =========
# UTILITIES
# =========
Expand Down
2 changes: 1 addition & 1 deletion address/address.go
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ func (a *Tap) Decode(r io.Reader) error {
if err != nil {
return err
}
return stream.Decode(r)
return stream.DecodeP2P(r)
}

// EncodeAddress returns a bech32m string encoding of a Taproot Asset address.
Expand Down
8 changes: 8 additions & 0 deletions address/address_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package address

import (
"bytes"
"encoding/hex"
"testing"

Expand Down Expand Up @@ -485,3 +486,10 @@ func runBIPTestVector(t *testing.T, testVectors *TestVectors) {
})
}
}

// FuzzAddressDecode feeds arbitrary bytes into the Tap address TLV
// decoder to make sure malformed input can never cause a panic.
func FuzzAddressDecode(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		addr := &Tap{}

		// Only freedom from panics is asserted here, so the decode
		// error is intentionally discarded.
		_ = addr.Decode(bytes.NewReader(data))
	})
}
7 changes: 7 additions & 0 deletions asset/asset.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (
"unicode"
"unicode/utf8"

"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
"github.com/btcsuite/btcd/txscript"
Expand All @@ -30,6 +31,12 @@ const (
// This byte length is equivalent to character count for single-byte
// UTF-8 characters.
MaxAssetNameLength = 64

// MaxAssetEncodeSizeBytes is the size we expect an asset to not exceed
// in its encoded form. This is used to prevent OOMs when decoding
// assets. The main contributing factor to this size are the previous
// witnesses which we currently allow to number up to 65k witnesses.
MaxAssetEncodeSizeBytes = blockchain.MaxBlockWeight
)

// SerializedKey is a type for representing a public key, serialized in the
Expand Down
68 changes: 51 additions & 17 deletions asset/encoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,22 @@ func VarIntDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
return tlv.NewTypeForDecodingErr(val, "uint64", 8, l)
}

func VarBytesEncoder(w io.Writer, val any, buf *[8]byte) error {
// DVarBytesWithLimit returns a TLV decoder for a variable-length byte
// slice that refuses to decode any record larger than limit. This
// prevents OOMs/panics from maliciously large length prefixes, since the
// allocation below is sized directly from the on-the-wire length l.
func DVarBytesWithLimit(limit uint64) tlv.Decoder {
	return func(r io.Reader, val any, _ *[8]byte, l uint64) error {
		// Reject oversized records before allocating anything.
		if l > limit {
			return tlv.ErrRecordTooLarge
		}

		b, ok := val.(*[]byte)
		if !ok {
			return tlv.NewTypeForDecodingErr(val, "[]byte", l, l)
		}

		*b = make([]byte, l)
		_, err := io.ReadFull(r, *b)
		return err
	}
}

func InlineVarBytesEncoder(w io.Writer, val any, buf *[8]byte) error {
if t, ok := val.(*[]byte); ok {
if err := tlv.WriteVarInt(w, uint64(len(*t)), buf); err != nil {
return err
Expand All @@ -55,7 +70,9 @@ func VarBytesEncoder(w io.Writer, val any, buf *[8]byte) error {
return tlv.NewTypeForEncodingErr(val, "[]byte")
}

func VarBytesDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {
func InlineVarBytesDecoder(r io.Reader, val any, buf *[8]byte,
maxLen uint64) error {

if typ, ok := val.(*[]byte); ok {
bytesLen, err := tlv.ReadVarInt(r, buf)
if err != nil {
Expand All @@ -64,16 +81,16 @@ func VarBytesDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {

// We'll limit all decoded byte slices to prevent memory blow
// ups or panics.
if bytesLen > (2<<24)-1 {
if bytesLen > maxLen {
return fmt.Errorf("%w: %v", ErrByteSliceTooLarge,
bytesLen)
}

var bytes []byte
if err := tlv.DVarBytes(r, &bytes, buf, bytesLen); err != nil {
var decoded []byte
if err := tlv.DVarBytes(r, &decoded, buf, bytesLen); err != nil {
return err
}
*typ = bytes
*typ = decoded
return nil
}
return tlv.NewTypeForEncodingErr(val, "[]byte")
Expand Down Expand Up @@ -268,7 +285,7 @@ func GenesisEncoder(w io.Writer, val any, buf *[8]byte) error {
return err
}
tagBytes := []byte(t.Tag)
if err := VarBytesEncoder(w, &tagBytes, buf); err != nil {
if err := InlineVarBytesEncoder(w, &tagBytes, buf); err != nil {
return err
}
if err := tlv.EBytes32(w, &t.MetaHash, buf); err != nil {
Expand All @@ -290,7 +307,8 @@ func GenesisDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {
return err
}
var tag []byte
if err = VarBytesDecoder(r, &tag, buf, 0); err != nil {
err = InlineVarBytesDecoder(r, &tag, buf, MaxAssetNameLength)
if err != nil {
return err
}
genesis.Tag = string(tag)
Expand Down Expand Up @@ -353,7 +371,8 @@ func TxWitnessEncoder(w io.Writer, val any, buf *[8]byte) error {
}
for _, part := range *t {
part := part
if err := VarBytesEncoder(w, &part, buf); err != nil {
err := InlineVarBytesEncoder(w, &part, buf)
if err != nil {
return err
}
}
Expand All @@ -379,7 +398,10 @@ func TxWitnessDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {
witness := make(wire.TxWitness, 0, numItems)
for i := uint64(0); i < numItems; i++ {
var item []byte
if err := VarBytesDecoder(r, &item, buf, 0); err != nil {
err = InlineVarBytesDecoder(
r, &item, buf, math.MaxUint16,
)
if err != nil {
return err
}
witness = append(witness, item)
Expand All @@ -401,7 +423,7 @@ func WitnessEncoder(w io.Writer, val any, buf *[8]byte) error {
return err
}
streamBytes := streamBuf.Bytes()
err := VarBytesEncoder(w, &streamBytes, buf)
err := InlineVarBytesEncoder(w, &streamBytes, buf)
if err != nil {
return err
}
Expand Down Expand Up @@ -429,7 +451,9 @@ func WitnessDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {
*typ = make([]Witness, 0, numItems)
for i := uint64(0); i < numItems; i++ {
var streamBytes []byte
err := VarBytesDecoder(r, &streamBytes, buf, 0)
err = InlineVarBytesDecoder(
r, &streamBytes, buf, math.MaxUint16,
)
if err != nil {
return err
}
Expand All @@ -453,23 +477,29 @@ func SplitCommitmentEncoder(w io.Writer, val any, buf *[8]byte) error {
return err
}
proofBytes := proof.Bytes()
if err := VarBytesEncoder(w, &proofBytes, buf); err != nil {
err := InlineVarBytesEncoder(w, &proofBytes, buf)
if err != nil {
return err
}
var rootAsset bytes.Buffer
if err := (*t).RootAsset.Encode(&rootAsset); err != nil {
return err
}
rootAssetBytes := rootAsset.Bytes()
return VarBytesEncoder(w, &rootAssetBytes, buf)
return InlineVarBytesEncoder(w, &rootAssetBytes, buf)
}
return tlv.NewTypeForEncodingErr(val, "*SplitCommitment")
}

func SplitCommitmentDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > tlv.MaxRecordSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(**SplitCommitment); ok {
var proofBytes []byte
if err := VarBytesDecoder(r, &proofBytes, buf, l); err != nil {
err := InlineVarBytesDecoder(r, &proofBytes, buf, l)
if err != nil {
return err
}

Expand All @@ -479,7 +509,7 @@ func SplitCommitmentDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error
}

var rootAssetBytes []byte
err := VarBytesDecoder(r, &rootAssetBytes, buf, l)
err = InlineVarBytesDecoder(r, &rootAssetBytes, buf, l)
if err != nil {
return err
}
Expand Down Expand Up @@ -559,7 +589,7 @@ func GroupKeyEncoder(w io.Writer, val any, buf *[8]byte) error {
return tlv.NewTypeForEncodingErr(val, "*GroupKey")
}

func GroupKeyDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {
func GroupKeyDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if typ, ok := val.(**GroupKey); ok {
var (
groupKey GroupKey
Expand All @@ -586,6 +616,10 @@ func LeafEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func LeafDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > MaxAssetEncodeSizeBytes {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*Asset); ok {
var assetBytes []byte
if err := tlv.DVarBytes(r, &assetBytes, buf, l); err != nil {
Expand Down
51 changes: 28 additions & 23 deletions commitment/encoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,29 +8,6 @@ import (
"github.com/lightningnetwork/lnd/tlv"
)

// ProofEncoder is a TLV encoder for a commitment Proof value.
func ProofEncoder(w io.Writer, val any, buf *[8]byte) error {
	p, ok := val.(*Proof)
	if !ok {
		return tlv.NewTypeForEncodingErr(val, "*Proof")
	}

	return p.Encode(w)
}

// ProofDecoder is a TLV decoder for a commitment Proof value.
func ProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
	target, ok := val.(*Proof)
	if !ok {
		return tlv.NewTypeForEncodingErr(val, "*Proof")
	}

	// Read the raw record payload first, then decode the proof from
	// that buffered copy.
	var rawProof []byte
	if err := tlv.DVarBytes(r, &rawProof, buf, l); err != nil {
		return err
	}

	var decoded Proof
	if err := decoded.Decode(bytes.NewReader(rawProof)); err != nil {
		return err
	}

	*target = decoded
	return nil
}

func AssetProofEncoder(w io.Writer, val any, buf *[8]byte) error {
if t, ok := val.(**AssetProof); ok {
records := []tlv.Record{
Expand All @@ -48,6 +25,12 @@ func AssetProofEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func AssetProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
// We currently only use this with tlv.DecodeP2P, but in case we ever
// don't, we still want to enforce a limit.
if l > tlv.MaxRecordSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(**AssetProof); ok {
var streamBytes []byte
if err := tlv.DVarBytes(r, &streamBytes, buf, l); err != nil {
Expand Down Expand Up @@ -90,6 +73,12 @@ func TaprootAssetProofEncoder(w io.Writer, val any, buf *[8]byte) error {
func TaprootAssetProofDecoder(r io.Reader, val any, buf *[8]byte,
l uint64) error {

// We currently only use this with tlv.DecodeP2P, but in case we ever
// don't, we still want to enforce a limit.
if l > tlv.MaxRecordSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*TaprootAssetProof); ok {
var streamBytes []byte
if err := tlv.DVarBytes(r, &streamBytes, buf, l); err != nil {
Expand Down Expand Up @@ -121,6 +110,12 @@ func TreeProofEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func TreeProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
// We currently only use this with tlv.DecodeP2P, but in case we ever
// don't, we still want to enforce a limit.
if l > tlv.MaxRecordSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*mssmt.Proof); ok {
var proofBytes []byte
if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil {
Expand Down Expand Up @@ -160,6 +155,16 @@ func TapscriptPreimageEncoder(w io.Writer, val any, buf *[8]byte) error {
func TapscriptPreimageDecoder(r io.Reader, val any, buf *[8]byte,
l uint64) error {

// We currently only use this with tlv.DecodeP2P, but in case we ever
// don't, we still want to enforce a limit.
if l > tlv.MaxRecordSize {
return tlv.ErrRecordTooLarge
}

if l == 0 {
return ErrInvalidTapscriptPreimageLen
}

if typ, ok := val.(**TapscriptPreimage); ok {
var preimage TapscriptPreimage

Expand Down
2 changes: 1 addition & 1 deletion commitment/proof.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func (p *Proof) Decode(r io.Reader) error {
if err != nil {
return err
}
return stream.Decode(r)
return stream.DecodeP2P(r)
}

// DeriveByAssetInclusion derives the Taproot Asset commitment containing the
Expand Down
49 changes: 49 additions & 0 deletions itest/assertions.go
Original file line number Diff line number Diff line change
Expand Up @@ -322,6 +322,55 @@ func AssertAssetProofs(t *testing.T, tapClient taprpc.TaprootAssetsClient,
return exportResp.RawProofFile
}

// AssertMintingProofs makes sure the asset minting proofs contain all the
// correct reveal information.
func AssertMintingProofs(t *testing.T, tapd *tapdHarness,
	requests []*mintrpc.MintAssetRequest, assets []*taprpc.Asset) {

	t.Helper()

	ctxb := context.Background()
	ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
	defer cancel()

	for idx, a := range assets {
		// Export the full proof file for this asset from the daemon
		// under test, identified by its asset ID and script key.
		exportResp, err := tapd.ExportProof(
			ctxt, &taprpc.ExportProofRequest{
				AssetId:   a.AssetGenesis.AssetId,
				ScriptKey: a.ScriptKey,
			},
		)
		require.NoError(t, err)

		// Also make sure that the RPC can verify the proof as well.
		verifyResp, err := tapd.VerifyProof(ctxt, &taprpc.ProofFile{
			RawProofFile: exportResp.RawProofFile,
		})
		require.NoError(t, err)
		require.True(t, verifyResp.Valid)

		// Also make sure that the RPC can decode the proof as well.
		// WithMetaReveal is set so the decoded proof includes the
		// meta reveal we assert on below.
		decodeResp, err := tapd.DecodeProof(
			ctxt, &taprpc.DecodeProofRequest{
				RawProof:       exportResp.RawProofFile,
				WithMetaReveal: true,
			},
		)
		require.NoError(t, err)

		// The decoded meta reveal must match the meta data that was
		// submitted in the original mint request (requests and assets
		// are index-aligned).
		expected := requests[idx].Asset
		actual := decodeResp.DecodedProof

		require.NotNil(t, actual.MetaReveal)
		require.Equal(
			t, expected.AssetMeta.Data, actual.MetaReveal.Data,
		)
		require.Equal(
			t, expected.AssetMeta.Type, actual.MetaReveal.Type,
		)
	}
}

// AssertAssetProofsInvalid makes sure the proofs for the given asset can be
// retrieved from the given daemon but fail to validate.
func AssertAssetProofsInvalid(t *testing.T, tapd *tapdHarness,
Expand Down
Loading

0 comments on commit afece9e

Please sign in to comment.