proof: limit decoding allocations
guggero committed Sep 27, 2023
1 parent 0c6301e commit e0b897d
Showing 6 changed files with 158 additions and 4 deletions.
59 changes: 57 additions & 2 deletions proof/encoding.go
@@ -3,7 +3,9 @@ package proof
import (
"bytes"
"io"
"math"

"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/taproot-assets/asset"
"github.com/lightningnetwork/lnd/tlv"
@@ -17,6 +19,10 @@ func BlockHeaderEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func BlockHeaderDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > BlockHeaderSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*wire.BlockHeader); ok {
var headerBytes []byte
if err := tlv.DVarBytes(r, &headerBytes, buf, l); err != nil {
@@ -41,6 +47,10 @@ func TxEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func TxDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > blockchain.MaxBlockWeight {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*wire.MsgTx); ok {
var txBytes []byte
if err := tlv.DVarBytes(r, &txBytes, buf, l); err != nil {
@@ -64,6 +74,10 @@ func TxMerkleProofEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func TxMerkleProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > tlv.MaxRecordSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*TxMerkleProof); ok {
var proofBytes []byte
if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil {
@@ -87,6 +101,10 @@ func TaprootProofEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func TaprootProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > MaxTaprootProofSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*TaprootProof); ok {
var proofBytes []byte
if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil {
@@ -110,6 +128,10 @@ func SplitRootProofEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func SplitRootProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > tlv.MaxRecordSize+asset.MaxAssetEncodeSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(**TaprootProof); ok {
var proofBytes []byte
if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil {
@@ -154,10 +176,18 @@ func TaprootProofsDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {
if err != nil {
return err
}

// Avoid OOM by limiting the number of taproot proofs we accept.
if numProofs > MaxNumTaprootProofs {
return ErrProofInvalid
}

proofs := make([]TaprootProof, 0, numProofs)
for i := uint64(0); i < numProofs; i++ {
var proofBytes []byte
err := asset.VarBytesDecoder(r, &proofBytes, buf, 0)
err := asset.VarBytesDecoder(
r, &proofBytes, buf, MaxTaprootProofSize,
)
if err != nil {
return err
}
@@ -198,15 +228,28 @@ func AdditionalInputsEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func AdditionalInputsDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > FileMaxSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(*[]File); ok {
numInputs, err := tlv.ReadVarInt(r, buf)
if err != nil {
return err
}

// We only allow this many previous witnesses, so there can't be
// more additional inputs than witnesses.
if numInputs > math.MaxUint16 {
return tlv.ErrRecordTooLarge
}

inputFiles := make([]File, 0, numInputs)
for i := uint64(0); i < numInputs; i++ {
var inputFileBytes []byte
err := asset.VarBytesDecoder(r, &inputFileBytes, buf, 0)
err := asset.VarBytesDecoder(
r, &inputFileBytes, buf, FileMaxSize,
)
if err != nil {
return err
}
@@ -231,6 +274,10 @@ func CommitmentProofEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func CommitmentProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > tlv.MaxRecordSize {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(**CommitmentProof); ok {
var proofBytes []byte
if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil {
@@ -254,6 +301,10 @@ func TapscriptProofEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func TapscriptProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > tlv.MaxRecordSize*2 {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(**TapscriptProof); ok {
var proofBytes []byte
if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil {
@@ -302,6 +353,10 @@ func MetaRevealEncoder(w io.Writer, val any, buf *[8]byte) error {
}

func MetaRevealDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error {
if l > MetaDataMaxLen {
return tlv.ErrRecordTooLarge
}

if typ, ok := val.(**MetaReveal); ok {
var revealBytes []byte
if err := tlv.DVarBytes(r, &revealBytes, buf, l); err != nil {
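Every decoder change in this file follows the same pattern: check the wire-provided length against an upper bound before handing it to tlv.DVarBytes, which allocates the full claimed length up front. A minimal, self-contained sketch of the failure mode and the guard (illustrative names and limits, not code from this commit):

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// decodeVarBytes mirrors the shape of an unguarded var-bytes decoder: the
// length l comes straight off the wire and the buffer is allocated before a
// single payload byte is read.
func decodeVarBytes(r io.Reader, l, limit uint64) ([]byte, error) {
	// The guard added throughout encoding.go: reject l first, so a bogus
	// multi-gigabyte length can't trigger a huge allocation.
	if l > limit {
		return nil, errors.New("record too large")
	}
	b := make([]byte, l)
	if _, err := io.ReadFull(r, b); err != nil {
		return nil, err
	}
	return b, nil
}

func main() {
	// A crafted record header claiming an 8 GiB payload, with no payload.
	var hdr [8]byte
	binary.BigEndian.PutUint64(hdr[:], 8<<30)
	l := binary.BigEndian.Uint64(hdr[:])

	_, err := decodeVarBytes(bytes.NewReader(nil), l, 1<<20)
	fmt.Println(err) // "record too large" instead of an 8 GiB make()
}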
39 changes: 39 additions & 0 deletions proof/file.go
@@ -24,6 +24,10 @@ var (
// ErrNoProofAvailable is the error that's returned when a proof is
// attempted to be fetched from an empty file.
ErrNoProofAvailable = errors.New("no proof available")

// ErrProofFileInvalid is the error that's returned when a proof file is
// invalid.
ErrProofFileInvalid = errors.New("proof file is invalid")
)

// Version denotes the versioning scheme for proof files.
@@ -32,6 +36,27 @@ type Version uint32
const (
// V0 is the first version of the proof file.
V0 Version = 0

// FileMaxNumProofs is the maximum number of proofs we expect/allow to
// be encoded within a single proof file. Given that there can only be
// one transfer per block, this value would be enough to transfer an
// asset every 10 minutes for 8 years straight. This limitation might be
// lifted at some point when proofs can be compressed into a single
// zero-knowledge proof.
FileMaxNumProofs = 420000

// FileMaxProofSize is the maximum size of a single proof in a proof
// file. The maximum size of a meta reveal is 1 MB, so this value caps
// the number of additional inputs within a proof at roughly 16 assets
// with such large meta data.
FileMaxProofSize = 16 * MetaDataMaxLen

// FileMaxSize is the maximum size of a single proof file. This is not
// just FileMaxNumProofs * FileMaxProofSize as only the minting proof
// can commit to a large chunk of meta data. The other proofs are much
// smaller, assuming they don't all have additional inputs. But we must
// cap this value somewhere to avoid OOM attacks.
FileMaxSize = 500 * 1024 * 1024
)
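A back-of-envelope check of the constants above (illustrative arithmetic, not part of this commit; MetaDataMaxLen is assumed to be 1 MiB, as the comment states):

package main

import "fmt"

func main() {
	// FileMaxNumProofs: one transfer per ~10-minute block, for 8 years.
	const blocksPerHour = 6
	fmt.Println(blocksPerHour * 24 * 365 * 8) // 420480, close to the 420000 limit

	// FileMaxProofSize: 16 additional inputs, each with a 1 MiB meta reveal.
	const metaDataMaxLen = 1 << 20 // assumed value of MetaDataMaxLen
	fmt.Println(16 * metaDataMaxLen) // 16777216 bytes, i.e. 16 MiB per proof
}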

// hashedProof is a struct that contains an encoded proof and its chained
@@ -172,6 +197,13 @@ func (f *File) Decode(r io.Reader) error {
return err
}

// Cap the number of proofs there can be within a single file to avoid
// OOM attacks. See the comment for FileMaxNumProofs for the reasoning
// behind the value chosen.
if numProofs > FileMaxNumProofs {
return ErrProofFileInvalid
}

var prevHash, currentHash, proofHash [sha256.Size]byte
f.proofs = make([]*hashedProof, numProofs)
for i := uint64(0); i < numProofs; i++ {
@@ -182,6 +214,13 @@ func (f *File) Decode(r io.Reader) error {
return err
}

// We also need to cap the size of an individual proof. See the
// comment for FileMaxProofSize for the reasoning behind the
// value chosen.
if numProofBytes > FileMaxProofSize {
return ErrProofFileInvalid
}

// Read all bytes that belong to the proof. We don't decode the
// proof itself as we usually only need the last proof anyway.
proofBytes := make([]byte, numProofBytes)
30 changes: 30 additions & 0 deletions proof/proof.go
@@ -6,8 +6,10 @@ import (
"fmt"
"io"

"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/taproot-assets/asset"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/tlv"
)

@@ -39,6 +41,10 @@ var (
// reveal.
ErrMetaRevealRequired = errors.New("meta reveal required")

// ErrProofInvalid is the error that's returned when a proof is
// invalid.
ErrProofInvalid = errors.New("proof is invalid")

// RegtestTestVectorName is the name of the test vector file that is
// generated/updated by an actual integration test run on regtest. It is
// exported here, so we can use it in the integration tests.
@@ -64,6 +70,26 @@ const (
// PrefixMagicBytesLength is the length of the magic bytes that are
// prefixed to individual proofs or proof files.
PrefixMagicBytesLength = 4

// MaxNumTaprootProofs is the maximum number of Taproot proofs there can
// be in a proof. This limit represents the maximum block size in vBytes
// divided by the size of a single P2TR output and is therefore only a
// theoretical limit that can never be reached in practice.
MaxNumTaprootProofs = blockchain.MaxBlockBaseSize / input.P2TRSize

// MaxTaprootProofSize is the maximum size of a single Taproot proof.
// A Taproot proof can contain a commitment proof, which in the worst
// case contains two MS-SMT proofs of around 10k bytes each.
MaxTaprootProofSize = tlv.MaxRecordSize

// BlockHeaderSize is the size of a Bitcoin block header in bytes.
BlockHeaderSize = 80

// MerkleProofMaxNodes is the maximum number of nodes a merkle proof can
// contain. This is log2(max_num_txs_in_block) + 1, where max number of
// transactions in a block is assumed to be 17k (theoretical value).
MerkleProofMaxNodes = 15
)
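A quick sanity check of the two derived constants above, assuming blockchain.MaxBlockBaseSize is 1,000,000 bytes and input.P2TRSize is 43 bytes (8-byte value, 1-byte script length, 34-byte witness program); both values are an assumed reading of those packages, not restated in this diff:

package main

import (
	"fmt"
	"math"
)

func main() {
	// MaxNumTaprootProofs: how many minimal P2TR outputs fit in one block.
	const maxBlockBaseSize = 1_000_000 // assumed blockchain.MaxBlockBaseSize
	const p2trOutputSize = 43          // assumed input.P2TRSize
	fmt.Println(maxBlockBaseSize / p2trOutputSize) // 23255 proofs at most

	// MerkleProofMaxNodes: log2 of the assumed 17k-transaction ceiling,
	// plus one, which lands on the 15 used above.
	fmt.Println(math.Floor(math.Log2(17_000)) + 1) // 15
}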

var (
@@ -294,5 +320,9 @@ func (p *Proof) Decode(r io.Reader) error {
if err != nil {
return err
}

// Note, we can't use the DecodeP2P method here, because the additional
// inputs records might be larger than 64k each. Instead, we add
// individual limits to each record.
return stream.Decode(r)
}
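For context on the comment above, as lnd's tlv package is generally understood: Stream.DecodeP2P rejects any record longer than 65,535 bytes, which is too small for additional-inputs records that embed whole proof files, so Proof.Decode stays on Stream.Decode and relies on the per-record bounds added in encoding.go. A sketch of the distinction, using a hypothetical record:

package proofsketch

import (
	"io"

	"github.com/lightningnetwork/lnd/tlv"
)

// decodeSketch shows where the 64 KiB ceiling would bite. The record type
// and field here are hypothetical stand-ins for the additional-inputs record.
func decodeSketch(r io.Reader) error {
	var inputs []byte
	stream, err := tlv.NewStream(tlv.MakePrimitiveRecord(1, &inputs))
	if err != nil {
		return err
	}

	// stream.DecodeP2P(r) would fail as soon as the record's length exceeds
	// 65,535 bytes, even for legitimately large proof files. Plain Decode
	// has no blanket cap, so the size limit must live inside the record's
	// own decoder (see AdditionalInputsDecoder and FileMaxSize).
	return stream.Decode(r)
}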
27 changes: 26 additions & 1 deletion proof/records.go
@@ -2,6 +2,7 @@ package proof

import (
"bytes"
"io"

"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/wire"
@@ -284,5 +285,29 @@ func MetaRevealTypeRecord(metaType *MetaType) tlv.Record {
}

func MetaRevealDataRecord(data *[]byte) tlv.Record {
return tlv.MakePrimitiveRecord(MetaRevealDataType, data)
sizeFunc := func() uint64 {
if data == nil {
return 0
}
return uint64(len(*data))
}
return tlv.MakeDynamicRecord(
MetaRevealDataType, data, sizeFunc, tlv.EVarBytes,
DVarBytesWithLimit(MetaDataMaxLen),
)
}

func DVarBytesWithLimit(limit uint64) tlv.Decoder {
return func(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
if l > limit {
return tlv.ErrRecordTooLarge
}

if b, ok := val.(*[]byte); ok {
*b = make([]byte, l)
_, err := io.ReadFull(r, *b)
return err
}
return tlv.NewTypeForDecodingErr(val, "[]byte", l, l)
}
}
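A small usage sketch (illustrative, not from the commit) of how a record built with DVarBytesWithLimit behaves at decode time: a meta reveal data record longer than MetaDataMaxLen now fails with tlv.ErrRecordTooLarge before any allocation happens.

package proofsketch

import (
	"io"

	"github.com/lightninglabs/taproot-assets/proof"
	"github.com/lightningnetwork/lnd/tlv"
)

// decodeMetaData decodes only the meta reveal data record from r. Oversized
// records are rejected by the limit baked into MetaRevealDataRecord.
func decodeMetaData(r io.Reader) ([]byte, error) {
	var data []byte
	stream, err := tlv.NewStream(proof.MetaRevealDataRecord(&data))
	if err != nil {
		return nil, err
	}
	if err := stream.Decode(r); err != nil {
		return nil, err
	}
	return data, nil
}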
2 changes: 1 addition & 1 deletion proof/taproot.go
@@ -202,7 +202,7 @@ func (p *TaprootProof) Decode(r io.Reader) error {
if err != nil {
return err
}
return stream.Decode(r)
return stream.DecodeP2P(r)
}

// deriveTaprootKey derives the taproot key backing a Taproot Asset commitment.
5 changes: 5 additions & 0 deletions proof/tx.go
@@ -141,6 +141,11 @@ func (p *TxMerkleProof) Decode(r io.Reader) error {
if err != nil {
return err
}

if numNodes > MerkleProofMaxNodes {
return tlv.ErrRecordTooLarge
}

p.Nodes = make([]chainhash.Hash, 0, numNodes)
for i := uint64(0); i < numNodes; i++ {
var hash [chainhash.HashSize]byte
