Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

enable custom go linter in CI #25

Merged
merged 3 commits into from
Aug 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 8 additions & 8 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -128,14 +128,14 @@ jobs:
- name: Build all lint dependencies
run: make -j build-node-deps

# - name: Lint
# uses: golangci/golangci-lint-action@v3
# with:
# version: latest
# skip-pkg-cache: true
# - name: Custom Lint
# run: |
# go run ./linters ./...
- name: Lint
uses: golangci/golangci-lint-action@v3
with:
version: latest
skip-pkg-cache: true
- name: Custom Lint
run: |
go run ./linters ./...

- name: Set environment variables
run: |
Expand Down
18 changes: 9 additions & 9 deletions arbnode/batch_poster.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,11 +109,11 @@ type BatchPoster struct {
dapWriter daprovider.Writer
// This deviates from the DA spec but is necessary for the batch poster to work efficiently
// since we need an extended method on the SequencerInbox contract
eigenDAWriter eigenda.EigenDAWriter
dataPoster *dataposter.DataPoster
redisLock *redislock.Simple
messagesPerBatch *arbmath.MovingAverage[uint64]
non4844BatchCount int // Count of consecutive non-4844 batches posted
eigenDAWriter eigenda.EigenDAWriter
dataPoster *dataposter.DataPoster
redisLock *redislock.Simple
messagesPerBatch *arbmath.MovingAverage[uint64]
non4844BatchCount int // Count of consecutive non-4844 batches posted
// This is an atomic variable that should only be accessed atomically.
// An estimate of the number of batches we want to post but haven't yet.
// This doesn't include batches which we don't want to post yet due to the L1 bounds.
Expand Down Expand Up @@ -238,7 +238,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{
Enable: false,
DisableDapFallbackStoreDataOnChain: false,
// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
MaxSize: 100000,
MaxSize: 100000,
MaxEigenDABatchSize: 2_000_000,
// Try to fill 3 blobs per batch
Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)/2 - 2000,
Expand Down Expand Up @@ -274,7 +274,7 @@ var TestBatchPosterConfig = BatchPosterConfig{
Enable: true,
MaxSize: 100000,
Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize,
MaxEigenDABatchSize: DefaultBatchPosterConfig.MaxEigenDABatchSize,
MaxEigenDABatchSize: DefaultBatchPosterConfig.MaxEigenDABatchSize,
PollInterval: time.Millisecond * 10,
ErrorDelay: time.Millisecond * 10,
MaxDelay: 0,
Expand All @@ -298,7 +298,7 @@ var EigenDABatchPosterConfig = BatchPosterConfig{
Enable: true,
MaxSize: 100000,
Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize,
MaxEigenDABatchSize: DefaultBatchPosterConfig.MaxEigenDABatchSize,
MaxEigenDABatchSize: DefaultBatchPosterConfig.MaxEigenDABatchSize,
PollInterval: time.Millisecond * 10,
ErrorDelay: time.Millisecond * 10,
MaxDelay: 0,
Expand Down Expand Up @@ -710,7 +710,7 @@ type buildingBatch struct {
msgCount arbutil.MessageIndex
haveUsefulMessage bool
use4844 bool
useEigenDA bool
useEigenDA bool
}

func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64, use4844 bool, useEigenDA bool) *batchSegments {
Expand Down
4 changes: 2 additions & 2 deletions arbnode/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -553,7 +553,7 @@ func createNodeImpl(
return nil, errors.New("a data availability service is required for this chain, but it was not configured")
} else if config.EigenDA.Enable {
log.Info("EigenDA enabled")
eigenDAService, err := eigenda.NewEigenDA(config.EigenDA.Rpc)
eigenDAService, err := eigenda.NewEigenDA(&config.EigenDA)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -702,7 +702,7 @@ func createNodeImpl(
if daWriter != nil {
dapWriter = daprovider.NewWriterForDAS(daWriter)
}

batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{
DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix),
L1Reader: l1Reader,
Expand Down
4 changes: 2 additions & 2 deletions arbnode/sequencer_inbox.go
Original file line number Diff line number Diff line change
Expand Up @@ -175,8 +175,8 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut

calldata := tx.Data()
data := []byte{daprovider.EigenDAMessageHeaderFlag}
data = append(data, calldata[:]...)
data = append(data, calldata...)

return data, nil
default:
return nil, fmt.Errorf("batch has invalid data location %v", m.dataLocation)
Expand Down
4 changes: 2 additions & 2 deletions cmd/nitro/nitro.go
Original file line number Diff line number Diff line change
Expand Up @@ -605,8 +605,8 @@ func mainImpl() int {

// NOTE: since the SRS is stored within the arbitrator and predetermines the max batch size
// supported for proving stateless execution - it could be possible to read from dynamically
// otherwise it maybe best to expose the max supported batch size from the disperser directly
// to ensure dynamically adaptability within the rollup.
// otherwise it may be best to expose the max supported batch size from the disperser directly
// to ensure dynamic adaptability within the rollup.
if nodeConfig.Node.BatchPoster.Enable && nodeConfig.Node.EigenDA.Enable {
if nodeConfig.Node.BatchPoster.MaxEigenDABatchSize > eigenda.MaxBatchSize {
log.Error("batchPoster's MaxEigenDABatchSize too large.", "MaxEigenDABatchSize", eigenda.MaxBatchSize)
Expand Down
7 changes: 3 additions & 4 deletions cmd/replay/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (dapro
}

type BlobPreimageReader struct{}

func (r *BlobPreimageReader) GetBlobs(
ctx context.Context,
batchBlockHash common.Hash,
Expand All @@ -149,8 +150,8 @@ func (r *BlobPreimageReader) Initialize(ctx context.Context) error {
return nil
}


type EigenDAPreimageReader struct{}

// QueryBlob returns the blob for the given cert from the preimage oracle using the hash of the
// certificate kzg commitment for identifying the preimage.
func (dasReader *EigenDAPreimageReader) QueryBlob(ctx context.Context, cert *eigenda.EigenDABlobInfo, domain string) ([]byte, error) {
Expand All @@ -177,12 +178,10 @@ func (dasReader *EigenDAPreimageReader) QueryBlob(ctx context.Context, cert *eig
println("Error decoding blob: ", err)
return nil, err
}

return decodedBlob, nil
}



// To generate:
// key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001")
// sig, _ := crypto.Sign(make([]byte, 32), key)
Expand Down
11 changes: 4 additions & 7 deletions eigenda/decoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,6 @@ func DecodeBlob(data []byte) ([]byte, error) {

}


func EncodeBlob(data []byte) ([]byte, error) {
var err error
data, err = encodeBlob(data)
Expand All @@ -104,8 +103,6 @@ func EncodeBlob(data []byte) ([]byte, error) {
return IFFT(data)
}



func encodeBlob(rawData []byte) ([]byte, error) {
codecBlobHeader := make([]byte, 32)
// first byte is always 0 to ensure the codecBlobHeader is a valid bn254 element
Expand All @@ -118,13 +115,13 @@ func encodeBlob(rawData []byte) ([]byte, error) {
// encode raw data modulo bn254
rawDataPadded := codec.ConvertByPaddingEmptyByte(rawData)

// append raw data
encodedData := append(codecBlobHeader, rawDataPadded...)
// append raw data; reassigning avoids copying
encodedData := codecBlobHeader
encodedData = append(encodedData, rawDataPadded...)

return encodedData, nil
}


func IFFT(data []byte) ([]byte, error) {
// we now IFFT data regardless of the encoding type
// convert data to fr.Element
Expand Down Expand Up @@ -156,4 +153,4 @@ func IFFT(data []byte) ([]byte, error) {
}

return rs.ToByteArray(dataIFFTFr, dataFrLenPow2*encoding.BYTES_PER_SYMBOL), nil
}
}
9 changes: 6 additions & 3 deletions eigenda/eigenda.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package eigenda

import (
"context"
"errors"

"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
Expand All @@ -16,7 +17,6 @@ const (
MaxBatchSize = 2_000_000 // 2MB
)


func IsEigenDAMessageHeaderByte(header byte) bool {
return hasBits(header, daprovider.EigenDAMessageHeaderFlag)
}
Expand Down Expand Up @@ -44,8 +44,11 @@ type EigenDA struct {
client *EigenDAProxyClient
}

func NewEigenDA(proxyServerRpc string) (*EigenDA, error) {
client := NewEigenDAProxyClient(proxyServerRpc)
func NewEigenDA(config *EigenDAConfig) (*EigenDA, error) {
if !config.Enable {
return nil, errors.New("EigenDA is not enabled")
}
client := NewEigenDAProxyClient(config.Rpc)

return &EigenDA{
client: client,
Expand Down
9 changes: 4 additions & 5 deletions eigenda/proxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,9 @@ type EigenDAProxyClient struct {
client ProxyClient
}

func NewEigenDAProxyClient(RPCUrl string) *EigenDAProxyClient {

// NewEigenDAProxyClient constructs a client that communicates with the
// EigenDA proxy server reachable at rpcUrl.
func NewEigenDAProxyClient(rpcUrl string) *EigenDAProxyClient {
	cfg := &Config{URL: rpcUrl}
	return &EigenDAProxyClient{client: New(cfg)}
}
Expand Down Expand Up @@ -89,7 +88,7 @@ func StrToDomainType(s string) DomainType {

// TODO: Add support for custom http client option
type Config struct {
URL string
URL string
}

// ProxyClient is an interface for communicating with the EigenDA proxy server
Expand Down Expand Up @@ -141,7 +140,7 @@ func (c *client) GetData(ctx context.Context, comm []byte, domain DomainType) ([

req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return nil, fmt.Errorf("failed to construct http request: %e", err)
return nil, fmt.Errorf("failed to construct http request: %w", err)
}

req.Header.Set("Content-Type", "application/octet-stream")
Expand Down
11 changes: 5 additions & 6 deletions eigenda/reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ type readerForEigenDA struct {
readerEigenDA EigenDAReader
}

const sequencerMsgOffset = 41

// IsValidHeaderByte reports whether the byte marks a batch message that
// this reader can handle (i.e. an EigenDA message header byte).
func (d *readerForEigenDA) IsValidHeaderByte(b byte) bool {
	return IsEigenDAMessageHeaderByte(b)
}
Expand All @@ -35,11 +37,9 @@ func (d *readerForEigenDA) RecoverPayloadFromBatch(
preimageRecorder daprovider.PreimageRecorder,
validateSeqMsg bool,
) ([]byte, error) {
// offset sequencer message at 41
return RecoverPayloadFromEigenDABatch(ctx, sequencerMsg[41:], d.readerEigenDA, preimageRecorder, "binary")
return RecoverPayloadFromEigenDABatch(ctx, sequencerMsg[sequencerMsgOffset:], d.readerEigenDA, preimageRecorder, "binary")
}


func RecoverPayloadFromEigenDABatch(ctx context.Context,
sequencerMsg []byte,
daReader EigenDAReader,
Expand Down Expand Up @@ -83,7 +83,7 @@ func RecoverPayloadFromEigenDABatch(ctx context.Context,

// ParseSequencerMsg parses the inbox tx calldata into a structured EigenDABlobInfo
func ParseSequencerMsg(calldata []byte) (*EigenDABlobInfo, error) {

if len(calldata) < 4 {
return nil, errors.New("calldata is shorter than expected method signature length")
}
Expand Down Expand Up @@ -140,6 +140,5 @@ func (d *binaryReaderForEigenDA) RecoverPayloadFromBatch(
preimageRecorder daprovider.PreimageRecorder,
validateSeqMsg bool,
) ([]byte, error) {
// offset sequencer message at 41
return RecoverPayloadFromEigenDABatch(ctx, sequencerMsg[41:], d.readerEigenDA, preimageRecorder, "binary")
return RecoverPayloadFromEigenDABatch(ctx, sequencerMsg[sequencerMsgOffset:], d.readerEigenDA, preimageRecorder, "binary")
}
30 changes: 28 additions & 2 deletions eigenda/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,9 +254,9 @@ func (e *EigenDABlobInfo) ToDisperserBlobInfo() (*DisperserBlobInfo, error) {

// Convert BlobVerificationProof
var disperserBlobVerificationProof DisperserBlobVerificationProof
if &e.BlobVerificationProof != nil {
if !e.BlobVerificationProof.IsEmpty() {
var disperserBatchMetadata DisperserBatchMetadata
if &e.BlobVerificationProof.BatchMetadata != nil {
if !e.BlobVerificationProof.BatchMetadata.IsEmpty() {
metadata := e.BlobVerificationProof.BatchMetadata
quorumNumbers := metadata.BatchHeader.QuorumNumbers
quorumSignedPercentages := metadata.BatchHeader.SignedStakeForQuorums
Expand Down Expand Up @@ -395,3 +395,29 @@ func (ip *InboxPayload) Load(callDataValues []interface{}) error {
*ip = payload
return nil
}

// IsEmpty checks if BlobVerificationProof is effectively empty
// IsEmpty reports whether the proof is effectively empty: every scalar
// field is zero, both slice fields are empty, and the nested batch
// metadata is itself empty.
func (p BlobVerificationProof) IsEmpty() bool {
	if p.BatchID != 0 || p.BlobIndex != 0 {
		return false
	}
	if len(p.InclusionProof) != 0 || len(p.QuorumIndices) != 0 {
		return false
	}
	return p.BatchMetadata.IsEmpty()
}

// IsEmpty checks if BatchMetadata is effectively empty
// IsEmpty reports whether the metadata is effectively empty: the nested
// batch header is empty and every remaining field holds its zero value.
func (m BatchMetadata) IsEmpty() bool {
	var zeroHash [32]byte
	switch {
	case !m.BatchHeader.IsEmpty():
		return false
	case len(m.Fee) != 0 || len(m.BatchHeaderHash) != 0:
		return false
	case m.SignatoryRecordHash != zeroHash:
		return false
	default:
		return m.ConfirmationBlockNumber == 0
	}
}

// IsEmpty checks if BatchHeader is effectively empty
func (bh BatchHeader) IsEmpty() bool {
return bh.BlobHeadersRoot == [32]byte{} &&
len(bh.QuorumNumbers) == 0 &&
len(bh.SignedStakeForQuorums) == 0 &&
bh.ReferenceBlockNumber == 0
Comment on lines +400 to +422
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

keep meaning to refactor this type conversions - ty! 🙏🏻

}
1 change: 0 additions & 1 deletion validator/server_jit/jit_machine.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,6 @@ func (machine *JitMachine) close() {
func (machine *JitMachine) prove(
ctxIn context.Context, entry *validator.ValidationInput,
) (validator.GoGlobalState, error) {


ctx, cancel := context.WithCancel(ctxIn)
defer cancel() // ensure our cleanup functions run when we're done
Expand Down
Loading