Fix lint issue share package
K1li4nL committed May 31, 2024
1 parent 0ccad55 commit a3ee4ef
Showing 6 changed files with 68 additions and 44 deletions.
17 changes: 14 additions & 3 deletions .golangci.yml
@@ -93,6 +93,12 @@ linters-settings:
- G107 # variables in URLs
- G404 # use of weak random generator

staticcheck:
checks:
- all
- '-SA1019' # disable deprecated check
- '-S1002' # bool often used for constants

linters:
disable-all: true
enable:
@@ -163,7 +169,7 @@ linters:
#- decorder # checks declaration order and count of types, constants, variables and functions
#- exhaustruct # checks if all structure fields are initialized
#- gci # controls golang package import order and makes it always deterministic
- godox # detects FIXME, TODO and other comment keywords
#- godox # detects FIXME, TODO and other comment keywords
#- goheader # checks is file header matches to pattern
- interfacebloat # checks the number of methods inside an interface
#- ireturn # accept interfaces, return concrete types
@@ -208,8 +214,7 @@ issues:
# Maximum count of issues with the same text.
# Set to 0 to disable.
# Default: 3
max-same-issues: 50

max-same-issues: 0
exclude-rules:
- source: "^//\\s*go:generate\\s"
linters: [ lll ]
@@ -246,3 +251,9 @@ issues:
- linters:
- govet
text: "shadow: declaration of \"err\" shadows declaration"
- path: "share/dkg/pedersen"
linters:
- gocognit
- funlen
- gocyclo
- cyclop
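
For context on the new staticcheck block: SA1019 reports uses of deprecated identifiers, and S1002 reports comparisons against boolean constants, which lines up with the bool constants this package used for statuses before the status.go change further down. A minimal, hypothetical Go sketch (not from the repository) of the pattern the now-disabled S1002 check would otherwise flag:

package lintdemo

// Success mirrors the old bool-constant style of this package.
const Success = true

func label(status bool) string {
	// Written as an explicit comparison against a bool constant; with S1002
	// enabled, staticcheck would suggest simplifying it to `if status`.
	if status == Success {
		return "ok"
	}
	return "complaint"
}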
60 changes: 35 additions & 25 deletions share/dkg/pedersen/dkg.go
@@ -375,18 +375,22 @@ func (d *DistKeyGenerator) Deals() (*DealBundle, error) {
// missing deals. It returns an error if the node is not in the right state, or
// if there are not enough valid shares, i.e. the dkg is failing already.
func (d *DistKeyGenerator) ProcessDeals(bundles []*DealBundle) (*ResponseBundle, error) {

if d.canIssue && d.state != DealPhase {
// oldnode member is not in the right state
return nil, fmt.Errorf("processdeals can only be called after producing shares - state %s", d.state.String())
return nil, fmt.Errorf("processdeals can only be called "+
"after producing shares - state %s", d.state.String())
}

if d.canReceive && !d.canIssue && d.state != InitPhase {
// newnode member which is not in the old group is not in the right state
return nil, fmt.Errorf("processdeals can only be called once after creating the dkg for a new member - state %s", d.state.String())
return nil, fmt.Errorf("processdeals can only be called once "+
"after creating the dkg for a new member - state %s", d.state.String())
}
if !d.canReceive {
// a node that is only in the old group should not process deals
d.state = ResponsePhase // he moves on to the next phase silently

//nolint:nilnil // protocol defined this way
return nil, nil
}

@@ -407,7 +411,7 @@ func (d *DistKeyGenerator) ProcessDeals(bundles []*DealBundle) (*ResponseBundle,
continue
}

if bytes.Compare(bundle.SessionID, d.c.Nonce) != 0 {
if !bytes.Equal(bundle.SessionID, d.c.Nonce) {
d.evicted = append(d.evicted, bundle.DealerIndex)
d.c.Error("Deal with invalid session ID")
continue
@@ -475,7 +479,7 @@ func (d *DistKeyGenerator) ProcessDeals(bundles []*DealBundle) (*ResponseBundle,
}
}
// share is valid -> store it
d.statuses.Set(bundle.DealerIndex, deal.ShareIndex, true)
d.statuses.Set(bundle.DealerIndex, deal.ShareIndex, Success)
d.validShares[bundle.DealerIndex] = share
d.c.Info("Valid deal processed received from dealer", bundle.DealerIndex)
}
@@ -489,21 +493,21 @@ func (d *DistKeyGenerator) ProcessDeals(bundles []*DealBundle) (*ResponseBundle,
if !found {
continue
}
d.statuses.Set(dealer.Index, uint32(nidx), true)
d.statuses.Set(dealer.Index, uint32(nidx), Success)
}

// producing response part
var responses []Response
var myshares = d.statuses.StatusesForShare(uint32(d.nidx))
for _, node := range d.c.OldNodes {
// if the node is evicted, we don't even need to send a complaint or a
// response response since every honest node evicts him as well.
// response since every honest node evicts him as well.
// XXX Is that always true ? Should we send a complaint still ?
if contains(d.evicted, node.Index) {
continue
}

if myshares[node.Index] {
if myshares[node.Index] == Success {
if d.c.FastSync {
// we send success responses only in fast sync
responses = append(responses, Response{
@@ -548,7 +552,11 @@ func (d *DistKeyGenerator) ExpectedResponsesFastSync() int {
// - the justification bundle if this node must produce at least one. If nil,
// this node must still wait on the justification phase.
// - error if the dkg must stop now, an unrecoverable failure.
func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (res *Result, jb *JustificationBundle, err error) {
func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (
res *Result,
jb *JustificationBundle,
err error) {

if !d.canReceive && d.state != DealPhase {
// if we are an old node that will leave
return nil, nil, fmt.Errorf("leaving node can process responses only after creating shares")
@@ -566,7 +574,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (res *Res
// if we are not in fastsync, we expect only complaints
// if there is no complaints all is good
res, err = d.computeResult()
return
return res, jb, err
}

var validAuthors []Index
@@ -576,15 +584,15 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (res *Res
continue
}
if d.canIssue && bundle.ShareIndex == uint32(d.nidx) {
// just in case we dont treat our own response
// just in case we don't treat our own response
continue
}
if !isIndexIncluded(d.c.NewNodes, bundle.ShareIndex) {
d.c.Error("Response author already evicted")
continue
}

if bytes.Compare(bundle.SessionID, d.c.Nonce) != 0 {
if !bytes.Equal(bundle.SessionID, d.c.Nonce) {
d.c.Error("Response invalid session ID")
d.evictedHolders = append(d.evictedHolders, bundle.ShareIndex)
continue
@@ -612,6 +620,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (res *Res
if response.Status == Complaint {
foundComplaint = true
}

validAuthors = append(validAuthors, bundle.ShareIndex)
}
}
@@ -686,7 +695,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (res *Res
d.c.Info(fmt.Sprintf("Producing justifications for node %d", shareIndex))
foundJustifs = true
// mark those shares as resolved in the statuses
d.statuses.Set(uint32(d.oidx), shareIndex, true)
d.statuses.Set(uint32(d.oidx), shareIndex, Success)
}
if !foundJustifs {
// no justifications required from us !
@@ -718,10 +727,13 @@ func (d *DistKeyGenerator) ProcessJustifications(bundles []*JustificationBundle)
// an old node leaving the group does not need to process justifications.
// Here we simply return nil to avoid requiring higher level library to
// think about which node should receive which packet
//
//nolint:nilnil // protocol defined this way
return nil, nil
}
if d.state != JustifPhase {
return nil, fmt.Errorf("node can only process justifications after processing responses - current state %s", d.state.String())
return nil, fmt.Errorf("node can only process justifications "+
"after processing responses - current state %s", d.state.String())
}

seen := make(map[uint32]bool)
@@ -751,7 +763,7 @@ func (d *DistKeyGenerator) ProcessJustifications(bundles []*JustificationBundle)
d.c.Error("Already evicted dealer - evicting dealer", bundle.DealerIndex)
continue
}
if bytes.Compare(bundle.SessionID, d.c.Nonce) != 0 {
if !bytes.Equal(bundle.SessionID, d.c.Nonce) {
d.evicted = append(d.evicted, bundle.DealerIndex)
d.c.Error("Justification bundle contains invalid session ID - evicting dealer", bundle.DealerIndex)
continue
@@ -799,7 +811,7 @@ func (d *DistKeyGenerator) ProcessJustifications(bundles []*JustificationBundle)
d.c.Info("Old share commit and public commit valid", true)
}
// valid share -> mark OK
d.statuses.Set(bundle.DealerIndex, justif.ShareIndex, true)
d.statuses.Set(bundle.DealerIndex, justif.ShareIndex, Success)
if justif.ShareIndex == uint32(d.nidx) {
// store the share if it's for us
d.c.Info("Saving our key share for", justif.ShareIndex)
@@ -846,7 +858,7 @@ func (d *DistKeyGenerator) computeResult() (*Result, error) {
d.state = FinishPhase
// add a full complaint row on the nodes that are evicted
for _, index := range d.evicted {
d.statuses.SetAll(index, false)
d.statuses.SetAll(index, Complaint)
}
// add all the shares and public polynomials together for the deals that are
// valid (equivalently, all justified)
@@ -862,7 +874,6 @@ func (d *DistKeyGenerator) computeResharingResult() (*Result, error) {
// only old nodes send shares
shares := make([]*share.PriShare, 0, len(d.c.OldNodes))
coeffs := make(map[Index][]kyber.Point, len(d.c.OldNodes))
var validDealers []Index
for _, n := range d.c.OldNodes {
if !d.statuses.AllTrue(n.Index) {
// this dealer has some unjustified shares
@@ -886,7 +897,6 @@ func (d *DistKeyGenerator) computeResharingResult() (*Result, error) {
V: sh,
I: int(n.Index),
})
validDealers = append(validDealers, n.Index)
}

// the private polynomial is generated from the old nodes, thus inheriting
Expand Down Expand Up @@ -1130,22 +1140,22 @@ func (d *DistKeyGenerator) sign(p Packet) ([]byte, error) {
}

func (d *DistKeyGenerator) Info(keyvals ...interface{}) {
d.c.Info(append([]interface{}{"generator"}, keyvals...))
d.c.Info("generator", keyvals)
}

func (d *DistKeyGenerator) Error(keyvals ...interface{}) {
d.c.Info(append([]interface{}{"generator"}, keyvals...))
d.c.Info("generator", keyvals)
}

func (c *Config) Info(keyvals ...interface{}) {
if c.Log != nil {
c.Log.Info(append([]interface{}{"dkg-log"}, keyvals...))
c.Log.Info("dkg-log", keyvals)
}
}

func (c *Config) Error(keyvals ...interface{}) {
if c.Log != nil {
c.Log.Error(append([]interface{}{"dkg-log"}, keyvals...))
c.Log.Error("dkg-log", keyvals)
}
}

@@ -1166,10 +1176,10 @@ func (c *Config) CheckForDuplicates() error {
return nil
}
if err := checkDuplicate(c.OldNodes); err != nil {
return fmt.Errorf("found duplicate in old nodes list: %v", err)
return fmt.Errorf("found duplicate in old nodes list: %w", err)
}
if err := checkDuplicate(c.NewNodes); err != nil {
return fmt.Errorf("found duplicate in new nodes list: %v", err)
return fmt.Errorf("found duplicate in new nodes list: %w", err)
}
return nil
}
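
One of the smaller changes above replaces %v with %w when CheckForDuplicates wraps the duplicate-node error. A self-contained sketch, with a hypothetical sentinel error standing in for whatever checkDuplicate actually returns, of what %w preserves for callers:

package main

import (
	"errors"
	"fmt"
)

// errDuplicate is a stand-in sentinel; the real package builds its own error.
var errDuplicate = errors.New("duplicate node index")

func checkDuplicate() error { return errDuplicate }

func validate() error {
	if err := checkDuplicate(); err != nil {
		// %w keeps err in the unwrap chain; %v would flatten it to plain text.
		return fmt.Errorf("found duplicate in old nodes list: %w", err)
	}
	return nil
}

func main() {
	err := validate()
	fmt.Println(errors.Is(err, errDuplicate)) // prints true because of %w
}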
5 changes: 2 additions & 3 deletions share/dkg/pedersen/dkg_test.go
@@ -458,7 +458,6 @@ func TestSelfEvictionShareHolder(t *testing.T) {
}
require.True(t, len(responses) > 0)

results = nil
for _, node := range newTns {
_, _, err := node.dkg.ProcessResponses(responses)
require.True(t, contains(node.dkg.evictedHolders, newIndexToEvict))
@@ -622,7 +621,7 @@ func TestDKGThreshold(t *testing.T) {
results := RunDKG(t, tns, conf, dm, rm, jm)
var filtered = results[:0]
for _, n := range tns {
if 0 == n.Index {
if n.Index == 0 {
// node 0 is excluded by all others since he didn't even provide a
// deal at the first phase, i.e. it didn't even provide a public
// polynomial at the first phase.
@@ -1048,7 +1047,7 @@ func TestDKGTooManyComplaints(t *testing.T) {
results := RunDKG(t, tns, conf, dm, nil, nil)
var filtered = results[:0]
for _, n := range tns {
if 0 == n.Index {
if n.Index == 0 {
// node 0 is excluded by all others since he didn't even provide a
// deal at the first phase, i.e. it didn't even provide a public
// polynomial at the first phase.
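
The test edits above are cosmetic (Yoda comparisons flipped, an apparently unused results = nil assignment dropped), but they sit inside the in-place filter idiom filtered := results[:0] that both tests use. A standalone sketch of that idiom with a hypothetical result type:

package main

import "fmt"

type result struct{ Index uint32 }

func main() {
	results := []result{{Index: 0}, {Index: 1}, {Index: 2}}

	// filtered shares the backing array of results, so appending while
	// skipping the excluded index never allocates.
	filtered := results[:0]
	for _, r := range results {
		if r.Index == 0 { // node 0 is excluded, mirroring the tests above
			continue
		}
		filtered = append(filtered, r)
	}
	fmt.Println(filtered) // [{1} {2}]
}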
6 changes: 4 additions & 2 deletions share/dkg/pedersen/protocol.go
@@ -96,11 +96,11 @@ func NewProtocol(c *Config, b Board, phaser Phaser, skipVerification bool) (*Pro
}

func (p *Protocol) Info(keyvals ...interface{}) {
p.dkg.c.Info(append([]interface{}{"dkg-step"}, keyvals...))
p.dkg.c.Info("dkg-step", keyvals)
}

func (p *Protocol) Error(keyvals ...interface{}) {
p.dkg.c.Error(append([]interface{}{"dkg-step"}, keyvals...))
p.dkg.c.Error("dkg-step", keyvals)
}

func (p *Protocol) Start() {
@@ -116,6 +116,7 @@ func (p *Protocol) Start() {
select {
case newPhase := <-p.phaser.NextPhase():
switch newPhase {
case InitPhase:
case DealPhase:
if !p.sendDeals() {
return
@@ -191,6 +192,7 @@ func (p *Protocol) startFast() {
select {
case newPhase := <-p.phaser.NextPhase():
switch newPhase {
case InitPhase:
case DealPhase:
p.Info("phaser", "msg", "moving to sending deals phase")
if !p.sendDeals() {
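
Besides the logging tweak, the addition here is an empty case InitPhase: arm in both phase switches, so the switch handles every phase value explicitly instead of silently matching nothing during the initial phase. A hypothetical, self-contained sketch of that pattern (the real phase constants live in the dkg package):

package main

import "fmt"

type Phase int

const (
	InitPhase Phase = iota
	DealPhase
	ResponsePhase
	JustifPhase
)

func handle(p Phase) {
	switch p {
	case InitPhase:
		// nothing to send in the initial phase; the empty case documents that
	case DealPhase:
		fmt.Println("sending deals")
	case ResponsePhase:
		fmt.Println("sending responses")
	case JustifPhase:
		fmt.Println("sending justifications")
	}
}

func main() {
	handle(InitPhase)
	handle(DealPhase)
}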
20 changes: 11 additions & 9 deletions share/dkg/pedersen/status.go
@@ -6,18 +6,20 @@ import (
"strings"
)

type Status int32

const (
Success = true
Complaint = false
Success Status = 0
Complaint Status = 1
)

type BitSet map[uint32]bool
type BitSet map[uint32]Status
type StatusMatrix map[uint32]BitSet

func NewStatusMatrix(dealers []Node, shareHolders []Node, status bool) *StatusMatrix {
func NewStatusMatrix(dealers []Node, shareHolders []Node, status Status) *StatusMatrix {
statuses := make(map[uint32]BitSet)
for _, dealer := range dealers {
bitset := make(map[uint32]bool)
bitset := make(map[uint32]Status)
for _, holder := range shareHolders {
bitset[holder.Index] = status
}
@@ -44,11 +46,11 @@ func (s *StatusMatrix) StatusesOfDealer(dealerIndex uint32) BitSet {
}

// can panic if indexes are not from the original list of nodes
func (s *StatusMatrix) Set(dealer, share uint32, status bool) {
func (s *StatusMatrix) Set(dealer, share uint32, status Status) {
(*s)[dealer][share] = status
}

func (s *StatusMatrix) SetAll(dealer uint32, status bool) {
func (s *StatusMatrix) SetAll(dealer uint32, status Status) {
for share := range (*s)[dealer] {
(*s)[dealer][share] = status
}
@@ -73,7 +75,7 @@ func (s *StatusMatrix) CompleteSuccess() bool {
}

// can panic if indexes are not from the original list of nodes
func (s *StatusMatrix) Get(dealer, share uint32) bool {
func (s *StatusMatrix) Get(dealer, share uint32) Status {
return (*s)[dealer][share]
}

Expand All @@ -96,7 +98,7 @@ func (s *StatusMatrix) String() string {
for _, shareIndex := range sharesIdx {
status := (*s)[uint32(dealerIndex)][uint32(shareIndex)]
var st string
if status {
if status == Success {
st = fmt.Sprintf(" %d: ok", shareIndex)
} else {
st = fmt.Sprintf(" %d: no", shareIndex)
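
The core change in this file replaces the bare bool statuses with a named Status type, so call sites above now compare against Success and Complaint instead of true and false. A self-contained sketch that mirrors the new types (it redeclares them rather than importing the package) to show how a matrix reads after the change:

package main

import "fmt"

type Status int32

const (
	Success   Status = 0
	Complaint Status = 1
)

type BitSet map[uint32]Status
type StatusMatrix map[uint32]BitSet

func (s StatusMatrix) Set(dealer, share uint32, status Status) {
	s[dealer][share] = status
}

// AllSuccess mirrors the kind of per-dealer check the dkg code performs.
func (s StatusMatrix) AllSuccess(dealer uint32) bool {
	for _, st := range s[dealer] {
		if st != Success {
			return false
		}
	}
	return true
}

func main() {
	m := StatusMatrix{0: {0: Success, 1: Success}}
	m.Set(0, 1, Complaint)            // record a complaint for dealer 0, share 1
	fmt.Println(m.AllSuccess(0))      // false
	fmt.Println(m[0][1] == Complaint) // true
}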
4 changes: 2 additions & 2 deletions share/dkg/pedersen/structs.go
@@ -159,7 +159,7 @@ func (d *DealBundle) Sig() []byte {
type Response struct {
// Index of the Dealer for which this response is for
DealerIndex uint32
Status bool
Status Status
}

var _ Packet = (*ResponseBundle)(nil)
@@ -186,7 +186,7 @@ func (b *ResponseBundle) Hash() ([]byte, error) {
_ = binary.Write(h, binary.BigEndian, b.ShareIndex)
for _, resp := range b.Responses {
_ = binary.Write(h, binary.BigEndian, resp.DealerIndex)
if resp.Status {
if resp.Status == Success {
_ = binary.Write(h, binary.BigEndian, byte(1))
} else {
_ = binary.Write(h, binary.BigEndian, byte(0))
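
ResponseBundle.Hash above keeps the encoding stable across the bool-to-Status switch: a Success still serializes as byte 1 and anything else as byte 0. A short sketch of that per-response encoding, using sha256 as a stand-in for the bundle's configured hash function:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

type Status int32

const (
	Success   Status = 0
	Complaint Status = 1
)

// hashResponse shows the per-response part of the bundle hash: the dealer
// index followed by a single status byte.
func hashResponse(dealerIndex uint32, status Status) []byte {
	h := sha256.New()
	_ = binary.Write(h, binary.BigEndian, dealerIndex)
	if status == Success {
		_ = binary.Write(h, binary.BigEndian, byte(1))
	} else {
		_ = binary.Write(h, binary.BigEndian, byte(0))
	}
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", hashResponse(3, Success))
}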
