Fix storing by indices instead of sequentially (#25)
* Store shares and coeffs sequentially, not by indices

The issue is that we cannot guarantee that node indices are sequential, so shares
and coeffs have to be referred to by their indices rather than by assuming
sequential slice positions; otherwise indexing into the slices can lead to panics
(see the sketch after the commit message).

* Check for duplicate indices in Config

We should not accept any config that has a duplicate index in either list.
The commit also adds a test for the case where indices are not sequential.
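
A minimal, self-contained sketch (not part of the commit) of the indexing problem described above, using plain strings in place of the kyber share types: with non-sequential node indices, writing into a slice at position n.Index can panic, while appending sequentially and keying a map by index cannot.

package main

import "fmt"

// Index mirrors the dkg package's node index type (uint32).
type Index = uint32

func main() {
    // Three dealers whose indices are not sequential (e.g. 0, 1, 53).
    dealerIndices := []Index{0, 1, 53}

    // Old approach: a slice sized to the number of dealers, written at
    // position n.Index. Index 53 is out of range for a slice of length 3:
    //   shares := make([]string, len(dealerIndices))
    //   shares[53] = "..." // panic: index out of range
    //
    // New approach: append shares sequentially and keep per-dealer data in a
    // map keyed by the dealer index, so gaps in the index space are harmless.
    shares := make([]string, 0, len(dealerIndices))
    coeffs := make(map[Index]string, len(dealerIndices))
    for _, idx := range dealerIndices {
        shares = append(shares, fmt.Sprintf("share from dealer %d", idx))
        coeffs[idx] = fmt.Sprintf("coeffs from dealer %d", idx)
    }
    fmt.Println(len(shares), len(coeffs)) // 3 3
}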
nikkolasg authored Nov 30, 2021
1 parent b61eec4 commit 882a744
Showing 2 changed files with 159 additions and 6 deletions.
40 changes: 34 additions & 6 deletions share/dkg/dkg.go
@@ -268,6 +268,9 @@ func NewDistKeyHandler(c *Config) (*DistKeyGenerator, error) {
secretCoeff = c.Share.Share.V
canIssue = true
}
if err := c.CheckForDuplicates(); err != nil {
return nil, err
}
dpriv = share.NewPriPoly(c.Suite, c.Threshold, secretCoeff, c.Suite.RandomStream())
dpub = dpriv.Commit(c.Suite.Point().Base())
// resharing case and we are included in the new list of nodes
@@ -810,8 +813,8 @@ func (d *DistKeyGenerator) computeResult() (*Result, error) {

func (d *DistKeyGenerator) computeResharingResult() (*Result, error) {
// only old nodes send shares
shares := make([]*share.PriShare, len(d.c.OldNodes))
coeffs := make([][]kyber.Point, len(d.c.OldNodes))
shares := make([]*share.PriShare, 0, len(d.c.OldNodes))
coeffs := make(map[Index][]kyber.Point, len(d.c.OldNodes))
var validDealers []Index
for _, n := range d.c.OldNodes {
if !d.statuses.AllTrue(n.Index) {
@@ -832,10 +835,10 @@ func (d *DistKeyGenerator) computeResharingResult() (*Result, error) {
return nil, fmt.Errorf("BUG: nidx %d private share not found from dealer %d", d.nidx, n.Index)
}
// share of dist. secret. Inversion of rows/columns
shares[n.Index] = &share.PriShare{
shares = append(shares, &share.PriShare{
V: sh,
I: int(n.Index),
}
})
validDealers = append(validDealers, n.Index)
}

@@ -856,13 +859,13 @@ func (d *DistKeyGenerator) computeResharingResult() (*Result, error) {
// will be held by the new nodes.
finalCoeffs := make([]kyber.Point, d.newT)
for i := 0; i < d.newT; i++ {
tmpCoeffs := make([]*share.PubShare, len(coeffs))
tmpCoeffs := make([]*share.PubShare, 0, len(coeffs))
// take all i-th coefficients
for j := range coeffs {
if coeffs[j] == nil {
continue
}
tmpCoeffs[j] = &share.PubShare{I: j, V: coeffs[j][i]}
tmpCoeffs = append(tmpCoeffs, &share.PubShare{I: int(j), V: coeffs[j][i]})
}

// using the old threshold / length because there are at most
@@ -1041,3 +1044,28 @@ func (d *DistKeyGenerator) sign(p Packet) ([]byte, error) {
priv := d.c.Longterm
return d.c.Auth.Sign(priv, msg)
}

// CheckForDuplicates looks at the lists of node indices in the OldNodes and
// NewNodes lists. It returns an error if there is a duplicate in either list.
// NOTE: It only looks at indices because it is plausible that one party may
// have multiple indices for the protocol, i.e. a higher "weight".
func (c *Config) CheckForDuplicates() error {
checkDuplicate := func(list []Node) error {
hashSet := make(map[Index]bool)
for _, n := range list {
if _, present := hashSet[n.Index]; present {
return fmt.Errorf("index %d", n.Index)
} else {
hashSet[n.Index] = true
}
}
return nil
}
if err := checkDuplicate(c.OldNodes); err != nil {
return fmt.Errorf("found duplicate in old nodes list: %v", err)
}
if err := checkDuplicate(c.NewNodes); err != nil {
return fmt.Errorf("found duplicate in new nodes list: %v", err)
}
return nil
}
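
For context (not part of the diff): a minimal caller-side sketch of the new check, assuming the import path github.com/drand/kyber/share/dkg and the exported Node, Config and CheckForDuplicates shown above. Public keys are left nil because only the indices matter here, as in the TestConfigDuplicate test below.

package main

import (
    "fmt"

    dkg "github.com/drand/kyber/share/dkg"
)

func main() {
    // Two entries share index 1, so the config must be rejected.
    nodes := []dkg.Node{{Index: 0}, {Index: 1}, {Index: 1}}
    c := &dkg.Config{NewNodes: nodes}
    if err := c.CheckForDuplicates(); err != nil {
        fmt.Println(err) // e.g. "found duplicate in new nodes list: index 1"
    }
}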
125 changes: 125 additions & 0 deletions share/dkg/dkg_test.go
@@ -2,6 +2,7 @@ package dkg

import (
"fmt"
"math/rand"
"testing"

"github.com/drand/kyber"
@@ -199,6 +200,110 @@ func RunDKG(t *testing.T, tns []*TestNode, conf Config,
return results
}

// This test runs DKG and resharing with skipped indices, given that there is
// no guarantee that the indices of the nodes are going to be sequential.
func TestDKGSkipIndex(t *testing.T) {
n := 5
thr := 4
suite := edwards25519.NewBlakeSHA256Ed25519()
tns := GenerateTestNodes(suite, n)
skippedIndex := rand.Intn(n)
var newIndex uint32 = 53 // XXX should there be a limit to the index ?
tns[skippedIndex].Index = newIndex
list := NodesFromTest(tns)
conf := Config{
Suite: suite,
NewNodes: list,
Threshold: thr,
Auth: schnorr.NewScheme(suite),
}
results := RunDKG(t, tns, conf, nil, nil, nil)
testResults(t, suite, thr, n, results)

for i, t := range tns {
t.res = results[i]
}
testResults(t, suite, thr, n, results)

// we setup now the second group with higher node count and higher threshold
// and we remove one node from the previous group
nodesToAdd := 5
newN := n - 1 + nodesToAdd // we remove one old node
newT := thr + nodesToAdd - 1 // set the threshold to accept one offline new node
var newTns = make([]*TestNode, 0, newN)
// remove a random node from the previous group
offlineToRemove := uint32(rand.Intn(n))
for _, node := range tns {
if node.Index == offlineToRemove {
continue
}
newTns = append(newTns, node)
t.Logf("Added old node newTns[%d].Index = %d\n", len(newTns), newTns[len(newTns)-1].Index)
}
// we also mess up with indexing here
newSkipped := rand.Intn(nodesToAdd)
t.Logf("skippedIndex: %d, newSkipped: %d\n", skippedIndex, newSkipped)
for i := 0; i <= nodesToAdd; i++ {
if i == newSkipped {
continue // gonna get filled up at last iteration
}
// we start at n to be sure we dont overlap with previous indices
newTns = append(newTns, NewTestNode(suite, n+i))
t.Logf("Added new node newTns[%d].Index = %d\n", len(newTns), newTns[len(newTns)-1].Index)
}
newList := NodesFromTest(newTns)
newConf := &Config{
Suite: suite,
NewNodes: newList,
OldNodes: list,
Threshold: newT,
OldThreshold: thr,
Auth: schnorr.NewScheme(suite),
}
SetupReshareNodes(newTns, newConf, tns[0].res.Key.Commits)

var deals []*DealBundle
for _, node := range newTns {
if node.res == nil {
// new members don't issue deals
continue
}
d, err := node.dkg.Deals()
require.NoError(t, err)
deals = append(deals, d)
}

var responses []*ResponseBundle
for _, node := range newTns {
resp, err := node.dkg.ProcessDeals(deals)
require.NoError(t, err)
require.NotNil(t, resp)
// a node from the old group is not present so there should be
// some responses !
responses = append(responses, resp)
}
// all nodes in the new group should have reported an error
require.Equal(t, newN, len(responses))

results = nil
for _, node := range newTns {
res, just, err := node.dkg.ProcessResponses(responses)
// we should have enough old nodes available to get a successful DKG
require.NoError(t, err)
require.Nil(t, res)
// since the last old node is absent he can't give any justifications
require.Nil(t, just)
}

for _, node := range newTns {
res, err := node.dkg.ProcessJustifications(nil)
require.NoError(t, err)
require.NotNil(t, res)
results = append(results, res)
}
testResults(t, suite, newT, newN, results)

}
func TestDKGFull(t *testing.T) {
n := 5
thr := n
@@ -806,3 +911,23 @@ func TestDKGTooManyComplaints(t *testing.T) {
}
testResults(t, suite, thr, n, filtered)
}

func TestConfigDuplicate(t *testing.T) {
n := 5
nodes := make([]Node, n)
for i := 0; i < n; i++ {
nodes[i] = Node{
Index: Index(i),
Public: nil,
}
}
nodes[2].Index = nodes[1].Index
c := &Config{
OldNodes: nodes,
}
require.Error(t, c.CheckForDuplicates())
c = &Config{
NewNodes: nodes,
}
require.Error(t, c.CheckForDuplicates())
}
