Cleaning up #393

Merged
merged 1 commit on Sep 15, 2023

2 changes: 2 additions & 0 deletions cmd/bench/bench.go
@@ -42,6 +42,8 @@ func main() {
securityParam := shared.T

tempdir, _ := os.MkdirTemp("", "poet-test")
defer os.RemoveAll(tempdir)

proofGenStarted := time.Now()
end := proofGenStarted.Add(cfg.Duration)
leafs, merkleProof, err := prover.GenerateProofWithoutPersistency(
1 change: 1 addition & 0 deletions go.mod
@@ -20,6 +20,7 @@ require (
github.com/zeebo/blake3 v0.2.3
go.uber.org/mock v0.2.0
go.uber.org/zap v1.25.0
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/sync v0.3.0
golang.org/x/sys v0.12.0
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d
2 changes: 2 additions & 0 deletions go.sum
@@ -258,6 +258,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
31 changes: 0 additions & 31 deletions hashfunc_test.go

This file was deleted.

5 changes: 1 addition & 4 deletions poet.go
@@ -44,11 +44,8 @@ func poetMain() (err error) {
if err != nil {
return err
}
server.SetupConfig(cfg)

cfg, err = server.SetupConfig(cfg)
if err != nil {
return err
}
// Finally, parse the remaining command line options again to ensure
// they take precedence.
cfg, err = server.ParseFlags(cfg)
27 changes: 27 additions & 0 deletions prover/layer_factory.go
@@ -0,0 +1,27 @@
package prover

import (
"fmt"
"path/filepath"

"github.com/spacemeshos/merkle-tree/cache"
"github.com/spacemeshos/merkle-tree/cache/readwriters"
)

// GetLayerFactory creates a merkle LayerFactory.
// The minMemoryLayer determines the threshold below which layers are persisted on disk;
// layers at or above it are kept in memory.
func GetLayerFactory(minMemoryLayer uint, datadir string, fileWriterBufSize uint) cache.LayerFactory {
return func(layerHeight uint) (cache.LayerReadWriter, error) {
if layerHeight < minMemoryLayer {
fileName := filepath.Join(datadir, fmt.Sprintf("layercache_%d.bin", layerHeight))
readWriter, err := readwriters.NewFileReadWriter(fileName, int(fileWriterBufSize))
if err != nil {
return nil, err
}

Codecov warning (codecov/patch): added lines 20-21 in prover/layer_factory.go are not covered by tests.

return readWriter, nil
}
return &readwriters.SliceReadWriter{}, nil
}
}
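For reference, the new GetLayerFactory is meant to be passed straight to the merkle cache writer, exactly as makeProofTree does in prover.go below. A minimal sketch of that wiring, using only the prover and cache APIs that appear in this diff; the layer threshold, datadir, and buffer size are illustrative placeholder values, not project defaults:

package example

import (
	"github.com/spacemeshos/merkle-tree/cache"

	"github.com/spacemeshos/poet/prover"
)

func buildTreeCache() *cache.Writer {
	// Layers below 7 are backed by layercache_<n>.bin files under the given
	// datadir; layers 7 and above are kept in memory. All three arguments are
	// placeholder values for the sketch.
	layerFactory := prover.GetLayerFactory(7, "/tmp/poet-example", 4096)

	return cache.NewWriter(
		cache.Combine(
			cache.SpecificLayersPolicy(map[uint]bool{0: true}),
			cache.MinHeightPolicy(prover.MerkleMinCacheLayer),
		),
		layerFactory,
	)
}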
85 changes: 39 additions & 46 deletions prover/prover.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"math"
"os"
"path/filepath"
"regexp"
@@ -43,17 +44,14 @@ type TreeConfig struct {

type persistFunc func(ctx context.Context, treeCache *cache.Writer, nextLeafId uint64) error

var persist persistFunc = func(context.Context, *cache.Writer, uint64) error { return nil }

// GenerateProof computes the PoET DAG, uses Fiat-Shamir to derive a challenge from the Merkle root and generates a
// Merkle proof using the challenge and the DAG.
// GenerateProof generates the Proof of Sequential Work. It stops when the given deadline is reached.
func GenerateProof(
ctx context.Context,
leavesCounter prometheus.Counter,
treeCfg TreeConfig,
labelHashFunc func(data []byte) []byte,
merkleHashFunc merkle.HashFunc,
limit time.Time,
deadline time.Time,
securityParam uint8,
persist persistFunc,
) (uint64, *shared.MerkleProof, error) {
@@ -63,17 +61,17 @@ func GenerateProof(
}
defer treeCache.Close()

return generateProof(ctx, leavesCounter, labelHashFunc, tree, treeCache, limit, 0, securityParam, persist)
return generateProof(ctx, leavesCounter, labelHashFunc, tree, treeCache, deadline, 0, securityParam, persist)
}

// GenerateProofRecovery recovers proof generation, from a given 'nextLeafID' and for a given 'parkedNodes' snapshot.
// GenerateProofRecovery recovers proof generation, from a given 'nextLeafID'.
func GenerateProofRecovery(
ctx context.Context,
leavesCounter prometheus.Counter,
treeCfg TreeConfig,
labelHashFunc func(data []byte) []byte,
merkleHashFunc merkle.HashFunc,
limit time.Time,
deadline time.Time,
securityParam uint8,
nextLeafID uint64,
persist persistFunc,
@@ -84,35 +82,50 @@
}
defer treeCache.Close()

return generateProof(ctx, leavesCounter, labelHashFunc, tree, treeCache, limit, nextLeafID, securityParam, persist)
return generateProof(
ctx,
leavesCounter,
labelHashFunc,
tree,
treeCache,
deadline,
nextLeafID,
securityParam,
persist,
)
}

// GenerateProofWithoutPersistency calls GenerateProof with disabled persistency functionality
// and potential soft/hard-shutdown recovery.
// Meant to be used for testing purposes only. Doesn't expose metrics too.
// GenerateProofWithoutPersistency calls GenerateProof with disabled persistency functionality.
// Tree recovery will not be possible. Meant to be used for testing purposes only.
// It doesn't expose metrics either.
func GenerateProofWithoutPersistency(
ctx context.Context,
treeCfg TreeConfig,
labelHashFunc func(data []byte) []byte,
merkleHashFunc merkle.HashFunc,
limit time.Time,
deadline time.Time,
securityParam uint8,
) (uint64, *shared.MerkleProof, error) {
leavesCounter := prometheus.NewCounter(prometheus.CounterOpts{})
return GenerateProof(ctx, leavesCounter, treeCfg, labelHashFunc, merkleHashFunc, limit, securityParam, persist)
return GenerateProof(
ctx,
leavesCounter,
treeCfg,
labelHashFunc,
merkleHashFunc,
deadline,
securityParam,
func(context.Context, *cache.Writer, uint64) error { return nil },
)
}

func makeProofTree(treeCfg TreeConfig, merkleHashFunc merkle.HashFunc) (*merkle.Tree, *cache.Writer, error) {
if treeCfg.MinMemoryLayer < LowestMerkleMinMemoryLayer {
treeCfg.MinMemoryLayer = LowestMerkleMinMemoryLayer
}
metaFactory := NewReadWriterMetaFactory(treeCfg.MinMemoryLayer, treeCfg.Datadir, treeCfg.FileWriterBufSize)

minMemoryLayer := max(treeCfg.MinMemoryLayer, LowestMerkleMinMemoryLayer)
treeCache := cache.NewWriter(
cache.Combine(
cache.SpecificLayersPolicy(map[uint]bool{0: true}),
cache.MinHeightPolicy(MerkleMinCacheLayer)),
metaFactory.GetFactory(),
GetLayerFactory(minMemoryLayer, treeCfg.Datadir, treeCfg.FileWriterBufSize),
)

tree, err := merkle.NewTreeBuilder().WithHashFunc(merkleHashFunc).WithCacheWriter(treeCache).Build()
@@ -130,17 +143,15 @@ func makeRecoveryProofTree(
nextLeafID uint64,
) (*cache.Writer, *merkle.Tree, error) {
// Don't use memory cache. Just utilize the existing files cache.
maxUint := ^uint(0)
layerFactory := NewReadWriterMetaFactory(maxUint, treeCfg.Datadir, treeCfg.FileWriterBufSize).GetFactory()
layerFactory := GetLayerFactory(math.MaxUint, treeCfg.Datadir, treeCfg.FileWriterBufSize)

layersFiles, err := getLayersFiles(treeCfg.Datadir)
if err != nil {
return nil, nil, err
}

// Validate that layer 0 exists.
_, ok := layersFiles[0]
if !ok {
if _, ok := layersFiles[0]; !ok {
return nil, nil, fmt.Errorf("layer 0 cache file is missing")
}

@@ -149,11 +160,9 @@

// Validate structure.
for layer, file := range layersFiles {
if layer > topLayer {
topLayer = layer
}
topLayer = max(topLayer, layer)

readWriter, err := layerFactory(uint(layer))
readWriter, err := layerFactory(layer)
if err != nil {
return nil, nil, err
}
@@ -225,7 +234,7 @@ func makeRecoveryProofTree(
parkedNodes = append(parkedNodes, memCachedParkedNodes...)

logging.FromContext(ctx).
Info("recovered parked nodes", zap.Array("nodes", zapcore.ArrayMarshalerFunc(func(enc zapcore.ArrayEncoder) error {
Debug("recovered parked nodes", zap.Array("nodes", zapcore.ArrayMarshalerFunc(func(enc zapcore.ArrayEncoder) error {
for _, node := range parkedNodes {
enc.AppendString(fmt.Sprintf("%X", node))
}
@@ -384,29 +393,13 @@ func getLayersFiles(datadir string) (map[uint]string, error) {
return files, nil
}

// Calculate the root of a Merkle Tree with given leaves.
func CalcTreeRoot(leaves [][]byte) ([]byte, error) {
tree, err := merkle.NewTreeBuilder().WithHashFunc(shared.HashMembershipTreeNode).Build()
if err != nil {
return nil, fmt.Errorf("failed to generate tree: %w", err)
}
for _, member := range leaves {
err := tree.AddLeaf(member)
if err != nil {
return nil, fmt.Errorf("failed to add leaf: %w", err)
}
}
return tree.Root(), nil
}

// build a small tree with the nodes from the top layer of the cache as leafs.
// this tree will be used to get parked nodes for the merkle tree.
func recoverMemCachedParkedNodes(
layerReader mshared.LayerReader,
merkleHashFunc merkle.HashFunc,
) ([][]byte, mshared.CacheReader, error) {
recoveryTreelayerFactory := NewReadWriterMetaFactory(0, "", 0).GetFactory()
recoveryTreeCache := cache.NewWriter(func(uint) bool { return true }, recoveryTreelayerFactory)
recoveryTreeCache := cache.NewWriter(func(uint) bool { return true }, GetLayerFactory(0, "", 0))

tree, err := merkle.NewTreeBuilder().WithHashFunc(merkleHashFunc).WithCacheWriter(recoveryTreeCache).Build()
if err != nil {
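To illustrate the renamed deadline parameter and the updated signature, here is a minimal, self-contained call to GenerateProofWithoutPersistency, roughly mirroring the cmd/bench usage above. The hash.GenLabelHashFunc / hash.GenMerkleHashFunc helpers and all concrete values (challenge, datadir, duration, buffer size) are assumptions for the sketch, not part of this diff:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spacemeshos/poet/hash"
	"github.com/spacemeshos/poet/prover"
	"github.com/spacemeshos/poet/shared"
)

func main() {
	challenge := []byte("example challenge")

	// Stop the sequential work after one second; `deadline` is the renamed
	// `limit` parameter. Directory, duration, and buffer size are placeholders.
	deadline := time.Now().Add(time.Second)

	leafs, merkleProof, err := prover.GenerateProofWithoutPersistency(
		context.Background(),
		prover.TreeConfig{Datadir: "/tmp/poet-example", FileWriterBufSize: 4096},
		hash.GenLabelHashFunc(challenge),  // assumed helper from the poet hash package
		hash.GenMerkleHashFunc(challenge), // assumed helper from the poet hash package
		deadline,
		shared.T,
	)
	if err != nil {
		panic(err)
	}

	_ = merkleProof // Merkle proof over the challenge derived from the tree root
	fmt.Printf("generated %d leaves before the deadline\n", leafs)
}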
73 changes: 0 additions & 73 deletions prover/readwritermetafactory.go

This file was deleted.

7 changes: 2 additions & 5 deletions rpc/rpcserver_test.go
@@ -11,13 +11,11 @@ import (

api "github.com/spacemeshos/poet/release/proto/go/rpc/api/v1"
"github.com/spacemeshos/poet/rpc"
"github.com/spacemeshos/poet/server"
)

func Test_Submit_DoesNotPanicOnMissingPubKey(t *testing.T) {
// Arrange
cfg := server.DefaultConfig()
sv := rpc.NewServer(nil, nil, cfg.Round.PhaseShift, cfg.Round.CycleGap)
sv := rpc.NewServer(nil, nil, 0, 0)

// Act
in := &api.SubmitRequest{}
@@ -35,8 +33,7 @@ func Test_Submit_DoesNotPanicOnMissingPubKey(t *testing.T) {

func Test_Submit_DoesNotPanicOnMissingSignature(t *testing.T) {
// Arrange
cfg := server.DefaultConfig()
sv := rpc.NewServer(nil, nil, cfg.Round.PhaseShift, cfg.Round.CycleGap)
sv := rpc.NewServer(nil, nil, 0, 0)
pub, _, err := ed25519.GenerateKey(nil)
require.NoError(t, err)
