Export crl shard calculation code for future use (#7127)
Make crl.GetChunkAtTime an exported function so that it can be used by
the RA in a future PR without making that PR bigger and harder to
review. Also convert it from a method to a standalone function, adding
two new arguments (shardWidth and numShards) to compensate for the loss
of its receiver.

Also move some tests from batch_test to updater_test, where they should
have been in the first place.

Part of #7094
aarongable authored Nov 2, 2023
1 parent 81cb970 commit 1d31a22
Showing 3 changed files with 129 additions and 130 deletions.
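Before the per-file diffs, here is a minimal sketch (not part of this commit) of how an external caller such as the RA might use the newly exported helper. The GetChunkAtTime signature and the exported Idx field match the diff below; the import path, package name, and call site are assumptions.

package main

import (
    "fmt"
    "time"

    // Assumed import path; the function lives in crl/updater in this repository.
    "github.com/letsencrypt/boulder/crl/updater"
)

func main() {
    // GetChunkAtTime now takes shardWidth and numShards explicitly, since it
    // no longer has a crlUpdater receiver to read them from.
    shardWidth := 1 * time.Hour
    numShards := 24

    c, err := updater.GetChunkAtTime(shardWidth, numShards, time.Now())
    if err != nil {
        // Times before the anchor, or too far past it to fit in a
        // time.Duration, are rejected.
        fmt.Println("computing CRL chunk:", err)
        return
    }
    fmt.Printf("chunk index for the current time: %d\n", c.Idx)
}

Note that the chunk type itself stays unexported, so outside callers can read the exported Idx field of the returned value but cannot name the type or touch its start and end fields.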
119 changes: 0 additions & 119 deletions crl/updater/batch_test.go
@@ -41,122 +41,3 @@ func TestRunOnce(t *testing.T) {
    test.AssertEquals(t, len(mockLog.GetAllMatching("Generating CRL failed:")), 4)
    cu.tickHistogram.Reset()
}

func TestGetShardMappings(t *testing.T) {
    // We set atTime to be exactly one day (numShards * shardWidth) after the
    // anchorTime for these tests, so that we know that the index of the first
    // chunk we would normally (i.e. not taking lookback or overshoot into
    // account) care about is 0.
    atTime := anchorTime().Add(24 * time.Hour)

    // When there is no lookback, and the maxNotAfter is exactly as far in the
    // future as the numShards * shardWidth looks, every shard should be mapped to
    // exactly one chunk.
    tcu := crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(23*time.Hour + 30*time.Minute)},
        lookbackPeriod: 0,
    }
    m, err := tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting aligned shards")
    test.AssertEquals(t, len(m), 24)
    for _, s := range m {
        test.AssertEquals(t, len(s), 1)
    }

    // When there is 1.5 hours each of lookback and maxNotAfter overshoot, then
    // there should be four shards which each get two chunks mapped to them.
    tcu = crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(24*time.Hour + 90*time.Minute)},
        lookbackPeriod: 90 * time.Minute,
    }
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting overshoot shards")
    test.AssertEquals(t, len(m), 24)
    for i, s := range m {
        if i == 0 || i == 1 || i == 22 || i == 23 {
            test.AssertEquals(t, len(s), 2)
        } else {
            test.AssertEquals(t, len(s), 1)
        }
    }

    // When there is a massive amount of overshoot, many chunks should be mapped
    // to each shard.
    tcu = crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(90 * 24 * time.Hour)},
        lookbackPeriod: time.Minute,
    }
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting overshoot shards")
    test.AssertEquals(t, len(m), 24)
    for i, s := range m {
        if i == 23 {
            test.AssertEquals(t, len(s), 91)
        } else {
            test.AssertEquals(t, len(s), 90)
        }
    }

    // An arbitrarily-chosen chunk should always end up in the same shard no
    // matter what the current time, lookback, and overshoot are, as long as the
    // number of shards and the shard width remains constant.
    tcu = crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(24 * time.Hour)},
        lookbackPeriod: time.Hour,
    }
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
    tcu.lookbackPeriod = 4 * time.Hour
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
    tcu.sa = &fakeSAC{maxNotAfter: atTime.Add(300 * 24 * time.Hour)}
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
    atTime = atTime.Add(6 * time.Hour)
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
}

func TestGetChunkAtTime(t *testing.T) {
    // Our test updater divides time into chunks 1 day wide, numbered 0 through 9.
    tcu := crlUpdater{
        numShards: 10,
        shardWidth: 24 * time.Hour,
    }

    // The chunk right at the anchor time should have index 0 and start at the
    // anchor time. This also tests behavior when atTime is on a chunk boundary.
    atTime := anchorTime()
    c, err := tcu.getChunkAtTime(atTime)
    test.AssertNotError(t, err, "getting chunk at anchor")
    test.AssertEquals(t, c.idx, 0)
    test.Assert(t, c.start.Equal(atTime), "getting chunk at anchor")
    test.Assert(t, c.end.Equal(atTime.Add(24*time.Hour)), "getting chunk at anchor")

    // The chunk a bit over a year in the future should have index 5.
    atTime = anchorTime().Add(365 * 24 * time.Hour)
    c, err = tcu.getChunkAtTime(atTime.Add(1 * time.Minute))
    test.AssertNotError(t, err, "getting chunk")
    test.AssertEquals(t, c.idx, 5)
    test.Assert(t, c.start.Equal(atTime), "getting chunk")
    test.Assert(t, c.end.Equal(atTime.Add(24*time.Hour)), "getting chunk")

    // A chunk very far in the future should break the math. We have to add to
    // the time twice, since the whole point of "very far in the future" is that
    // it isn't representable by a time.Duration.
    atTime = anchorTime().Add(200 * 365 * 24 * time.Hour).Add(200 * 365 * 24 * time.Hour)
    c, err = tcu.getChunkAtTime(atTime)
    test.AssertError(t, err, "getting far-future chunk")
}
23 changes: 12 additions & 11 deletions crl/updater/updater.go
@@ -351,7 +351,7 @@ func anchorTime() time.Time {
type chunk struct {
    start time.Time
    end time.Time
    idx int
    Idx int
}

// shardMap is a mapping of shard indices to the set of chunks which should be
@@ -417,28 +417,29 @@ func (cu *crlUpdater) getShardMappings(ctx context.Context, atTime time.Time) (s

    // Find the id number and boundaries of the earliest chunk we care about.
    first := atTime.Add(-cu.lookbackPeriod)
    c, err := cu.getChunkAtTime(first)
    c, err := GetChunkAtTime(cu.shardWidth, cu.numShards, first)
    if err != nil {
        return nil, err
    }

    // Iterate over chunks until we get completely beyond the farthest-future
    // expiration.
    for c.start.Before(lastExpiry.AsTime()) {
        res[c.idx] = append(res[c.idx], c)
        res[c.Idx] = append(res[c.Idx], c)
        c = chunk{
            start: c.end,
            end: c.end.Add(cu.shardWidth),
            idx: (c.idx + 1) % cu.numShards,
            Idx: (c.Idx + 1) % cu.numShards,
        }
    }

    return res, nil
}

// getChunkAtTime returns the chunk whose boundaries contain the given time.
// It is broken out solely for the purpose of unit testing.
func (cu *crlUpdater) getChunkAtTime(atTime time.Time) (chunk, error) {
// GetChunkAtTime returns the chunk whose boundaries contain the given time.
// It is exported so that it can be used by both the crl-updater and the RA
// as we transition from dynamic to static shard mappings.
func GetChunkAtTime(shardWidth time.Duration, numShards int, atTime time.Time) (chunk, error) {
    // Compute the amount of time between the current time and the anchor time.
    timeSinceAnchor := atTime.Sub(anchorTime())
    if timeSinceAnchor == time.Duration(math.MaxInt64) || timeSinceAnchor < 0 {
@@ -447,13 +447,13 @@ func (cu *crlUpdater) getChunkAtTime(atTime time.Time) (chunk, error) {

    // Determine how many full chunks fit within that time, and from that the
    // index number of the desired chunk.
    chunksSinceAnchor := timeSinceAnchor.Nanoseconds() / cu.shardWidth.Nanoseconds()
    chunkIdx := int(chunksSinceAnchor) % cu.numShards
    chunksSinceAnchor := timeSinceAnchor.Nanoseconds() / shardWidth.Nanoseconds()
    chunkIdx := int(chunksSinceAnchor) % numShards

    // Determine the boundaries of the chunk.
    timeSinceChunk := time.Duration(timeSinceAnchor.Nanoseconds() % cu.shardWidth.Nanoseconds())
    timeSinceChunk := time.Duration(timeSinceAnchor.Nanoseconds() % shardWidth.Nanoseconds())
    left := atTime.Add(-timeSinceChunk)
    right := left.Add(cu.shardWidth)
    right := left.Add(shardWidth)

    return chunk{left, right, chunkIdx}, nil
}
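
As a quick, self-contained check of the index arithmetic above (a sketch independent of Boulder's actual anchorTime value): with a 1-hour shard width and 24 shards, a chunk that starts 34 hours after the anchor gets index 34 % 24 = 10, which is exactly the shard the consistency assertions in TestGetShardMappings below expect for m[10][0].

package main

import (
    "fmt"
    "time"
)

func main() {
    // Mirrors the chunkIdx computation in GetChunkAtTime, with the anchor
    // factored out: only the elapsed time since the anchor matters.
    shardWidth := 1 * time.Hour
    numShards := 24
    timeSinceAnchor := 34 * time.Hour

    chunksSinceAnchor := timeSinceAnchor.Nanoseconds() / shardWidth.Nanoseconds()
    chunkIdx := int(chunksSinceAnchor) % numShards

    fmt.Println(chunkIdx) // prints 10
}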
117 changes: 117 additions & 0 deletions crl/updater/updater_test.go
@@ -282,3 +282,120 @@ func TestUpdateShardWithRetry(t *testing.T) {
    test.Assert(t, startTime.Add(15*0.8*time.Second).Before(cu.clk.Now()), "retries didn't sleep enough")
    test.Assert(t, startTime.Add(15*1.2*time.Second).After(cu.clk.Now()), "retries slept too much")
}

func TestGetShardMappings(t *testing.T) {
    // We set atTime to be exactly one day (numShards * shardWidth) after the
    // anchorTime for these tests, so that we know that the index of the first
    // chunk we would normally (i.e. not taking lookback or overshoot into
    // account) care about is 0.
    atTime := anchorTime().Add(24 * time.Hour)

    // When there is no lookback, and the maxNotAfter is exactly as far in the
    // future as the numShards * shardWidth looks, every shard should be mapped to
    // exactly one chunk.
    tcu := crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(23*time.Hour + 30*time.Minute)},
        lookbackPeriod: 0,
    }
    m, err := tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting aligned shards")
    test.AssertEquals(t, len(m), 24)
    for _, s := range m {
        test.AssertEquals(t, len(s), 1)
    }

    // When there is 1.5 hours each of lookback and maxNotAfter overshoot, then
    // there should be four shards which each get two chunks mapped to them.
    tcu = crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(24*time.Hour + 90*time.Minute)},
        lookbackPeriod: 90 * time.Minute,
    }
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting overshoot shards")
    test.AssertEquals(t, len(m), 24)
    for i, s := range m {
        if i == 0 || i == 1 || i == 22 || i == 23 {
            test.AssertEquals(t, len(s), 2)
        } else {
            test.AssertEquals(t, len(s), 1)
        }
    }

    // When there is a massive amount of overshoot, many chunks should be mapped
    // to each shard.
    tcu = crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(90 * 24 * time.Hour)},
        lookbackPeriod: time.Minute,
    }
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting overshoot shards")
    test.AssertEquals(t, len(m), 24)
    for i, s := range m {
        if i == 23 {
            test.AssertEquals(t, len(s), 91)
        } else {
            test.AssertEquals(t, len(s), 90)
        }
    }

    // An arbitrarily-chosen chunk should always end up in the same shard no
    // matter what the current time, lookback, and overshoot are, as long as the
    // number of shards and the shard width remains constant.
    tcu = crlUpdater{
        numShards: 24,
        shardWidth: 1 * time.Hour,
        sa: &fakeSAC{maxNotAfter: atTime.Add(24 * time.Hour)},
        lookbackPeriod: time.Hour,
    }
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
    tcu.lookbackPeriod = 4 * time.Hour
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
    tcu.sa = &fakeSAC{maxNotAfter: atTime.Add(300 * 24 * time.Hour)}
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
    atTime = atTime.Add(6 * time.Hour)
    m, err = tcu.getShardMappings(context.Background(), atTime)
    test.AssertNotError(t, err, "getting consistency shards")
    test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
}

func TestGetChunkAtTime(t *testing.T) {
    // Our test updater divides time into chunks 1 day wide, numbered 0 through 9.
    numShards := 10
    shardWidth := 24 * time.Hour

    // The chunk right at the anchor time should have index 0 and start at the
    // anchor time. This also tests behavior when atTime is on a chunk boundary.
    atTime := anchorTime()
    c, err := GetChunkAtTime(shardWidth, numShards, atTime)
    test.AssertNotError(t, err, "getting chunk at anchor")
    test.AssertEquals(t, c.Idx, 0)
    test.Assert(t, c.start.Equal(atTime), "getting chunk at anchor")
    test.Assert(t, c.end.Equal(atTime.Add(24*time.Hour)), "getting chunk at anchor")

    // The chunk a bit over a year in the future should have index 5.
    atTime = anchorTime().Add(365 * 24 * time.Hour)
    c, err = GetChunkAtTime(shardWidth, numShards, atTime.Add(time.Minute))
    test.AssertNotError(t, err, "getting chunk")
    test.AssertEquals(t, c.Idx, 5)
    test.Assert(t, c.start.Equal(atTime), "getting chunk")
    test.Assert(t, c.end.Equal(atTime.Add(24*time.Hour)), "getting chunk")

    // A chunk very far in the future should break the math. We have to add to
    // the time twice, since the whole point of "very far in the future" is that
    // it isn't representable by a time.Duration.
    atTime = anchorTime().Add(200 * 365 * 24 * time.Hour).Add(200 * 365 * 24 * time.Hour)
    c, err = GetChunkAtTime(shardWidth, numShards, atTime)
    test.AssertError(t, err, "getting far-future chunk")
}
