Add a firestore key migration to Teleport 17 #46472
@@ -190,6 +190,7 @@ func newRecord(from backend.Item, clock clockwork.Clock) record {
	return r
}

// TODO(tigrato|rosstimothy): Simplify this function by removing the brokenRecord and legacyRecord struct
func newRecordFromDoc(doc *firestore.DocumentSnapshot) (*record, error) {
	k, err := doc.DataAt(keyDocProperty)
	if err != nil {
@@ -413,6 +414,10 @@ func New(ctx context.Context, params backend.Params, options Options) (*Backend,
		go RetryingAsyncFunctionRunner(b.clientContext, linearConfig, b.Logger, b.purgeExpiredDocuments, "purgeExpiredDocuments")
	}

	// Migrate incorrect key types to the correct type.
	// Start the migration after a delay so that backend startup is not affected by it.
	_ = b.clock.AfterFunc(5*time.Minute, b.migrateIncorrectKeyTypes)
Review comment: Probably worth using a
Reply: Switched to interval.
	l.Info("Backend created.")
	return b, nil
}
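The review thread above suggests moving away from a one-shot timer, and the author notes the scheduling was switched to an interval. As a rough, hypothetical sketch only (not the PR's final code, and not Teleport's interval helper): delay the first attempt so startup is unaffected, then retry periodically until one attempt succeeds. The scheduleMigration and migrate names and the durations are illustrative, and the real migrateIncorrectKeyTypes in this PR does not return an error.

package firestoremigration

import (
	"context"
	"time"
)

// migrate is a stand-in for Backend.migrateIncorrectKeyTypes; here it returns
// an error so the scheduler can decide whether another attempt is needed.
func migrate(ctx context.Context) error { return nil }

// scheduleMigration waits for an initial delay so the backend can finish
// starting up, then retries the migration on a fixed interval until one
// attempt succeeds or the context is cancelled.
func scheduleMigration(ctx context.Context, delay, period time.Duration) {
	go func() {
		timer := time.NewTimer(delay)
		defer timer.Stop()
		select {
		case <-timer.C:
		case <-ctx.Done():
			return
		}

		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			if err := migrate(ctx); err == nil {
				return
			}
			select {
			case <-ticker.C:
			case <-ctx.Done():
				return
			}
		}
	}()
}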
@@ -0,0 +1,159 @@
/*
 * Teleport
 * Copyright (C) 2023 Gravitational, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package firestore

import (
	"context"
	"time"

	"cloud.google.com/go/firestore"
	"github.com/gravitational/trace"
	"github.com/sirupsen/logrus"

	"github.com/gravitational/teleport/lib/backend"
)
// migrateIncorrectKeyTypes migrates incorrect key types (backend.Key and string) to the correct type (bytes)
// in the backend. This is necessary because the backend was incorrectly storing keys as strings and backend.Key
// values, and Firestore clients mapped them to different database types, which forces ReadRange to be called 3 times.
// This migration fixes the issue by converting all keys to the correct type (bytes).
// TODO(tigrato|rosstimothy): DELETE in 19.0.0: remove this migration.
func (b *Backend) migrateIncorrectKeyTypes() {
	var (
		numberOfDocsMigrated int
		duration             time.Duration
	)
	err := backend.RunWhileLocked(
		b.clientContext,
		backend.RunWhileLockedConfig{
			LockConfiguration: backend.LockConfiguration{
				LockName:      "firestore_migrate_incorrect_key_types",
				Backend:       b,
				TTL:           5 * time.Minute,
				RetryInterval: time.Minute,
			},
			ReleaseCtxTimeout:   10 * time.Second,
			RefreshLockInterval: time.Minute,
		},
		func(ctx context.Context) error {
			start := time.Now()
			defer func() {
				duration = time.Since(start)
			}()
			// backend.Key is converted to array of ints when sending to the db.
			toArray := func(key []byte) []any {
				arrKey := make([]any, len(key))
				for i, b := range key {
					arrKey[i] = int(b)
				}
				return arrKey
			}
			nDocs, err := migrateKeyType[[]any](ctx, b, toArray)
			numberOfDocsMigrated += nDocs
			if err != nil {
				return trace.Wrap(err, "failed to migrate backend key")
			}

			stringKey := func(key []byte) string {
				return string(key)
			}
			nDocs, err = migrateKeyType[string](ctx, b, stringKey)
			numberOfDocsMigrated += nDocs
			if err != nil {
				return trace.Wrap(err, "failed to migrate legacy key")
			}
			return nil
		})

	entry := b.Entry.WithFields(logrus.Fields{
		"duration": duration,
		"migrated": numberOfDocsMigrated,
	})
	if err != nil {
		entry.WithError(err).Error("Failed to migrate incorrect key types.")
		return
	}
	entry.Infof("Migrated %d incorrect key types", numberOfDocsMigrated)
}

func migrateKeyType[T any](ctx context.Context, b *Backend, newKey func([]byte) T) (int, error) {
	limit := 300
	startKey := newKey([]byte("/"))

	bulkWriter := b.svc.BulkWriter(b.clientContext)
	defer bulkWriter.End()

	nDocs := 0
	// handle the migration in batches of 300 documents per second
	t := time.NewTimer(time.Second)
	defer t.Stop()
	for range t.C {
		docs, err := b.svc.Collection(b.CollectionName).
			// Passing the key type here forces the client to map the key to the underlying type
			// and return all the keys that share that underlying type.
			// backend.Key is mapped to Array in Firestore.
			// []byte is mapped to Bytes in Firestore.
			// string is mapped to String in Firestore.
			// Searching for keys of one underlying type therefore returns only keys stored with that type.
			Where(keyDocProperty, ">", startKey).
			Limit(limit).
			Documents(ctx).GetAll()
		if err != nil {
			return nDocs, trace.Wrap(err)
		}

		jobs := make([]*firestore.BulkWriterJob, len(docs))
		for i, dbDoc := range docs {
			newDoc, err := newRecordFromDoc(dbDoc)
			if err != nil {
				return nDocs, trace.Wrap(err, "failed to convert document")
			}

			// use a conditional update to ensure that the document has not been updated since the read
			jobs[i], err = bulkWriter.Update(
				b.svc.Collection(b.CollectionName).
					Doc(b.keyToDocumentID(newDoc.Key)),
				newDoc.updates(),
				firestore.LastUpdateTime(dbDoc.UpdateTime),
			)
			if err != nil {
				return nDocs, trace.Wrap(err, "failed to stream bulk action")
			}

			startKey = newKey(newDoc.Key) // update start key
		}

		bulkWriter.Flush() // flush the buffer

		for _, job := range jobs {
			if _, err := job.Results(); err != nil {
				// log the error and continue
				b.Entry.WithError(err).Error("failed to write bulk action")
			}
		}

		nDocs += len(docs)
		if len(docs) < limit {
			break
		}

		t.Reset(time.Second)
	}
	return nDocs, nil
}
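For context on why migrateKeyType queries by key type, here is a small, self-contained illustration (not part of the PR) of the three representations the same backend key can take in Firestore: the desired Bytes form, the Array-of-integers form produced by the toArray helper above, and the legacy String form produced by stringKey. The sample key value is made up.

package main

import "fmt"

func main() {
	key := []byte("/namespaces/default") // made-up example key

	// Desired representation after the migration: stored as Bytes in Firestore.
	asBytes := key

	// backend.Key values were historically sent as an array of integers,
	// which Firestore stores as an Array value (mirrors toArray above).
	asArray := make([]any, len(key))
	for i, b := range key {
		asArray[i] = int(b)
	}

	// Plain string keys were stored as a Firestore String value (mirrors stringKey above).
	asString := string(key)

	fmt.Printf("bytes:  %v\narray:  %v\nstring: %q\n", asBytes, asArray, asString)
}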
Review comment: Can you add a note indicating which version it's safe to do this in?
Reply: Added in f74a94d