diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index e78da149..93c2ff66 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -75,6 +75,15 @@ jobs: type=raw,value=latest,enable=${{ !contains(env.TAG, 'beta') }} type=semver,pattern={{raw}} + - name: Set platforms based on tag + id: set-platforms + run: | + if [[ "${{ env.TAG }}" == *"beta"* ]]; then + echo "PLATFORMS=linux/amd64" >> $GITHUB_ENV + else + echo "PLATFORMS=linux/amd64,linux/arm64,linux/arm64/v8" >> $GITHUB_ENV + fi + # Build and push Docker image with Buildx (don't push on PR) # https://github.com/docker/build-push-action - name: Build and push Docker image @@ -84,7 +93,7 @@ jobs: context: . file: ${{ matrix.dockerfile }} push: ${{ github.event_name != 'pull_request' }} - platforms: linux/amd64,linux/arm64,linux/arm64/v8 + platforms: ${{ env.PLATFORMS }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha diff --git a/client/client.go b/client/client.go index 0cb8bd57..714845b3 100644 --- a/client/client.go +++ b/client/client.go @@ -36,6 +36,8 @@ type Client interface { GetChunks(ctx context.Context, domain string, timelines []string, queryTime time.Time, opts *Options) (map[string]core.Chunk, error) GetKey(ctx context.Context, domain, id string, opts *Options) ([]core.Key, error) GetDomain(ctx context.Context, domain string, opts *Options) (core.Domain, error) + GetChunkItrs(ctx context.Context, domain string, timelines []string, epoch string, opts *Options) (map[string]string, error) + GetChunkBodies(ctx context.Context, domain string, query map[string]string, opts *Options) (map[string]core.Chunk, error) } type client struct { @@ -215,7 +217,10 @@ func (c *client) GetEntity(ctx context.Context, domain, address string, opts *Op return core.Entity{}, fmt.Errorf("Domain is offline") } - response, err := httpRequest[core.Entity](ctx, &c.client, "GET", 
"https://"+domain+"/api/v1/entity/"+address, "", opts) + url := "https://" + domain + "/api/v1/entity/" + address + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[core.Entity](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) @@ -238,7 +243,10 @@ func (c *client) GetMessage(ctx context.Context, domain, id string, opts *Option } - response, err := httpRequest[core.Message](ctx, &c.client, "GET", "https://"+domain+"/api/v1/message/"+id, "", opts) + url := "https://" + domain + "/api/v1/message/" + id + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[core.Message](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) @@ -260,7 +268,10 @@ func (c *client) GetAssociation(ctx context.Context, domain, id string, opts *Op return core.Association{}, fmt.Errorf("Domain is offline") } - response, err := httpRequest[core.Association](ctx, &c.client, "GET", "https://"+domain+"/api/v1/association/"+id, "", opts) + url := "https://" + domain + "/api/v1/association/" + id + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[core.Association](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) @@ -282,7 +293,10 @@ func (c *client) GetProfile(ctx context.Context, domain, id string, opts *Option return core.Profile{}, fmt.Errorf("Domain is offline") } - response, err := httpRequest[core.Profile](ctx, &c.client, "GET", "https://"+domain+"/api/v1/profile/"+id, "", opts) + url := "https://" + domain + "/api/v1/profile/" + id + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[core.Profile](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) @@ -304,7 +318,10 @@ func (c *client) GetTimeline(ctx context.Context, domain, id string, opts *Optio return core.Timeline{}, fmt.Errorf("Domain is offline") } - response, err := httpRequest[core.Timeline](ctx, &c.client, 
"GET", "https://"+domain+"/api/v1/timeline/"+id, "", opts) + url := "https://" + domain + "/api/v1/timeline/" + id + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[core.Timeline](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) @@ -329,7 +346,67 @@ func (c *client) GetChunks(ctx context.Context, domain string, timelines []strin timelinesStr := strings.Join(timelines, ",") timeStr := fmt.Sprintf("%d", queryTime.Unix()) - response, err := httpRequest[map[string]core.Chunk](ctx, &c.client, "GET", "https://"+domain+"/api/v1/timelines/chunks?timelines="+timelinesStr+"&time="+timeStr, "", opts) + url := "https://" + domain + "/api/v1/timelines/chunks?timelines=" + timelinesStr + "&time=" + timeStr + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[map[string]core.Chunk](ctx, &c.client, "GET", url, "", opts) + if err != nil { + span.RecordError(err) + + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + c.lastFailed[domain] = time.Now() + } + + return nil, err + } + + return *response, nil +} + +func (c *client) GetChunkItrs(ctx context.Context, domain string, timelines []string, epoch string, opts *Options) (map[string]string, error) { + ctx, span := tracer.Start(ctx, "Client.GetChunkItrs") + defer span.End() + + if !c.IsOnline(domain) { + return nil, fmt.Errorf("Domain is offline") + } + + timelinesStr := strings.Join(timelines, ",") + + url := "https://" + domain + "/api/v1/chunks/itr?timelines=" + timelinesStr + "&epoch=" + epoch + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[map[string]string](ctx, &c.client, "GET", url, "", opts) + if err != nil { + span.RecordError(err) + + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + c.lastFailed[domain] = time.Now() + } + + return nil, err + } + + return *response, nil +} + +func (c *client) GetChunkBodies(ctx context.Context, domain string, query map[string]string, opts 
*Options) (map[string]core.Chunk, error) { + ctx, span := tracer.Start(ctx, "Client.GetChunkBodies") + defer span.End() + + if !c.IsOnline(domain) { + return nil, fmt.Errorf("Domain is offline") + } + + queries := []string{} + for key, value := range query { + queries = append(queries, key+":"+value) + } + + url := "https://" + domain + "/api/v1/chunks/body?query=" + strings.Join(queries, ",") + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[map[string]core.Chunk](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) @@ -351,7 +428,10 @@ func (c *client) GetKey(ctx context.Context, domain, id string, opts *Options) ( return nil, fmt.Errorf("Domain is offline") } - response, err := httpRequest[[]core.Key](ctx, &c.client, "GET", "https://"+domain+"/api/v1/key/"+id, "", opts) + url := "https://" + domain + "/api/v1/key/" + id + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[[]core.Key](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) @@ -373,7 +453,10 @@ func (c *client) GetDomain(ctx context.Context, domain string, opts *Options) (c return core.Domain{}, fmt.Errorf("Domain is offline") } - response, err := httpRequest[core.Domain](ctx, &c.client, "GET", "https://"+domain+"/api/v1/domain", "", opts) + url := "https://" + domain + "/api/v1/domain" + span.SetAttributes(attribute.String("url", url)) + + response, err := httpRequest[core.Domain](ctx, &c.client, "GET", url, "", opts) if err != nil { span.RecordError(err) diff --git a/client/mock/client.go b/client/mock/client.go index b8a6f9f8..cb775cff 100644 --- a/client/mock/client.go +++ b/client/mock/client.go @@ -73,6 +73,36 @@ func (mr *MockClientMockRecorder) GetAssociation(ctx, domain, id, opts any) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAssociation", reflect.TypeOf((*MockClient)(nil).GetAssociation), ctx, domain, id, opts) } +// GetChunkBodies mocks base method. 
+func (m *MockClient) GetChunkBodies(ctx context.Context, domain string, query map[string]string, opts *client.Options) (map[string]core.Chunk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChunkBodies", ctx, domain, query, opts) + ret0, _ := ret[0].(map[string]core.Chunk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChunkBodies indicates an expected call of GetChunkBodies. +func (mr *MockClientMockRecorder) GetChunkBodies(ctx, domain, query, opts any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChunkBodies", reflect.TypeOf((*MockClient)(nil).GetChunkBodies), ctx, domain, query, opts) +} + +// GetChunkItrs mocks base method. +func (m *MockClient) GetChunkItrs(ctx context.Context, domain string, timelines []string, epoch string, opts *client.Options) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChunkItrs", ctx, domain, timelines, epoch, opts) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChunkItrs indicates an expected call of GetChunkItrs. +func (mr *MockClientMockRecorder) GetChunkItrs(ctx, domain, timelines, epoch, opts any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChunkItrs", reflect.TypeOf((*MockClient)(nil).GetChunkItrs), ctx, domain, timelines, epoch, opts) +} + // GetChunks mocks base method. 
func (m *MockClient) GetChunks(ctx context.Context, domain string, timelines []string, queryTime time.Time, opts *client.Options) (map[string]core.Chunk, error) { m.ctrl.T.Helper() diff --git a/cmd/api/main.go b/cmd/api/main.go index 959c76d7..3811acfc 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -218,11 +218,12 @@ func main() { defer mc.Close() client := client.NewClient() + timelineKeeper := timeline.NewKeeper(rdb, mc, client, conconf) globalPolicy := concurrent.GetDefaultGlobalPolicy() policy := concurrent.SetupPolicyService(rdb, globalPolicy, conconf) - agent := concurrent.SetupAgent(db, rdb, mc, client, policy, conconf, config.Server.RepositoryPath) + agent := concurrent.SetupAgent(db, rdb, mc, timelineKeeper, client, policy, conconf, config.Server.RepositoryPath) domainService := concurrent.SetupDomainService(db, client, conconf) domainHandler := domain.NewHandler(domainService) @@ -230,16 +231,16 @@ func main() { userKvService := concurrent.SetupUserkvService(db) userkvHandler := userkv.NewHandler(userKvService) - messageService := concurrent.SetupMessageService(db, rdb, mc, client, policy, conconf) + messageService := concurrent.SetupMessageService(db, rdb, mc, timelineKeeper, client, policy, conconf) messageHandler := message.NewHandler(messageService) - associationService := concurrent.SetupAssociationService(db, rdb, mc, client, policy, conconf) + associationService := concurrent.SetupAssociationService(db, rdb, mc, timelineKeeper, client, policy, conconf) associationHandler := association.NewHandler(associationService) profileService := concurrent.SetupProfileService(db, rdb, mc, client, policy, conconf) profileHandler := profile.NewHandler(profileService) - timelineService := concurrent.SetupTimelineService(db, rdb, mc, client, policy, conconf) + timelineService := concurrent.SetupTimelineService(db, rdb, mc, timelineKeeper, client, policy, conconf) timelineHandler := timeline.NewHandler(timelineService) entityService := 
concurrent.SetupEntityService(db, rdb, mc, client, policy, conconf) @@ -254,7 +255,7 @@ func main() { ackService := concurrent.SetupAckService(db, rdb, mc, client, policy, conconf) ackHandler := ack.NewHandler(ackService) - storeService := concurrent.SetupStoreService(db, rdb, mc, client, policy, conconf, config.Server.RepositoryPath) + storeService := concurrent.SetupStoreService(db, rdb, mc, timelineKeeper, client, policy, conconf, config.Server.RepositoryPath) storeHandler := store.NewHandler(storeService) subscriptionService := concurrent.SetupSubscriptionService(db, rdb, mc, client, policy, conconf) @@ -383,14 +384,6 @@ func main() { ) prometheus.MustRegister(resourceCountMetrics) - var timelineRealtimeConnectionMetrics = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "cc_timeline_realtime_connections", - Help: "timeline realtime connections", - }, - ) - prometheus.MustRegister(timelineRealtimeConnectionMetrics) - go func() { for { time.Sleep(15 * time.Second) @@ -440,13 +433,13 @@ func main() { } resourceCountMetrics.WithLabelValues("timeline").Set(float64(count)) - count = timelineService.CurrentRealtimeConnectionCount() - timelineRealtimeConnectionMetrics.Set(float64(count)) + timelineService.UpdateMetrics() } }() e.GET("/metrics", echoprometheus.NewHandler()) + timelineKeeper.Start(context.Background()) agent.Boot() port := ":8000" diff --git a/core/dbschema.go b/core/dbschema.go index 34c133a5..e2ac5e87 100644 --- a/core/dbschema.go +++ b/core/dbschema.go @@ -144,12 +144,12 @@ type Timeline struct { // immutable type TimelineItem struct { ResourceID string `json:"resourceID" gorm:"primaryKey;type:char(27);"` - TimelineID string `json:"timelineID" gorm:"primaryKey;type:char(26);"` + TimelineID string `json:"timelineID" gorm:"primaryKey;type:char(26);index:idx_timeline_id_c_date"` Owner string `json:"owner" gorm:"type:char(42);"` Author *string `json:"author,omitempty" gorm:"type:char(42);"` SchemaID uint `json:"-"` Schema string 
`json:"schema,omitempty" gorm:"-"` - CDate time.Time `json:"cdate,omitempty" gorm:"->;<-:create;type:timestamp with time zone;not null;default:clock_timestamp()"` + CDate time.Time `json:"cdate,omitempty" gorm:"->;<-:create;type:timestamp with time zone;not null;default:clock_timestamp();index:idx_timeline_id_c_date"` } type Ack struct { diff --git a/core/function.go b/core/function.go index ffa0b2ac..3a7de9a0 100644 --- a/core/function.go +++ b/core/function.go @@ -6,14 +6,28 @@ import ( "time" ) +const ( + chunkLength = 600 +) + func Time2Chunk(t time.Time) string { // chunk by 10 minutes - return fmt.Sprintf("%d", (t.Unix()/600)*600) + return fmt.Sprintf("%d", (t.Unix()/chunkLength)*chunkLength) +} + +func NextChunk(chunk string) string { + i, _ := strconv.ParseInt(chunk, 10, 64) + return fmt.Sprintf("%d", i+chunkLength) +} + +func PrevChunk(chunk string) string { + i, _ := strconv.ParseInt(chunk, 10, 64) + return fmt.Sprintf("%d", i-chunkLength) } func Chunk2RecentTime(chunk string) time.Time { i, _ := strconv.ParseInt(chunk, 10, 64) - return time.Unix(i+600, 0) + return time.Unix(i+chunkLength, 0) } func Chunk2ImmediateTime(chunk string) time.Time { @@ -21,6 +35,11 @@ func Chunk2ImmediateTime(chunk string) time.Time { return time.Unix(i, 0) } +func EpochTime(epoch string) time.Time { + i, _ := strconv.ParseInt(epoch, 10, 64) + return time.Unix(i, 0) +} + func TypedIDToType(id string) string { if len(id) != 27 { return "" diff --git a/core/interfaces.go b/core/interfaces.go index 1dd2e25b..5a752dcc 100644 --- a/core/interfaces.go +++ b/core/interfaces.go @@ -173,18 +173,18 @@ type TimelineService interface { ListTimelineBySchema(ctx context.Context, schema string) ([]Timeline, error) ListTimelineByAuthor(ctx context.Context, author string) ([]Timeline, error) - GetChunks(ctx context.Context, timelines []string, pivot time.Time) (map[string]Chunk, error) - GetChunksFromRemote(ctx context.Context, host string, timelines []string, pivot time.Time) 
(map[string]Chunk, error) + GetChunks(ctx context.Context, timelines []string, epoch string) (map[string]Chunk, error) ListTimelineSubscriptions(ctx context.Context) (map[string]int64, error) Count(ctx context.Context) (int64, error) - CurrentRealtimeConnectionCount() int64 NormalizeTimelineID(ctx context.Context, timeline string) (string, error) GetOwners(ctx context.Context, timelines []string) ([]string, error) Query(ctx context.Context, timelineID, schema, owner, author string, until time.Time, limit int) ([]TimelineItem, error) Realtime(ctx context.Context, request <-chan []string, response chan<- Event) + + UpdateMetrics() } type JobService interface { diff --git a/core/mock/services.go b/core/mock/services.go index 947284e2..0e155084 100644 --- a/core/mock/services.go +++ b/core/mock/services.go @@ -1757,21 +1757,6 @@ func (mr *MockTimelineServiceMockRecorder) GetChunks(ctx, timelines, pivot any) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChunks", reflect.TypeOf((*MockTimelineService)(nil).GetChunks), ctx, timelines, pivot) } -// GetChunksFromRemote mocks base method. -func (m *MockTimelineService) GetChunksFromRemote(ctx context.Context, host string, timelines []string, pivot time.Time) (map[string]core.Chunk, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChunksFromRemote", ctx, host, timelines, pivot) - ret0, _ := ret[0].(map[string]core.Chunk) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChunksFromRemote indicates an expected call of GetChunksFromRemote. -func (mr *MockTimelineServiceMockRecorder) GetChunksFromRemote(ctx, host, timelines, pivot any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChunksFromRemote", reflect.TypeOf((*MockTimelineService)(nil).GetChunksFromRemote), ctx, host, timelines, pivot) -} - // GetImmediateItems mocks base method. 
func (m *MockTimelineService) GetImmediateItems(ctx context.Context, timelines []string, since time.Time, limit int) ([]core.TimelineItem, error) { m.ctrl.T.Helper() @@ -1937,6 +1922,36 @@ func (mr *MockTimelineServiceMockRecorder) ListTimelineSubscriptions(ctx any) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTimelineSubscriptions", reflect.TypeOf((*MockTimelineService)(nil).ListTimelineSubscriptions), ctx) } +// LoadChunkBody mocks base method. +func (m *MockTimelineService) LoadChunkBody(ctx context.Context, query map[string]string) (map[string]core.Chunk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadChunkBody", ctx, query) + ret0, _ := ret[0].(map[string]core.Chunk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LoadChunkBody indicates an expected call of LoadChunkBody. +func (mr *MockTimelineServiceMockRecorder) LoadChunkBody(ctx, query any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadChunkBody", reflect.TypeOf((*MockTimelineService)(nil).LoadChunkBody), ctx, query) +} + +// LookupChunkItr mocks base method. +func (m *MockTimelineService) LookupChunkItr(ctx context.Context, timelines []string, epoch string) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LookupChunkItr", ctx, timelines, epoch) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LookupChunkItr indicates an expected call of LookupChunkItr. +func (mr *MockTimelineServiceMockRecorder) LookupChunkItr(ctx, timelines, epoch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LookupChunkItr", reflect.TypeOf((*MockTimelineService)(nil).LookupChunkItr), ctx, timelines, epoch) +} + // NormalizeTimelineID mocks base method. 
func (m *MockTimelineService) NormalizeTimelineID(ctx context.Context, timeline string) (string, error) { m.ctrl.T.Helper() @@ -1981,6 +1996,21 @@ func (mr *MockTimelineServiceMockRecorder) PublishEvent(ctx, event any) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishEvent", reflect.TypeOf((*MockTimelineService)(nil).PublishEvent), ctx, event) } +// Query mocks base method. +func (m *MockTimelineService) Query(ctx context.Context, timelineID, schema, owner, author string, until time.Time, limit int) ([]core.TimelineItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Query", ctx, timelineID, schema, owner, author, until, limit) + ret0, _ := ret[0].([]core.TimelineItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Query indicates an expected call of Query. +func (mr *MockTimelineServiceMockRecorder) Query(ctx, timelineID, schema, owner, author, until, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTimelineService)(nil).Query), ctx, timelineID, schema, owner, author, until, limit) +} + // Realtime mocks base method. func (m *MockTimelineService) Realtime(ctx context.Context, request <-chan []string, response chan<- core.Event) { m.ctrl.T.Helper() @@ -2023,6 +2053,18 @@ func (mr *MockTimelineServiceMockRecorder) Retract(ctx, mode, document, signatur return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Retract", reflect.TypeOf((*MockTimelineService)(nil).Retract), ctx, mode, document, signature) } +// UpdateMetrics mocks base method. +func (m *MockTimelineService) UpdateMetrics() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateMetrics") +} + +// UpdateMetrics indicates an expected call of UpdateMetrics. 
+func (mr *MockTimelineServiceMockRecorder) UpdateMetrics() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMetrics", reflect.TypeOf((*MockTimelineService)(nil).UpdateMetrics)) +} + // UpsertTimeline mocks base method. func (m *MockTimelineService) UpsertTimeline(ctx context.Context, mode core.CommitMode, document, signature string) (core.Timeline, error) { m.ctrl.T.Helper() diff --git a/core/model.go b/core/model.go index 346fa5c4..63acd0a8 100644 --- a/core/model.go +++ b/core/model.go @@ -4,11 +4,11 @@ import () // Event is websocket root packet model type Event struct { - Timeline string `json:"timeline"` // stream full id (ex: @) - Item TimelineItem `json:"item,omitempty"` - Resource any `json:"resource,omitempty"` - Document string `json:"document"` - Signature string `json:"signature"` + Timeline string `json:"timeline"` // stream full id (ex: @) + Item *TimelineItem `json:"item,omitempty"` + Resource any `json:"resource,omitempty"` + Document string `json:"document"` + Signature string `json:"signature"` } type Chunk struct { diff --git a/wire.go b/wire.go index 875a1040..9ca817b1 100644 --- a/wire.go +++ b/wire.go @@ -104,7 +104,7 @@ func SetupKeyService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client return nil } -func SetupMessageService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client client.Client, policy core.PolicyService, config core.Config) core.MessageService { +func SetupMessageService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client client.Client, policy core.PolicyService, config core.Config) core.MessageService { wire.Build(messageServiceProvider) return nil } @@ -114,12 +114,12 @@ func SetupProfileService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, cl return nil } -func SetupAssociationService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client client.Client, policy core.PolicyService, config core.Config) 
core.AssociationService { +func SetupAssociationService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client client.Client, policy core.PolicyService, config core.Config) core.AssociationService { wire.Build(associationServiceProvider) return nil } -func SetupTimelineService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client client.Client, policy core.PolicyService, config core.Config) core.TimelineService { +func SetupTimelineService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client client.Client, policy core.PolicyService, config core.Config) core.TimelineService { wire.Build(timelineServiceProvider) return nil } @@ -134,7 +134,7 @@ func SetupEntityService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, cli return nil } -func SetupAgent(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client client.Client, policy core.PolicyService, config core.Config, repositoryPath string) core.AgentService { +func SetupAgent(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client client.Client, policy core.PolicyService, config core.Config, repositoryPath string) core.AgentService { wire.Build(agentServiceProvider) return nil } @@ -154,7 +154,7 @@ func SetupSchemaService(db *gorm.DB) core.SchemaService { return nil } -func SetupStoreService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client client.Client, policy core.PolicyService, config core.Config, repositoryPath string) core.StoreService { +func SetupStoreService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client client.Client, policy core.PolicyService, config core.Config, repositoryPath string) core.StoreService { wire.Build(storeServiceProvider) return nil } diff --git a/wire_gen.go b/wire_gen.go index 25195533..022889ee 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -67,12 +67,12 @@ func SetupKeyService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client return 
keyService } -func SetupMessageService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client2 client.Client, policy2 core.PolicyService, config core.Config) core.MessageService { +func SetupMessageService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client2 client.Client, policy2 core.PolicyService, config core.Config) core.MessageService { schemaService := SetupSchemaService(db) repository := message.NewRepository(db, mc, schemaService) entityService := SetupEntityService(db, rdb, mc, client2, policy2, config) domainService := SetupDomainService(db, client2, config) - timelineService := SetupTimelineService(db, rdb, mc, client2, policy2, config) + timelineService := SetupTimelineService(db, rdb, mc, keeper, client2, policy2, config) keyService := SetupKeyService(db, rdb, mc, client2, config) messageService := message.NewService(repository, client2, entityService, domainService, timelineService, keyService, policy2, config) return messageService @@ -87,23 +87,23 @@ func SetupProfileService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, cl return profileService } -func SetupAssociationService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client2 client.Client, policy2 core.PolicyService, config core.Config) core.AssociationService { +func SetupAssociationService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client2 client.Client, policy2 core.PolicyService, config core.Config) core.AssociationService { schemaService := SetupSchemaService(db) repository := association.NewRepository(db, mc, schemaService) entityService := SetupEntityService(db, rdb, mc, client2, policy2, config) domainService := SetupDomainService(db, client2, config) profileService := SetupProfileService(db, rdb, mc, client2, policy2, config) - timelineService := SetupTimelineService(db, rdb, mc, client2, policy2, config) + timelineService := SetupTimelineService(db, rdb, mc, keeper, client2, policy2, config) 
subscriptionService := SetupSubscriptionService(db, rdb, mc, client2, policy2, config) - messageService := SetupMessageService(db, rdb, mc, client2, policy2, config) + messageService := SetupMessageService(db, rdb, mc, keeper, client2, policy2, config) keyService := SetupKeyService(db, rdb, mc, client2, config) associationService := association.NewService(repository, client2, entityService, domainService, profileService, timelineService, subscriptionService, messageService, keyService, policy2, config) return associationService } -func SetupTimelineService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client2 client.Client, policy2 core.PolicyService, config core.Config) core.TimelineService { +func SetupTimelineService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client2 client.Client, policy2 core.PolicyService, config core.Config) core.TimelineService { schemaService := SetupSchemaService(db) - repository := timeline.NewRepository(db, rdb, mc, client2, schemaService, config) + repository := timeline.NewRepository(db, rdb, mc, keeper, client2, schemaService, config) entityService := SetupEntityService(db, rdb, mc, client2, policy2, config) domainService := SetupDomainService(db, client2, config) semanticIDService := SetupSemanticidService(db) @@ -127,8 +127,8 @@ func SetupEntityService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, cli return entityService } -func SetupAgent(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client2 client.Client, policy2 core.PolicyService, config core.Config, repositoryPath string) core.AgentService { - storeService := SetupStoreService(db, rdb, mc, client2, policy2, config, repositoryPath) +func SetupAgent(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client2 client.Client, policy2 core.PolicyService, config core.Config, repositoryPath string) core.AgentService { + storeService := SetupStoreService(db, rdb, mc, keeper, client2, policy2, config, 
repositoryPath) jobService := SetupJobService(db) agentService := agent.NewAgent(mc, rdb, storeService, jobService, client2, config, repositoryPath) return agentService @@ -154,14 +154,14 @@ func SetupSchemaService(db *gorm.DB) core.SchemaService { return schemaService } -func SetupStoreService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client2 client.Client, policy2 core.PolicyService, config core.Config, repositoryPath string) core.StoreService { +func SetupStoreService(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper timeline.Keeper, client2 client.Client, policy2 core.PolicyService, config core.Config, repositoryPath string) core.StoreService { repository := store.NewRepository(rdb) keyService := SetupKeyService(db, rdb, mc, client2, config) entityService := SetupEntityService(db, rdb, mc, client2, policy2, config) - messageService := SetupMessageService(db, rdb, mc, client2, policy2, config) - associationService := SetupAssociationService(db, rdb, mc, client2, policy2, config) + messageService := SetupMessageService(db, rdb, mc, keeper, client2, policy2, config) + associationService := SetupAssociationService(db, rdb, mc, keeper, client2, policy2, config) profileService := SetupProfileService(db, rdb, mc, client2, policy2, config) - timelineService := SetupTimelineService(db, rdb, mc, client2, policy2, config) + timelineService := SetupTimelineService(db, rdb, mc, keeper, client2, policy2, config) ackService := SetupAckService(db, rdb, mc, client2, policy2, config) subscriptionService := SetupSubscriptionService(db, rdb, mc, client2, policy2, config) semanticIDService := SetupSemanticidService(db) diff --git a/x/agent/main.go b/x/agent/main.go index dabede16..3fcc9245 100644 --- a/x/agent/main.go +++ b/x/agent/main.go @@ -51,12 +51,6 @@ func NewAgent( func (a *agent) Boot() { slog.Info("agent start!") - ctx := context.Background() - - go a.watchEventRoutine(ctx) - go a.chunkUpdaterRoutine(ctx) - go a.connectionKeeperRoutine(ctx) - 
ticker60A := time.NewTicker(60 * time.Second) go func() { for { diff --git a/x/association/repository.go b/x/association/repository.go index 7e694eef..c313c288 100644 --- a/x/association/repository.go +++ b/x/association/repository.go @@ -2,12 +2,13 @@ package association import ( "context" - "errors" - "github.com/bradfitz/gomemcache/memcache" - "github.com/totegamma/concurrent/core" "gorm.io/gorm" "log/slog" "strconv" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/pkg/errors" + "github.com/totegamma/concurrent/core" ) // Repository is the interface for association repository @@ -34,19 +35,19 @@ type repository struct { // NewRepository creates a new association repository func NewRepository(db *gorm.DB, mc *memcache.Client, schema core.SchemaService) Repository { + return &repository{db, mc, schema} +} +func (r *repository) setCurrentCount() { var count int64 - err := db.Model(&core.Association{}).Count(&count).Error + err := r.db.Model(&core.Association{}).Count(&count).Error if err != nil { slog.Error( "failed to count associations", slog.String("error", err.Error()), ) } - - mc.Set(&memcache.Item{Key: "association_count", Value: []byte(strconv.FormatInt(count, 10))}) - - return &repository{db, mc, schema} + r.mc.Set(&memcache.Item{Key: "association_count", Value: []byte(strconv.FormatInt(count, 10))}) } // Total returns the total number of associations @@ -57,6 +58,10 @@ func (r *repository) Count(ctx context.Context) (int64, error) { item, err := r.mc.Get("association_count") if err != nil { span.RecordError(err) + if errors.Is(err, memcache.ErrCacheMiss) { + r.setCurrentCount() + return 0, errors.Wrap(err, "trying to fix...") + } return 0, err } diff --git a/x/association/service.go b/x/association/service.go index 2dddd3b0..7cbffc9e 100644 --- a/x/association/service.go +++ b/x/association/service.go @@ -368,7 +368,7 @@ func (s *service) Create(ctx context.Context, mode core.CommitMode, document str event := core.Event{ Timeline: timeline, - 
Item: posted, + Item: &posted, Document: document, Signature: signature, Resource: association, diff --git a/x/entity/repository.go b/x/entity/repository.go index 61ff15cc..00fb4bd7 100644 --- a/x/entity/repository.go +++ b/x/entity/repository.go @@ -37,9 +37,12 @@ type repository struct { // NewRepository creates a new host repository func NewRepository(db *gorm.DB, mc *memcache.Client, schema core.SchemaService) Repository { + return &repository{db, mc, schema} +} +func (r *repository) setCurrentCount() { var count int64 - err := db.Model(&core.Entity{}).Count(&count).Error + err := r.db.Model(&core.Entity{}).Count(&count).Error if err != nil { slog.Error( "failed to count entities", @@ -47,9 +50,7 @@ func NewRepository(db *gorm.DB, mc *memcache.Client, schema core.SchemaService) ) } - mc.Set(&memcache.Item{Key: "entity_count", Value: []byte(strconv.FormatInt(count, 10))}) - - return &repository{db, mc, schema} + r.mc.Set(&memcache.Item{Key: "entity_count", Value: []byte(strconv.FormatInt(count, 10))}) } // Count returns the total number of entities @@ -60,6 +61,11 @@ func (r *repository) Count(ctx context.Context) (int64, error) { item, err := r.mc.Get("entity_count") if err != nil { span.RecordError(err) + span.RecordError(err) + if errors.Is(err, memcache.ErrCacheMiss) { + r.setCurrentCount() + return 0, errors.Wrap(err, "trying to fix...") + } return 0, err } @@ -140,11 +146,22 @@ func (r *repository) Upsert(ctx context.Context, entity core.Entity) (core.Entit ctx, span := tracer.Start(ctx, "Entity.Repository.Upsert") defer span.End() + isNewRecord := false + + // check existence + var existing core.Entity + err := r.db.WithContext(ctx).First(&existing, "id = ?", entity.ID).Error + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + isNewRecord = true + } + if err := r.db.WithContext(ctx).Save(&entity).Error; err != nil { return core.Entity{}, err } - r.mc.Increment("entity_count", 1) + if isNewRecord { + r.mc.Increment("entity_count", 1) + } return 
entity, nil } diff --git a/x/message/repository.go b/x/message/repository.go index ea6f352c..5d9b8c40 100644 --- a/x/message/repository.go +++ b/x/message/repository.go @@ -2,12 +2,14 @@ package message import ( "context" - "errors" - "github.com/bradfitz/gomemcache/memcache" - "github.com/totegamma/concurrent/core" - "gorm.io/gorm" "log/slog" "strconv" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/pkg/errors" + "gorm.io/gorm" + + "github.com/totegamma/concurrent/core" ) // Repository is the interface for message repository @@ -28,9 +30,12 @@ type repository struct { // NewRepository creates a new message repository func NewRepository(db *gorm.DB, mc *memcache.Client, schema core.SchemaService) Repository { + return &repository{db, mc, schema} +} +func (r *repository) setCurrentCount() { var count int64 - err := db.Model(&core.Message{}).Count(&count).Error + err := r.db.Model(&core.Message{}).Count(&count).Error if err != nil { slog.Error( "failed to count messages", @@ -38,9 +43,7 @@ func NewRepository(db *gorm.DB, mc *memcache.Client, schema core.SchemaService) ) } - mc.Set(&memcache.Item{Key: "message_count", Value: []byte(strconv.FormatInt(count, 10))}) - - return &repository{db, mc, schema} + r.mc.Set(&memcache.Item{Key: "message_count", Value: []byte(strconv.FormatInt(count, 10))}) } func (r *repository) normalizeDBID(id string) (string, error) { @@ -121,6 +124,10 @@ func (r *repository) Count(ctx context.Context) (int64, error) { item, err := r.mc.Get("message_count") if err != nil { span.RecordError(err) + if errors.Is(err, memcache.ErrCacheMiss) { + r.setCurrentCount() + return 0, errors.Wrap(err, "trying to fix...") + } return 0, err } diff --git a/x/message/service.go b/x/message/service.go index 6c9065fd..c8f58b30 100644 --- a/x/message/service.go +++ b/x/message/service.go @@ -439,7 +439,7 @@ func (s *service) Create(ctx context.Context, mode core.CommitMode, document str // eventを放流 event := core.Event{ Timeline: timeline, - Item: 
posted, + Item: &posted, Document: sendDocument, Signature: sendSignature, Resource: sendResource, diff --git a/x/profile/repository.go b/x/profile/repository.go index 85e06d71..a9487bfe 100644 --- a/x/profile/repository.go +++ b/x/profile/repository.go @@ -2,11 +2,11 @@ package profile import ( "context" - "errors" "log/slog" "strconv" "github.com/bradfitz/gomemcache/memcache" + "github.com/pkg/errors" "gorm.io/gorm" "github.com/totegamma/concurrent/core" @@ -30,6 +30,47 @@ type repository struct { schema core.SchemaService } +// NewRepository creates a new profile repository +func NewRepository(db *gorm.DB, mc *memcache.Client, schema core.SchemaService) Repository { + return &repository{db, mc, schema} +} + +func (r *repository) setCurrentCount() { + var count int64 + err := r.db.Model(&core.Profile{}).Count(&count).Error + if err != nil { + slog.Error( + "failed to count profiles", + slog.String("error", err.Error()), + ) + } + + r.mc.Set(&memcache.Item{Key: "profile_count", Value: []byte(strconv.FormatInt(count, 10))}) +} + +// Total returns the total number of profiles +func (r *repository) Count(ctx context.Context) (int64, error) { + ctx, span := tracer.Start(ctx, "Profile.Repository.Count") + defer span.End() + + item, err := r.mc.Get("profile_count") + if err != nil { + span.RecordError(err) + if errors.Is(err, memcache.ErrCacheMiss) { + r.setCurrentCount() + return 0, errors.Wrap(err, "trying to fix...") + } + return 0, err + } + + count, err := strconv.ParseInt(string(item.Value), 10, 64) + if err != nil { + span.RecordError(err) + return 0, err + } + return count, nil +} + func (r *repository) normalizeDBID(id string) (string, error) { normalized := id @@ -100,42 +141,6 @@ func (r *repository) postProcess(ctx context.Context, profile *core.Profile) err return nil } -// NewRepository creates a new profile repository -func NewRepository(db *gorm.DB, mc *memcache.Client, schema core.SchemaService) Repository { - - var count int64 - err := 
db.Model(&core.Profile{}).Count(&count).Error - if err != nil { - slog.Error( - "failed to count profiles", - slog.String("error", err.Error()), - ) - } - - mc.Set(&memcache.Item{Key: "profile_count", Value: []byte(strconv.FormatInt(count, 10))}) - - return &repository{db, mc, schema} -} - -// Total returns the total number of profiles -func (r *repository) Count(ctx context.Context) (int64, error) { - ctx, span := tracer.Start(ctx, "Profile.Repository.Count") - defer span.End() - - item, err := r.mc.Get("profile_count") - if err != nil { - span.RecordError(err) - return 0, err - } - - count, err := strconv.ParseInt(string(item.Value), 10, 64) - if err != nil { - span.RecordError(err) - return 0, err - } - return count, nil -} - // Upsert creates and updates profile func (r *repository) Upsert(ctx context.Context, profile core.Profile) (core.Profile, error) { ctx, span := tracer.Start(ctx, "Profile.Repository.Upsert") diff --git a/x/timeline/handler.go b/x/timeline/handler.go index 5b73f00d..3fa672f7 100644 --- a/x/timeline/handler.go +++ b/x/timeline/handler.go @@ -195,8 +195,10 @@ func (h handler) GetChunks(c echo.Context) error { return c.JSON(http.StatusBadRequest, echo.Map{"error": "Invalid request"}) } time := time.Unix(timeInt, 0) + epoch := core.Time2Chunk(time) + fmt.Println("epoch", epoch) - chunks, err := h.service.GetChunks(ctx, timelines, time) + chunks, err := h.service.GetChunks(ctx, timelines, epoch) if err != nil { span.RecordError(err) return c.JSON(http.StatusInternalServerError, echo.Map{"error": err.Error()}) diff --git a/x/agent/realtime.go b/x/timeline/keeper.go similarity index 76% rename from x/agent/realtime.go rename to x/timeline/keeper.go index a765ffc2..21c42f33 100644 --- a/x/agent/realtime.go +++ b/x/timeline/keeper.go @@ -1,4 +1,5 @@ -package agent +//go:generate go run go.uber.org/mock/mockgen -source=keeper.go -destination=mock/keeper.go +package timeline import ( "context" @@ -12,8 +13,11 @@ import ( 
"github.com/bradfitz/gomemcache/memcache" "github.com/gorilla/websocket" - "github.com/totegamma/concurrent/core" + "github.com/redis/go-redis/v9" "go.opentelemetry.io/otel/attribute" + + "github.com/totegamma/concurrent/client" + "github.com/totegamma/concurrent/core" ) var ( @@ -23,14 +27,60 @@ var ( remoteConns = make(map[string]*websocket.Conn) ) +type Keeper interface { + Start(ctx context.Context) + GetRemoteSubs() []string + GetCurrentSubs(ctx context.Context) []string + GetMetrics() map[string]int64 +} + +type keeper struct { + rdb *redis.Client + mc *memcache.Client + client client.Client + config core.Config +} + +func NewKeeper(rdb *redis.Client, mc *memcache.Client, client client.Client, config core.Config) Keeper { + return &keeper{ + rdb: rdb, + mc: mc, + client: client, + config: config, + } +} + type channelRequest struct { Type string `json:"type"` Channels []string `json:"channels"` } -func (a *agent) GetCurrentSubs(ctx context.Context) []string { +func (k *keeper) GetMetrics() map[string]int64 { + metrics := make(map[string]int64) + metrics["remoteSubs"] = int64(len(remoteSubs)) + metrics["remoteConns"] = int64(len(remoteConns)) + return metrics +} + +func (k *keeper) Start(ctx context.Context) { + go k.watchEventRoutine(ctx) + go k.chunkUpdaterRoutine(ctx) + go k.connectionkeeperRoutine(ctx) +} + +func (k *keeper) GetRemoteSubs() []string { + var subs []string + for _, timelines := range remoteSubs { + for _, timeline := range timelines { + subs = append(subs, timeline) + } + } + return subs +} - query := a.rdb.PubSubChannels(ctx, "*") +func (k *keeper) GetCurrentSubs(ctx context.Context) []string { + + query := k.rdb.PubSubChannels(ctx, "*") channels := query.Val() uniqueChannelsMap := make(map[string]bool) @@ -52,9 +102,9 @@ func (a *agent) GetCurrentSubs(ctx context.Context) []string { // update m.remoteSubs // also update remoteConns if needed -func (a *agent) createInsufficientSubs(ctx context.Context) { +func (k *keeper) 
createInsufficientSubs(ctx context.Context) { - currentSubs := a.GetCurrentSubs(ctx) + currentSubs := k.GetCurrentSubs(ctx) // update remoteSubs // only add new subscriptions @@ -67,7 +117,7 @@ func (a *agent) createInsufficientSubs(ctx context.Context) { } domain := split[len(split)-1] - if domain == a.config.FQDN { + if domain == k.config.FQDN { continue } @@ -87,14 +137,14 @@ func (a *agent) createInsufficientSubs(ctx context.Context) { } for _, domain := range changedRemotes { - a.RemoteSubRoutine(ctx, domain, remoteSubs[domain]) + k.remoteSubRoutine(ctx, domain, remoteSubs[domain]) } } // DeleteExcessiveSubs deletes subscriptions that are not needed anymore -func (a *agent) deleteExcessiveSubs(ctx context.Context) { +func (k *keeper) deleteExcessiveSubs(ctx context.Context) { - currentSubs := a.GetCurrentSubs(ctx) + currentSubs := k.GetCurrentSubs(ctx) var closeList []string @@ -133,12 +183,12 @@ func (a *agent) deleteExcessiveSubs(ctx context.Context) { } // RemoteSubRoutine subscribes to a remote server -func (a *agent) RemoteSubRoutine(ctx context.Context, domain string, timelines []string) { +func (k *keeper) remoteSubRoutine(ctx context.Context, domain string, timelines []string) { if _, ok := remoteConns[domain]; !ok { // new server, create new connection // check server availability - domainInfo, err := a.client.GetDomain(ctx, domain, nil) + domainInfo, err := k.client.GetDomain(ctx, domain, nil) if err != nil { slog.Error( fmt.Sprintf("fail to get domain info: %v", err), @@ -147,7 +197,7 @@ func (a *agent) RemoteSubRoutine(ctx context.Context, domain string, timelines [ ) return } - if domainInfo.Dimension != a.config.Dimension { + if domainInfo.Dimension != k.config.Dimension { slog.Error( fmt.Sprintf("domain dimention mismatch: %s", domain), slog.String("module", "agent"), @@ -256,7 +306,7 @@ func (a *agent) RemoteSubRoutine(ctx context.Context, domain string, timelines [ } // publish message to Redis - err = a.rdb.Publish(ctx, event.Timeline, 
string(message)).Err() + err = k.rdb.Publish(ctx, event.Timeline, string(message)).Err() if err != nil { slog.Error( fmt.Sprintf("fail to publish message to Redis"), @@ -267,6 +317,10 @@ func (a *agent) RemoteSubRoutine(ctx context.Context, domain string, timelines [ continue } + if event.Item == nil || event.Item.ResourceID == "" { + continue + } + // update cache json, err := json.Marshal(event.Item) if err != nil { @@ -278,43 +332,18 @@ func (a *agent) RemoteSubRoutine(ctx context.Context, domain string, timelines [ ) continue } - json = append(json, ',') - - timelineID := event.Item.TimelineID - if !strings.Contains(timelineID, "@") { - timelineID = timelineID + "@" + domain - } + val := "," + string(json) // update cache - // first, try to get itr - itr := "timeline:itr:all:" + timelineID + ":" + core.Time2Chunk(event.Item.CDate) - itrVal, err := a.mc.Get(itr) - var cacheKey string - if err == nil { - cacheKey = string(itrVal.Value) - } else { - // 最新時刻のイテレーターがないということは、キャッシュがないということ - // とはいえ今後はいい感じにキャッシュを作れるようにしたい - // 例えば、今までのキャッシュを(現時点では取得不能)最新のitrが指すようにして - // 今までのキャッシュを更新し続けるとか... 
(TODO) - // cacheKey := "timeline:body:all:" + event.Item.TimelienID + ":" + core.Time2Chunk(event.Item.CDate) - slog.Info( - fmt.Sprintf("no need to update cache: %s", itr), - slog.String("module", "agent"), - slog.String("group", "realtime"), - ) - continue - } - - err = a.mc.Append(&memcache.Item{Key: cacheKey, Value: json}) - if err != nil { - slog.Error( - fmt.Sprintf("fail to update cache: %s", itr), - slog.String("error", err.Error()), - slog.String("module", "agent"), - slog.String("group", "realtime"), - ) - } + // Note: see x/timeline/repository.go CreateItem + epoch := core.Time2Chunk(event.Item.CDate) + itrKey := "tl:itr:" + event.Timeline + ":" + epoch + bodyKey := "tl:body:" + event.Timeline + ":" + epoch + // fmt.Println("[keep] set cache", itrKey, " -> ", bodyKey) + err = k.mc.Replace(&memcache.Item{Key: itrKey, Value: []byte(epoch)}) + // fmt.Println("[keep] replace err", err) + err = k.mc.Prepend(&memcache.Item{Key: bodyKey, Value: []byte(val)}) + // fmt.Println("[keep] prepend err", err) case <-pingTicker.C: if err := c.WriteMessage(websocket.PingMessage, []byte{}); err != nil { @@ -360,9 +389,9 @@ func (a *agent) RemoteSubRoutine(ctx context.Context, domain string, timelines [ ) } -// ConnectionKeeperRoutine +// ConnectionkeeperRoutine // 接続が失われている場合、再接続を試みる -func (a *agent) connectionKeeperRoutine(ctx context.Context) { +func (k *keeper) connectionkeeperRoutine(ctx context.Context) { ticker := time.NewTicker(time.Second * 10) defer ticker.Stop() @@ -370,13 +399,7 @@ func (a *agent) connectionKeeperRoutine(ctx context.Context) { for { select { case <-ticker.C: - a.createInsufficientSubs(ctx) - slog.InfoContext( - ctx, - fmt.Sprintf("connection keeper: %d/%d", len(remoteConns), len(remoteSubs)), - slog.String("module", "agent"), - slog.String("group", "realtime"), - ) + k.createInsufficientSubs(ctx) for domain := range remoteSubs { if _, ok := remoteConns[domain]; !ok { slog.Info( @@ -384,7 +407,7 @@ func (a *agent) connectionKeeperRoutine(ctx 
context.Context) { slog.String("module", "agent"), slog.String("group", "realtime"), ) - a.RemoteSubRoutine(ctx, domain, remoteSubs[domain]) + k.remoteSubRoutine(ctx, domain, remoteSubs[domain]) } } } @@ -392,7 +415,7 @@ func (a *agent) connectionKeeperRoutine(ctx context.Context) { } // ChunkUpdaterRoutine -func (a *agent) chunkUpdaterRoutine(ctx context.Context) { +func (k *keeper) chunkUpdaterRoutine(ctx context.Context) { currentChunk := core.Time2Chunk(time.Now()) for { // 次の実行時刻を計算 @@ -423,16 +446,16 @@ func (a *agent) chunkUpdaterRoutine(ctx context.Context) { slog.String("group", "realtime"), ) - a.deleteExcessiveSubs(ctx) + k.deleteExcessiveSubs(ctx) currentChunk = newChunk } } // watchEventRoutine -func (a *agent) watchEventRoutine(ctx context.Context) { +func (k *keeper) watchEventRoutine(ctx context.Context) { - pubsub := a.rdb.Subscribe(ctx, "concrnt:subscription:updated") + pubsub := k.rdb.Subscribe(ctx, "concrnt:subscription:updated") defer pubsub.Close() psch := pubsub.Channel() @@ -456,7 +479,7 @@ func (a *agent) watchEventRoutine(ctx context.Context) { slog.String("module", "agent"), slog.String("group", "realtime"), ) - a.createInsufficientSubs(ctx) + k.createInsufficientSubs(ctx) } } diff --git a/x/timeline/mock/keeper.go b/x/timeline/mock/keeper.go new file mode 100644 index 00000000..2923eb35 --- /dev/null +++ b/x/timeline/mock/keeper.go @@ -0,0 +1,80 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: keeper.go +// +// Generated by this command: +// +// mockgen -source=keeper.go -destination=mock/keeper.go +// + +// Package mock_timeline is a generated GoMock package. +package mock_timeline + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockKeeper is a mock of Keeper interface. +type MockKeeper struct { + ctrl *gomock.Controller + recorder *MockKeeperMockRecorder +} + +// MockKeeperMockRecorder is the mock recorder for MockKeeper. 
+type MockKeeperMockRecorder struct { + mock *MockKeeper +} + +// NewMockKeeper creates a new mock instance. +func NewMockKeeper(ctrl *gomock.Controller) *MockKeeper { + mock := &MockKeeper{ctrl: ctrl} + mock.recorder = &MockKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKeeper) EXPECT() *MockKeeperMockRecorder { + return m.recorder +} + +// GetCurrentSubs mocks base method. +func (m *MockKeeper) GetCurrentSubs(ctx context.Context) []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSubs", ctx) + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetCurrentSubs indicates an expected call of GetCurrentSubs. +func (mr *MockKeeperMockRecorder) GetCurrentSubs(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSubs", reflect.TypeOf((*MockKeeper)(nil).GetCurrentSubs), ctx) +} + +// GetRemoteSubs mocks base method. +func (m *MockKeeper) GetRemoteSubs() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRemoteSubs") + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetRemoteSubs indicates an expected call of GetRemoteSubs. +func (mr *MockKeeperMockRecorder) GetRemoteSubs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemoteSubs", reflect.TypeOf((*MockKeeper)(nil).GetRemoteSubs)) +} + +// Start mocks base method. +func (m *MockKeeper) Start(ctx context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Start", ctx) +} + +// Start indicates an expected call of Start. 
+func (mr *MockKeeperMockRecorder) Start(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockKeeper)(nil).Start), ctx) +} diff --git a/x/timeline/mock/repository.go b/x/timeline/mock/repository.go new file mode 100644 index 00000000..75b31481 --- /dev/null +++ b/x/timeline/mock/repository.go @@ -0,0 +1,425 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: repository.go +// +// Generated by this command: +// +// mockgen -source=repository.go -destination=mock/repository.go +// + +// Package mock_timeline is a generated GoMock package. +package mock_timeline + +import ( + context "context" + reflect "reflect" + time "time" + + core "github.com/totegamma/concurrent/core" + gomock "go.uber.org/mock/gomock" +) + +// MockRepository is a mock of Repository interface. +type MockRepository struct { + ctrl *gomock.Controller + recorder *MockRepositoryMockRecorder +} + +// MockRepositoryMockRecorder is the mock recorder for MockRepository. +type MockRepositoryMockRecorder struct { + mock *MockRepository +} + +// NewMockRepository creates a new mock instance. +func NewMockRepository(ctrl *gomock.Controller) *MockRepository { + mock := &MockRepository{ctrl: ctrl} + mock.recorder = &MockRepositoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRepository) EXPECT() *MockRepositoryMockRecorder { + return m.recorder +} + +// Count mocks base method. +func (m *MockRepository) Count(ctx context.Context) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Count", ctx) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Count indicates an expected call of Count. 
+func (mr *MockRepositoryMockRecorder) Count(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockRepository)(nil).Count), ctx) +} + +// CreateItem mocks base method. +func (m *MockRepository) CreateItem(ctx context.Context, item core.TimelineItem) (core.TimelineItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateItem", ctx, item) + ret0, _ := ret[0].(core.TimelineItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateItem indicates an expected call of CreateItem. +func (mr *MockRepositoryMockRecorder) CreateItem(ctx, item any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateItem", reflect.TypeOf((*MockRepository)(nil).CreateItem), ctx, item) +} + +// DeleteItem mocks base method. +func (m *MockRepository) DeleteItem(ctx context.Context, timelineID, objectID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteItem", ctx, timelineID, objectID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteItem indicates an expected call of DeleteItem. +func (mr *MockRepositoryMockRecorder) DeleteItem(ctx, timelineID, objectID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteItem", reflect.TypeOf((*MockRepository)(nil).DeleteItem), ctx, timelineID, objectID) +} + +// DeleteItemByResourceID mocks base method. +func (m *MockRepository) DeleteItemByResourceID(ctx context.Context, resourceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteItemByResourceID", ctx, resourceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteItemByResourceID indicates an expected call of DeleteItemByResourceID. 
+func (mr *MockRepositoryMockRecorder) DeleteItemByResourceID(ctx, resourceID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteItemByResourceID", reflect.TypeOf((*MockRepository)(nil).DeleteItemByResourceID), ctx, resourceID) +} + +// DeleteTimeline mocks base method. +func (m *MockRepository) DeleteTimeline(ctx context.Context, key string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTimeline", ctx, key) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTimeline indicates an expected call of DeleteTimeline. +func (mr *MockRepositoryMockRecorder) DeleteTimeline(ctx, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTimeline", reflect.TypeOf((*MockRepository)(nil).DeleteTimeline), ctx, key) +} + +// GetChunkIterators mocks base method. +func (m *MockRepository) GetChunkIterators(ctx context.Context, timelines []string, chunk string) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChunkIterators", ctx, timelines, chunk) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChunkIterators indicates an expected call of GetChunkIterators. +func (mr *MockRepositoryMockRecorder) GetChunkIterators(ctx, timelines, chunk any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChunkIterators", reflect.TypeOf((*MockRepository)(nil).GetChunkIterators), ctx, timelines, chunk) +} + +// GetChunksFromCache mocks base method. +func (m *MockRepository) GetChunksFromCache(ctx context.Context, timelines []string, chunk string) (map[string]core.Chunk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChunksFromCache", ctx, timelines, chunk) + ret0, _ := ret[0].(map[string]core.Chunk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChunksFromCache indicates an expected call of GetChunksFromCache. 
+func (mr *MockRepositoryMockRecorder) GetChunksFromCache(ctx, timelines, chunk any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChunksFromCache", reflect.TypeOf((*MockRepository)(nil).GetChunksFromCache), ctx, timelines, chunk) +} + +// GetChunksFromDB mocks base method. +func (m *MockRepository) GetChunksFromDB(ctx context.Context, timelines []string, chunk string) (map[string]core.Chunk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChunksFromDB", ctx, timelines, chunk) + ret0, _ := ret[0].(map[string]core.Chunk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChunksFromDB indicates an expected call of GetChunksFromDB. +func (mr *MockRepositoryMockRecorder) GetChunksFromDB(ctx, timelines, chunk any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChunksFromDB", reflect.TypeOf((*MockRepository)(nil).GetChunksFromDB), ctx, timelines, chunk) +} + +// GetImmediateItems mocks base method. +func (m *MockRepository) GetImmediateItems(ctx context.Context, timelineID string, since time.Time, limit int) ([]core.TimelineItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetImmediateItems", ctx, timelineID, since, limit) + ret0, _ := ret[0].([]core.TimelineItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetImmediateItems indicates an expected call of GetImmediateItems. +func (mr *MockRepositoryMockRecorder) GetImmediateItems(ctx, timelineID, since, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImmediateItems", reflect.TypeOf((*MockRepository)(nil).GetImmediateItems), ctx, timelineID, since, limit) +} + +// GetItem mocks base method. 
+func (m *MockRepository) GetItem(ctx context.Context, timelineID, objectID string) (core.TimelineItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetItem", ctx, timelineID, objectID) + ret0, _ := ret[0].(core.TimelineItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetItem indicates an expected call of GetItem. +func (mr *MockRepositoryMockRecorder) GetItem(ctx, timelineID, objectID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetItem", reflect.TypeOf((*MockRepository)(nil).GetItem), ctx, timelineID, objectID) +} + +// GetMetrics mocks base method. +func (m *MockRepository) GetMetrics() map[string]int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMetrics") + ret0, _ := ret[0].(map[string]int64) + return ret0 +} + +// GetMetrics indicates an expected call of GetMetrics. +func (mr *MockRepositoryMockRecorder) GetMetrics() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetrics", reflect.TypeOf((*MockRepository)(nil).GetMetrics)) +} + +// GetNormalizationCache mocks base method. +func (m *MockRepository) GetNormalizationCache(ctx context.Context, timelineID string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNormalizationCache", ctx, timelineID) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNormalizationCache indicates an expected call of GetNormalizationCache. +func (mr *MockRepositoryMockRecorder) GetNormalizationCache(ctx, timelineID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNormalizationCache", reflect.TypeOf((*MockRepository)(nil).GetNormalizationCache), ctx, timelineID) +} + +// GetRecentItems mocks base method. 
+func (m *MockRepository) GetRecentItems(ctx context.Context, timelineID string, until time.Time, limit int) ([]core.TimelineItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRecentItems", ctx, timelineID, until, limit) + ret0, _ := ret[0].([]core.TimelineItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRecentItems indicates an expected call of GetRecentItems. +func (mr *MockRepositoryMockRecorder) GetRecentItems(ctx, timelineID, until, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRecentItems", reflect.TypeOf((*MockRepository)(nil).GetRecentItems), ctx, timelineID, until, limit) +} + +// GetTimeline mocks base method. +func (m *MockRepository) GetTimeline(ctx context.Context, key string) (core.Timeline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimeline", ctx, key) + ret0, _ := ret[0].(core.Timeline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTimeline indicates an expected call of GetTimeline. +func (mr *MockRepositoryMockRecorder) GetTimeline(ctx, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimeline", reflect.TypeOf((*MockRepository)(nil).GetTimeline), ctx, key) +} + +// GetTimelineFromRemote mocks base method. +func (m *MockRepository) GetTimelineFromRemote(ctx context.Context, host, key string) (core.Timeline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimelineFromRemote", ctx, host, key) + ret0, _ := ret[0].(core.Timeline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTimelineFromRemote indicates an expected call of GetTimelineFromRemote. 
+func (mr *MockRepositoryMockRecorder) GetTimelineFromRemote(ctx, host, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimelineFromRemote", reflect.TypeOf((*MockRepository)(nil).GetTimelineFromRemote), ctx, host, key) +} + +// ListTimelineByAuthor mocks base method. +func (m *MockRepository) ListTimelineByAuthor(ctx context.Context, author string) ([]core.Timeline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTimelineByAuthor", ctx, author) + ret0, _ := ret[0].([]core.Timeline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTimelineByAuthor indicates an expected call of ListTimelineByAuthor. +func (mr *MockRepositoryMockRecorder) ListTimelineByAuthor(ctx, author any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTimelineByAuthor", reflect.TypeOf((*MockRepository)(nil).ListTimelineByAuthor), ctx, author) +} + +// ListTimelineByAuthorOwned mocks base method. +func (m *MockRepository) ListTimelineByAuthorOwned(ctx context.Context, author string) ([]core.Timeline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTimelineByAuthorOwned", ctx, author) + ret0, _ := ret[0].([]core.Timeline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTimelineByAuthorOwned indicates an expected call of ListTimelineByAuthorOwned. +func (mr *MockRepositoryMockRecorder) ListTimelineByAuthorOwned(ctx, author any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTimelineByAuthorOwned", reflect.TypeOf((*MockRepository)(nil).ListTimelineByAuthorOwned), ctx, author) +} + +// ListTimelineBySchema mocks base method. 
+func (m *MockRepository) ListTimelineBySchema(ctx context.Context, schema string) ([]core.Timeline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTimelineBySchema", ctx, schema) + ret0, _ := ret[0].([]core.Timeline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTimelineBySchema indicates an expected call of ListTimelineBySchema. +func (mr *MockRepositoryMockRecorder) ListTimelineBySchema(ctx, schema any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTimelineBySchema", reflect.TypeOf((*MockRepository)(nil).ListTimelineBySchema), ctx, schema) +} + +// ListTimelineSubscriptions mocks base method. +func (m *MockRepository) ListTimelineSubscriptions(ctx context.Context) (map[string]int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTimelineSubscriptions", ctx) + ret0, _ := ret[0].(map[string]int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTimelineSubscriptions indicates an expected call of ListTimelineSubscriptions. +func (mr *MockRepositoryMockRecorder) ListTimelineSubscriptions(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTimelineSubscriptions", reflect.TypeOf((*MockRepository)(nil).ListTimelineSubscriptions), ctx) +} + +// LoadChunkBodies mocks base method. +func (m *MockRepository) LoadChunkBodies(ctx context.Context, query map[string]string) (map[string]core.Chunk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadChunkBodies", ctx, query) + ret0, _ := ret[0].(map[string]core.Chunk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LoadChunkBodies indicates an expected call of LoadChunkBodies. 
+func (mr *MockRepositoryMockRecorder) LoadChunkBodies(ctx, query any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadChunkBodies", reflect.TypeOf((*MockRepository)(nil).LoadChunkBodies), ctx, query) +} + +// LookupChunkItrs mocks base method. +func (m *MockRepository) LookupChunkItrs(ctx context.Context, timelines []string, epoch string) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LookupChunkItrs", ctx, timelines, epoch) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LookupChunkItrs indicates an expected call of LookupChunkItrs. +func (mr *MockRepositoryMockRecorder) LookupChunkItrs(ctx, timelines, epoch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LookupChunkItrs", reflect.TypeOf((*MockRepository)(nil).LookupChunkItrs), ctx, timelines, epoch) +} + +// PublishEvent mocks base method. +func (m *MockRepository) PublishEvent(ctx context.Context, event core.Event) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublishEvent", ctx, event) + ret0, _ := ret[0].(error) + return ret0 +} + +// PublishEvent indicates an expected call of PublishEvent. +func (mr *MockRepositoryMockRecorder) PublishEvent(ctx, event any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishEvent", reflect.TypeOf((*MockRepository)(nil).PublishEvent), ctx, event) +} + +// Query mocks base method. +func (m *MockRepository) Query(ctx context.Context, timelineID, schema, owner, author string, until time.Time, limit int) ([]core.TimelineItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Query", ctx, timelineID, schema, owner, author, until, limit) + ret0, _ := ret[0].([]core.TimelineItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Query indicates an expected call of Query. 
+func (mr *MockRepositoryMockRecorder) Query(ctx, timelineID, schema, owner, author, until, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockRepository)(nil).Query), ctx, timelineID, schema, owner, author, until, limit) +} + +// SetNormalizationCache mocks base method. +func (m *MockRepository) SetNormalizationCache(ctx context.Context, timelineID, value string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNormalizationCache", ctx, timelineID, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetNormalizationCache indicates an expected call of SetNormalizationCache. +func (mr *MockRepositoryMockRecorder) SetNormalizationCache(ctx, timelineID, value any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNormalizationCache", reflect.TypeOf((*MockRepository)(nil).SetNormalizationCache), ctx, timelineID, value) +} + +// Subscribe mocks base method. +func (m *MockRepository) Subscribe(ctx context.Context, channels []string, event chan<- core.Event) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Subscribe", ctx, channels, event) + ret0, _ := ret[0].(error) + return ret0 +} + +// Subscribe indicates an expected call of Subscribe. +func (mr *MockRepositoryMockRecorder) Subscribe(ctx, channels, event any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockRepository)(nil).Subscribe), ctx, channels, event) +} + +// UpsertTimeline mocks base method. +func (m *MockRepository) UpsertTimeline(ctx context.Context, timeline core.Timeline) (core.Timeline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertTimeline", ctx, timeline) + ret0, _ := ret[0].(core.Timeline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpsertTimeline indicates an expected call of UpsertTimeline. 
+func (mr *MockRepositoryMockRecorder) UpsertTimeline(ctx, timeline any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTimeline", reflect.TypeOf((*MockRepository)(nil).UpsertTimeline), ctx, timeline) +} diff --git a/x/timeline/repository.go b/x/timeline/repository.go index c079d5d3..f06abbb6 100644 --- a/x/timeline/repository.go +++ b/x/timeline/repository.go @@ -1,3 +1,4 @@ +//go:generate go run go.uber.org/mock/mockgen -source=repository.go -destination=mock/repository.go package timeline import ( @@ -13,6 +14,7 @@ import ( "github.com/bradfitz/gomemcache/memcache" "github.com/pkg/errors" "github.com/redis/go-redis/v9" + "go.opentelemetry.io/otel/attribute" "gorm.io/gorm" "github.com/totegamma/concurrent/client" @@ -38,11 +40,6 @@ type Repository interface { GetRecentItems(ctx context.Context, timelineID string, until time.Time, limit int) ([]core.TimelineItem, error) GetImmediateItems(ctx context.Context, timelineID string, since time.Time, limit int) ([]core.TimelineItem, error) - GetChunksFromCache(ctx context.Context, timelines []string, chunk string) (map[string]core.Chunk, error) - GetChunksFromDB(ctx context.Context, timelines []string, chunk string) (map[string]core.Chunk, error) - GetChunkIterators(ctx context.Context, timelines []string, chunk string) (map[string]string, error) - GetChunksFromRemote(ctx context.Context, host string, timelines []string, queryTime time.Time) (map[string]core.Chunk, error) - SaveToCache(ctx context.Context, chunks map[string]core.Chunk, queryTime time.Time) error PublishEvent(ctx context.Context, event core.Event) error ListTimelineSubscriptions(ctx context.Context) (map[string]int64, error) @@ -54,22 +51,45 @@ type Repository interface { GetNormalizationCache(ctx context.Context, timelineID string) (string, error) Query(ctx context.Context, timelineID, schema, owner, author string, until time.Time, limit int) ([]core.TimelineItem, error) + + LookupChunkItrs(ctx 
context.Context, timelines []string, epoch string) (map[string]string, error) + LoadChunkBodies(ctx context.Context, query map[string]string) (map[string]core.Chunk, error) + + GetMetrics() map[string]int64 } type repository struct { db *gorm.DB rdb *redis.Client mc *memcache.Client + keeper Keeper client client.Client schema core.SchemaService config core.Config + + lookupChunkItrsCacheMisses int64 + lookupChunkItrsCacheHits int64 + loadChunkBodiesCacheMisses int64 + loadChunkBodiesCacheHits int64 } // NewRepository creates a new timeline repository -func NewRepository(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client client.Client, schema core.SchemaService, config core.Config) Repository { +func NewRepository(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, keeper Keeper, client client.Client, schema core.SchemaService, config core.Config) Repository { + return &repository{ + db, + rdb, + mc, + keeper, + client, + schema, + config, + 0, 0, 0, 0, + } +} +func (r *repository) setCurrentCount() { var count int64 - err := db.Model(&core.Timeline{}).Count(&count).Error + err := r.db.Model(&core.Timeline{}).Count(&count).Error if err != nil { slog.Error( "failed to count timelines", @@ -77,16 +97,395 @@ func NewRepository(db *gorm.DB, rdb *redis.Client, mc *memcache.Client, client c ) } - mc.Set(&memcache.Item{Key: "timeline_count", Value: []byte(strconv.FormatInt(count, 10))}) + r.mc.Set(&memcache.Item{Key: "timeline_count", Value: []byte(strconv.FormatInt(count, 10))}) +} + +func (r *repository) GetMetrics() map[string]int64 { + + keeperMetrics := r.keeper.GetMetrics() + + repoMetrics := map[string]int64{ + "lookup_chunk_itr_cache_misses": r.lookupChunkItrsCacheMisses, + "lookup_chunk_itr_cache_hits": r.lookupChunkItrsCacheHits, + "load_chunk_bodies_cache_misses": r.loadChunkBodiesCacheMisses, + "load_chunk_bodies_cache_hits": r.loadChunkBodiesCacheHits, + } + + for k, v := range keeperMetrics { + repoMetrics[k] = v + } - return &repository{db, rdb, 
mc, client, schema, config} + return repoMetrics } const ( - normaalizationCachePrefix = "timeline:normalize:" + normaalizationCachePrefix = "tl:norm:" normaalizationCacheTTL = 60 * 15 // 15 minutes + + tlItrCachePrefix = "tl:itr:" + tlItrCacheTTL = 60 * 60 * 24 * 2 // 2 days + tlBodyCachePrefix = "tl:body:" + tlBodyCacheTTL = 60 * 60 * 24 * 2 // 2 days + + defaultChunkSize = 32 ) +func (r *repository) LookupChunkItrs(ctx context.Context, normalized []string, epoch string) (map[string]string, error) { + ctx, span := tracer.Start(ctx, "Timeline.Repository.LookupChunkItr") + defer span.End() + + keys := make([]string, len(normalized)) + keytable := make(map[string]string) + for i, timeline := range normalized { + key := tlItrCachePrefix + timeline + ":" + epoch + keys[i] = key + keytable[key] = timeline + } + + cache, err := r.mc.GetMulti(keys) + if err != nil { + span.RecordError(err) + //return nil, err + } + + var result = map[string]string{} + var missed = []string{} + for _, key := range keys { + timeline := keytable[key] + if cache[key] != nil { + result[timeline] = string(cache[key].Value) + r.lookupChunkItrsCacheHits++ + } else { + missed = append(missed, timeline) + r.lookupChunkItrsCacheMisses++ + } + } + + var domainMap = make(map[string][]string) + for _, timeline := range missed { + split := strings.Split(timeline, "@") + domain := split[len(split)-1] + if len(split) >= 2 { + if _, ok := domainMap[domain]; !ok { + domainMap[domain] = make([]string, 0) + } + if domain == r.config.FQDN { + domainMap[domain] = append(domainMap[domain], split[0]) + } else { + domainMap[domain] = append(domainMap[domain], timeline) + } + } + } + + for domain, timelines := range domainMap { + if domain == r.config.FQDN { + res, err := r.lookupLocalItrs(ctx, timelines, epoch) + if err != nil { + span.RecordError(err) + continue + } + for k, v := range res { + result[k] = v + } + } else { + res, err := r.lookupRemoteItrs(ctx, domain, timelines, epoch) + if err != nil { + 
span.RecordError(err) + continue + } + for k, v := range res { + result[k] = v + } + } + } + + return result, nil +} + +func (r *repository) LoadChunkBodies(ctx context.Context, query map[string]string) (map[string]core.Chunk, error) { + ctx, span := tracer.Start(ctx, "Timeline.Repository.LoadChunkBodies") + defer span.End() + + keys := []string{} + keytable := map[string]string{} + for timeline, epoch := range query { + key := tlBodyCachePrefix + timeline + ":" + epoch + keys = append(keys, key) + keytable[key] = timeline + } + + cache, err := r.mc.GetMulti(keys) + if err != nil { + span.RecordError(err) + //return nil, err + } + + result := make(map[string]core.Chunk) + var missed = map[string]string{} + + for _, key := range keys { + timeline := keytable[key] + if cache[key] != nil { + var items []core.TimelineItem + cacheStr := string(cache[key].Value) + cacheStr = cacheStr[1:] + cacheStr = "[" + cacheStr + "]" + err = json.Unmarshal([]byte(cacheStr), &items) + if err != nil { + span.RecordError(err) + continue + } + result[timeline] = core.Chunk{ + Key: key, + Epoch: query[timeline], + Items: items, + } + r.loadChunkBodiesCacheHits++ + } else { + missed[timeline] = query[timeline] + r.loadChunkBodiesCacheMisses++ + } + } + + var domainMap = make(map[string]map[string]string) + for timeline, epoch := range missed { + split := strings.Split(timeline, "@") + domain := split[len(split)-1] + if len(split) >= 2 { + if _, ok := domainMap[domain]; !ok { + domainMap[domain] = make(map[string]string) + } + domainMap[domain][timeline] = epoch + } + } + + for domain, q := range domainMap { + if domain == r.config.FQDN { + for timeline, epoch := range q { + res, err := r.loadLocalBody(ctx, timeline, epoch) + if err != nil { + span.RecordError(err) + continue + } + result[timeline] = res + } + } else { + res, err := r.loadRemoteBodies(ctx, domain, q) + if err != nil { + span.RecordError(err) + continue + } + for k, v := range res { + result[k] = v + } + } + } + + return 
result, nil +} + +func (r *repository) lookupLocalItrs(ctx context.Context, timelines []string, epoch string) (map[string]string, error) { + ctx, span := tracer.Start(ctx, "Timeline.Repository.LookupLocalItr") + defer span.End() + + dbids := []string{} + for _, timeline := range timelines { + dbid := timeline + if strings.Contains(dbid, "@") { + split := strings.Split(timeline, "@") + if len(split) > 1 && split[len(split)-1] != r.config.FQDN { + span.RecordError(fmt.Errorf("invalid timeline id: %s", timeline)) + continue + } + dbid = split[0] + } + if len(dbid) == 27 { + if dbid[0] != 't' { + span.RecordError(fmt.Errorf("timeline typed-id must start with 't' %s", timeline)) + continue + } + dbid = dbid[1:] + } + if len(dbid) != 26 { + span.RecordError(fmt.Errorf("timeline id must be 26 characters long %s", timeline)) + continue + } + dbids = append(dbids, dbid) + } + + result := make(map[string]string) + if len(dbids) > 0 { + var res []struct { + TimelineID string + MaxCDate time.Time + } + + err := r.db.WithContext(ctx). + Model(&core.TimelineItem{}). + Select("timeline_id, max(c_date) as max_c_date"). + Where("timeline_id in (?) and c_date <= ?", dbids, core.Chunk2RecentTime(epoch)). + Group("timeline_id"). 
+ Scan(&res).Error + if err != nil { + span.RecordError(err) + return nil, err + } + + for _, item := range res { + id := "t" + item.TimelineID + "@" + r.config.FQDN + key := tlItrCachePrefix + id + ":" + epoch + value := core.Time2Chunk(item.MaxCDate) + span.AddEvent(fmt.Sprintf("cache lookupLocalItrs: %s", key)) + r.mc.Set(&memcache.Item{Key: key, Value: []byte(value), Expiration: tlItrCacheTTL}) + result[id] = value + } + } + + return result, nil +} + +func (r *repository) lookupRemoteItrs(ctx context.Context, domain string, timelines []string, epoch string) (map[string]string, error) { + ctx, span := tracer.Start(ctx, "Timeline.Repository.LookupRemoteItr") + defer span.End() + + span.SetAttributes( + attribute.String("domain", domain), + attribute.StringSlice("timelines", timelines), + attribute.String("epoch", epoch), + ) + + result, err := r.client.GetChunkItrs(ctx, domain, timelines, epoch, nil) + if err != nil { + span.RecordError(err) + return nil, err + } + + currentSubscriptions := r.keeper.GetRemoteSubs() + span.SetAttributes(attribute.StringSlice("currentSubscriptions", currentSubscriptions)) + for timeline, itr := range result { + + // 最新のチャンクに関しては、socketが張られてるキャッシュしか温められないのでそれだけ保持 + if epoch == core.Time2Chunk(time.Now()) && !slices.Contains(currentSubscriptions, timeline) { + span.AddEvent(fmt.Sprintf("continue: %s", timeline)) + continue + } + + key := tlItrCachePrefix + timeline + ":" + epoch + span.AddEvent(fmt.Sprintf("cache lookupRemoteItrs: %s", key)) + r.mc.Set(&memcache.Item{Key: key, Value: []byte(itr), Expiration: tlItrCacheTTL}) + } + + return result, nil +} + +func (r *repository) loadLocalBody(ctx context.Context, timeline string, epoch string) (core.Chunk, error) { + ctx, span := tracer.Start(ctx, "Timeline.Repository.LoadLocalBody") + defer span.End() + + chunkDate := core.Chunk2RecentTime(epoch) + prevChunkDate := core.Chunk2RecentTime(core.PrevChunk(epoch)) + + timelineID := timeline + if strings.Contains(timelineID, "@") { + 
timelineID = strings.Split(timelineID, "@")[0] + } + if len(timelineID) == 27 { + if timelineID[0] != 't' { + return core.Chunk{}, fmt.Errorf("timeline typed-id must start with 't'") + } + timelineID = timelineID[1:] + } + + items := []core.TimelineItem{} + + err := r.db.WithContext(ctx). + Where("timeline_id = ? and c_date <= ?", timelineID, chunkDate). + Order("c_date desc"). + Limit(defaultChunkSize). + Find(&items).Error + + // 得られた中で最も古いアイテムがチャンクをまたいでない場合、取得漏れがある可能性がある + // 代わりに、チャンク内のレンジの全てのアイテムを取得する + if len(items) > 0 && items[len(items)-1].CDate.After(prevChunkDate) { + err = r.db.WithContext(ctx). + Where("timeline_id = ? and ? < c_date and c_date <= ?", timelineID, prevChunkDate, chunkDate). + Order("c_date desc"). + Find(&items).Error + } + + if err != nil { + span.RecordError(err) + return core.Chunk{}, err + } + + // append domain to timelineID + for i, item := range items { + items[i].TimelineID = item.TimelineID + "@" + r.config.FQDN + } + + b, err := json.Marshal(items) + if err != nil { + span.RecordError(err) + return core.Chunk{}, err + } + key := tlBodyCachePrefix + timeline + ":" + epoch + cacheStr := "," + string(b[1:len(b)-1]) + span.AddEvent(fmt.Sprintf("cache loadLocalBody: %s", key)) + err = r.mc.Set(&memcache.Item{Key: key, Value: []byte(cacheStr), Expiration: tlBodyCacheTTL}) + if err != nil { + span.RecordError(err) + } + + return core.Chunk{ + Key: key, + Epoch: epoch, + Items: items, + }, nil + +} + +func (r *repository) loadRemoteBodies(ctx context.Context, remote string, query map[string]string) (map[string]core.Chunk, error) { + ctx, span := tracer.Start(ctx, "Timeline.Repository.LoadRemoteBody") + defer span.End() + + result, err := r.client.GetChunkBodies(ctx, remote, query, nil) + if err != nil { + span.RecordError(err) + return nil, err + } + + currentSubscriptions := r.keeper.GetRemoteSubs() + for timeline, chunk := range result { + + // 最新のチャンクに関しては、socketが張られてるキャッシュしか温められないのでそれだけ保持 + if chunk.Epoch == core.Time2Chunk(time.Now()) && 
!slices.Contains(currentSubscriptions, timeline) { + span.AddEvent(fmt.Sprintf("continue: %s", timeline)) + continue + } + + if len(chunk.Items) == 0 { + span.AddEvent(fmt.Sprintf("empty chunk: %s", timeline)) + continue + } + + key := tlBodyCachePrefix + timeline + ":" + chunk.Epoch + b, err := json.Marshal(chunk.Items) + if err != nil { + span.RecordError(err) + continue + } + cacheStr := "," + string(b[1:len(b)-1]) + span.AddEvent(fmt.Sprintf("cache loadRemoteBodies: %s", key)) + err = r.mc.Set(&memcache.Item{Key: key, Value: []byte(cacheStr), Expiration: tlBodyCacheTTL}) + if err != nil { + span.RecordError(err) + continue + } + } + + return result, nil +} + func (r *repository) SetNormalizationCache(ctx context.Context, timelineID string, value string) error { return r.mc.Set(&memcache.Item{Key: normaalizationCachePrefix + timelineID, Value: []byte(value), Expiration: normaalizationCacheTTL}) } @@ -186,6 +585,12 @@ func (r *repository) Count(ctx context.Context) (int64, error) { item, err := r.mc.Get("timeline_count") if err != nil { span.RecordError(err) + + if errors.Is(err, memcache.ErrCacheMiss) { + r.setCurrentCount() + return 0, errors.Wrap(err, "trying to fix...") + } + return 0, err } @@ -264,235 +669,6 @@ func (r *repository) GetTimelineFromRemote(ctx context.Context, host string, key return timeline, nil } -func (r *repository) GetChunksFromRemote(ctx context.Context, host string, timelines []string, queryTime time.Time) (map[string]core.Chunk, error) { - ctx, span := tracer.Start(ctx, "Timeline.Repository.GetChunksFromRemote") - defer span.End() - - chunks, err := r.client.GetChunks(ctx, host, timelines, queryTime, nil) - if err != nil { - span.RecordError(err) - return nil, err - } - - currentSubsciptions := r.GetCurrentSubs(ctx) - - cacheChunks := make(map[string]core.Chunk) - for timelineID, chunk := range chunks { - if slices.Contains(currentSubsciptions, timelineID) { - cacheChunks[timelineID] = chunk - } - } - - err = r.SaveToCache(ctx, 
cacheChunks, queryTime) - if err != nil { - slog.ErrorContext( - ctx, "fail to save cache", - slog.String("error", err.Error()), - slog.String("module", "timeline"), - ) - span.RecordError(err) - return nil, err - } - - return chunks, nil -} - -// SaveToCache saves items to cache -func (r *repository) SaveToCache(ctx context.Context, chunks map[string]core.Chunk, queryTime time.Time) error { - ctx, span := tracer.Start(ctx, "Timeline.Repository.SaveToCache") - defer span.End() - - for timelineID, chunk := range chunks { - //save iterator - itrKey := "timeline:itr:all:" + timelineID + ":" + core.Time2Chunk(queryTime) - r.mc.Set(&memcache.Item{Key: itrKey, Value: []byte(chunk.Key)}) - - // save body - slices.Reverse(chunk.Items) - b, err := json.Marshal(chunk.Items) - if err != nil { - span.RecordError(err) - return err - } - value := string(b[1:len(b)-1]) + "," - err = r.mc.Set(&memcache.Item{Key: chunk.Key, Value: []byte(value)}) - if err != nil { - span.RecordError(err) - continue - } - } - return nil -} - -// GetChunksFromCache gets chunks from cache -func (r *repository) GetChunksFromCache(ctx context.Context, timelines []string, chunk string) (map[string]core.Chunk, error) { - ctx, span := tracer.Start(ctx, "Timeline.Repository.GetChunksFromCache") - defer span.End() - - targetKeyMap, err := r.GetChunkIterators(ctx, timelines, chunk) - if err != nil { - span.RecordError(err) - return nil, err - } - - targetKeys := make([]string, 0) - for _, targetKey := range targetKeyMap { - targetKeys = append(targetKeys, targetKey) - } - - if len(targetKeys) == 0 { - return map[string]core.Chunk{}, nil - } - - caches, err := r.mc.GetMulti(targetKeys) - if err != nil { - span.RecordError(err) - return nil, err - } - - result := make(map[string]core.Chunk) - for _, timeline := range timelines { - targetKey := targetKeyMap[timeline] - cache, ok := caches[targetKey] - if !ok || len(cache.Value) == 0 { - continue - } - - var items []core.TimelineItem - cacheStr := 
string(cache.Value) - cacheStr = cacheStr[:len(cacheStr)-1] - cacheStr = "[" + cacheStr + "]" - err = json.Unmarshal([]byte(cacheStr), &items) - if err != nil { - span.RecordError(err) - continue - } - slices.Reverse(items) - result[timeline] = core.Chunk{ - Key: targetKey, - Items: items, - } - } - - return result, nil -} - -// GetChunksFromDB gets chunks from db and cache them -func (r *repository) GetChunksFromDB(ctx context.Context, timelines []string, chunk string) (map[string]core.Chunk, error) { - ctx, span := tracer.Start(ctx, "Timeline.Repository.GetChunksFromDB") - defer span.End() - - targetKeyMap, err := r.GetChunkIterators(ctx, timelines, chunk) - if err != nil { - span.RecordError(err) - return nil, err - } - - targetKeys := make([]string, 0) - for _, targetKey := range targetKeyMap { - targetKeys = append(targetKeys, targetKey) - } - - result := make(map[string]core.Chunk) - for _, timeline := range timelines { - targetKey := targetKeyMap[timeline] - var items []core.TimelineItem - chunkDate := core.Chunk2RecentTime(chunk) - - timelineID := timeline - if strings.Contains(timelineID, "@") { - timelineID = strings.Split(timelineID, "@")[0] - } - if len(timelineID) == 27 { - if timelineID[0] != 't' { - return nil, fmt.Errorf("timeline typed-id must start with 't'") - } - timelineID = timelineID[1:] - } - - err = r.db.WithContext(ctx).Where("timeline_id = ? 
and c_date <= ?", timelineID, chunkDate).Order("c_date desc").Limit(100).Find(&items).Error - if err != nil { - span.RecordError(err) - continue - } - - // append domain to timelineID - for i, item := range items { - items[i].TimelineID = item.TimelineID + "@" + r.config.FQDN - } - - result[timeline] = core.Chunk{ - Key: targetKey, - Items: items, - } - - // キャッシュには逆順で保存する - reversedItems := make([]core.TimelineItem, len(items)) - for i, item := range items { - reversedItems[len(items)-i-1] = item - } - b, err := json.Marshal(reversedItems) - if err != nil { - span.RecordError(err) - continue - } - cacheStr := string(b[1:len(b)-1]) + "," - err = r.mc.Set(&memcache.Item{Key: targetKey, Value: []byte(cacheStr)}) - if err != nil { - span.RecordError(err) - continue - } - } - - return result, nil -} - -// GetChunkIterators returns a list of iterated chunk keys -func (r *repository) GetChunkIterators(ctx context.Context, timelines []string, chunk string) (map[string]string, error) { - ctx, span := tracer.Start(ctx, "Timeline.Repository.GetChunkIterators") - defer span.End() - - keys := make([]string, len(timelines)) - for i, timeline := range timelines { - keys[i] = "timeline:itr:all:" + timeline + ":" + chunk - } - - cache, err := r.mc.GetMulti(keys) - if err != nil { - span.RecordError(err) - return nil, err - } - - result := make(map[string]string) - for i, timeline := range timelines { - if cache[keys[i]] != nil { // hit - result[timeline] = string(cache[keys[i]].Value) - } else { // miss - var item core.TimelineItem - chunkTime := core.Chunk2RecentTime(chunk) - dbid := timeline - if strings.Contains(dbid, "@") { - dbid = strings.Split(timeline, "@")[0] - } - if len(dbid) == 27 { - if dbid[0] != 't' { - return nil, fmt.Errorf("timeline typed-id must start with 't'") - } - dbid = dbid[1:] - } - err := r.db.WithContext(ctx).Where("timeline_id = ? 
and c_date <= ?", dbid, chunkTime).Order("c_date desc").First(&item).Error - if err != nil { - continue - } - key := "timeline:body:all:" + timeline + ":" + core.Time2Chunk(item.CDate) - r.mc.Set(&memcache.Item{Key: keys[i], Value: []byte(key)}) - result[timeline] = key - } - } - - return result, nil -} - // GetItem returns a timeline item by TimelineID and ObjectID func (r *repository) GetItem(ctx context.Context, timelineID string, objectID string) (core.TimelineItem, error) { ctx, span := tracer.Start(ctx, "Timeline.Repository.GetItem") @@ -546,38 +722,27 @@ func (r *repository) CreateItem(ctx context.Context, item core.TimelineItem) (co return item, err } - json = append(json, ',') - + val := "," + string(json) itemChunk := core.Time2Chunk(item.CDate) - cacheKey := "timeline:body:all:" + timelineID + ":" + itemChunk - - err = r.mc.Append(&memcache.Item{Key: cacheKey, Value: json}) - if err != nil { - // キャッシュに保存できなかった場合、新しいチャンクをDBから作成する必要がある - _, err = r.GetChunksFromDB(ctx, []string{timelineID}, itemChunk) - - // 再実行 (誤り: これをするとデータが重複するでしょ) - /* - err = r.mc.Append(&memcache.Item{Key: cacheKey, Value: json}) - if err != nil { - // これは致命的にプログラムがおかしい - log.Printf("failed to append cache: %v", err) - span.RecordError(err) - return item, err - } - */ - - if itemChunk != core.Time2Chunk(time.Now()) { - // イテレータを更新する - key := "timeline:itr:all:" + timelineID + ":" + itemChunk - dest := "timeline:body:all:" + timelineID + ":" + itemChunk - r.mc.Set(&memcache.Item{Key: key, Value: []byte(dest)}) - } - } + itrKey := tlItrCachePrefix + timelineID + ":" + itemChunk + cacheKey := tlBodyCachePrefix + timelineID + ":" + itemChunk + + // もし今からPrependするbodyブロックにイテレーターが向いてない場合は向きを変えておく必要がある + // これが発生するのは、タイムラインが久々に更新されたときで、最近のイテレーターが古いbodyブロックを向いている状態になっている + // そのため、イテレーターを更新しないと、古いbodyブロック(更新されない)を見続けてしまう為、新しく書き込んだデータが読み込まれない。 + // Note: + // この処理は今から挿入するアイテムが最新のチャンクであることが前提になっている。 + // 古いデータを挿入する場合は、書き込みを行ったチャンクから最新のチャンクまでのイテレーターを更新する必要があるかも。 + // 
範囲でforを回して、キャッシュをdeleteする処理を追加する必要があるだろう... + span.AddEvent(fmt.Sprintf("cache CreateItem: %s -> %s", itrKey, cacheKey)) + err = r.mc.Replace(&memcache.Item{Key: itrKey, Value: []byte(itemChunk)}) + span.AddEvent(fmt.Sprintf("replace err: %v", err)) + err = r.mc.Prepend(&memcache.Item{Key: cacheKey, Value: []byte(val)}) + span.AddEvent(fmt.Sprintf("prepend err: %v", err)) item.TimelineID = "t" + item.TimelineID - return item, err + return item, nil } // DeleteItem deletes a timeline item @@ -814,28 +979,6 @@ func (r *repository) Subscribe(ctx context.Context, channels []string, event cha } } -func (r *repository) GetCurrentSubs(ctx context.Context) []string { - - query := r.rdb.PubSubChannels(ctx, "*") - channels := query.Val() - - uniqueChannelsMap := make(map[string]bool) - for _, channel := range channels { - uniqueChannelsMap[channel] = true - } - - uniqueChannels := make([]string, 0) - for channel := range uniqueChannelsMap { - split := strings.Split(channel, "@") - if len(split) != 2 { - continue - } - uniqueChannels = append(uniqueChannels, channel) - } - - return uniqueChannels -} - func (r *repository) Query(ctx context.Context, timelineID, schema, owner, author string, until time.Time, limit int) ([]core.TimelineItem, error) { ctx, span := tracer.Start(ctx, "Timeline.Repository.Query") defer span.End() diff --git a/x/timeline/repository_test.go b/x/timeline/repository_test.go index a867986f..4f05498e 100644 --- a/x/timeline/repository_test.go +++ b/x/timeline/repository_test.go @@ -3,28 +3,22 @@ package timeline import ( "context" "encoding/json" - "log" + "fmt" "testing" "time" - "github.com/bradfitz/gomemcache/memcache" "github.com/stretchr/testify/assert" "github.com/totegamma/concurrent/client/mock" "github.com/totegamma/concurrent/core" "github.com/totegamma/concurrent/core/mock" "github.com/totegamma/concurrent/internal/testutil" + "github.com/totegamma/concurrent/x/timeline/mock" "go.uber.org/mock/gomock" ) var ctx = context.Background() -var mc 
*memcache.Client -var repo Repository -var pivot time.Time - -func TestRepository(t *testing.T) { - - log.Println("Test Start") +func TestCreateItem(t *testing.T) { var cleanup_db func() db, cleanup_db := testutil.CreateDB() defer cleanup_db() @@ -34,10 +28,11 @@ func TestRepository(t *testing.T) { defer cleanup_rdb() var cleanup_mc func() - mc, cleanup_mc = testutil.CreateMC() + mc, cleanup_mc := testutil.CreateMC() defer cleanup_mc() - pivot = time.Now() + pivotEpoch := "6000" + pivotTime := core.EpochTime("6300") ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -47,230 +42,851 @@ func TestRepository(t *testing.T) { mockSchema.EXPECT().IDToUrl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() mockClient := mock_client.NewMockClient(ctrl) + mockKeeper := mock_timeline.NewMockKeeper(ctrl) + + repo := repository{ + db: db, + rdb: rdb, + mc: mc, + keeper: mockKeeper, + client: mockClient, + schema: mockSchema, + config: core.Config{ + FQDN: "local.example.com", + }, + } - repo = NewRepository(db, rdb, mc, mockClient, mockSchema, core.Config{}) - - // :: Timelineを作成 :: - timeline := core.Timeline{ + // シナリオ1: 1チャンク内のアイテム数がdefaultChunkSizeより少ない場合 + // Timelineを作成 + _, err := repo.UpsertTimeline(ctx, core.Timeline{ ID: "t00000000000000000000000000", Indexable: true, - Author: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", Schema: "https://example.com/testschema.json", Document: "{}", - } + }) + assert.NoError(t, err) - created, err := repo.UpsertTimeline(ctx, timeline) - if assert.NoError(t, err) { - assert.Equal(t, created.ID, timeline.ID) - assert.Equal(t, created.Indexable, timeline.Indexable) - assert.Equal(t, created.Author, timeline.Author) - assert.Equal(t, created.Schema, timeline.Schema) - assert.Equal(t, created.Document, timeline.Document) - assert.NotZero(t, created.CDate) - assert.NotZero(t, created.MDate) - } - - // :: Itemを作成 :: - item := core.TimelineItem{ - ResourceID: 
"mRGZKRZ5YTMTNDE9J0676P1TQAW", + // Itemを追加 + _, err = repo.CreateItem(ctx, core.TimelineItem{ + ResourceID: "m00000000000000000000000000", TimelineID: "t00000000000000000000000000", - Owner: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 0), - } - - createdItem, err := repo.CreateItem(ctx, item) - if assert.NoError(t, err) { - assert.Equal(t, createdItem.ResourceID, item.ResourceID) - assert.Equal(t, createdItem.TimelineID, item.TimelineID) - assert.Equal(t, createdItem.Owner, item.Owner) - assert.NotZero(t, createdItem.CDate) - } + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime, + }) - // :: ChunkIteratorが取得できることを確認 :: - pivotChunk := core.Time2Chunk(pivot) + // 取得してキャッシュを生成 + _, err = repo.loadLocalBody( + ctx, + "t00000000000000000000000000@local.example.com", + pivotEpoch, + ) + assert.NoError(t, err) + // Itemを追加 _, err = repo.CreateItem(ctx, core.TimelineItem{ - ResourceID: "mRV75ZS5R588QDNQ00676P1X440", + ResourceID: "m11111111111111111111111111", TimelineID: "t00000000000000000000000000", - Owner: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 10), + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime, }) assert.NoError(t, err) - // trial1: cache miss test - result, err := repo.GetChunkIterators(ctx, []string{"t00000000000000000000000000"}, pivotChunk) + // キャッシュされているか確認 + mcKey0 := tlBodyCachePrefix + "t00000000000000000000000000" + "@" + "local.example.com" + ":" + pivotEpoch + mcVal0, err := mc.Get(mcKey0) if assert.NoError(t, err) { - assert.Len(t, result, 1) + cacheStr := string(mcVal0.Value) + cacheStr = cacheStr[1:] + cacheStr = "[" + cacheStr + "]" + + var items []core.TimelineItem + err = json.Unmarshal([]byte(cacheStr), &items) + if assert.NoError(t, err) { + assert.Len(t, items, 2) + assert.Equal(t, "m11111111111111111111111111", items[0].ResourceID) + assert.Equal(t, "m00000000000000000000000000", items[1].ResourceID) + } } +} - 
itemKey := "timeline:body:all:t00000000000000000000000000:" + core.Time2Chunk(createdItem.CDate) - assert.Equal(t, result["t00000000000000000000000000"], itemKey) +func TestLoadChunkBodies(t *testing.T) { + var cleanup_db func() + db, cleanup_db := testutil.CreateDB() + defer cleanup_db() - // trial2: cache hit test - result2, err := repo.GetChunkIterators(ctx, []string{"t00000000000000000000000000"}, pivotChunk) - if assert.NoError(t, err) { - assert.Len(t, result2, 1) - assert.Equal(t, result2["t00000000000000000000000000"], itemKey) + var cleanup_rdb func() + rdb, cleanup_rdb := testutil.CreateRDB() + defer cleanup_rdb() + + var cleanup_mc func() + mc, cleanup_mc := testutil.CreateMC() + defer cleanup_mc() + + pivotEpoch := core.Time2Chunk(time.Now()) + pivotTime := core.Chunk2RecentTime(pivotEpoch) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSchema := mock_core.NewMockSchemaService(ctrl) + mockSchema.EXPECT().UrlToID(gomock.Any(), gomock.Any()).Return(uint(0), nil).AnyTimes() + mockSchema.EXPECT().IDToUrl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + mockClient := mock_client.NewMockClient(ctrl) + mockClient.EXPECT().GetChunkBodies( + gomock.Any(), + "remote.example.com", + map[string]string{ + "t00000000000000000000000000@remote.example.com": pivotEpoch, + "t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + nil, + ).Return( + map[string]core.Chunk{ + "t00000000000000000000000000@remote.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m00000000000000000000000000", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime, + }, + }, + }, + "t11111111111111111111111111@remote.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m11111111111111111111111111", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime, + }, + 
}, + }, + }, + nil, + ) + mockClient.EXPECT().GetChunkBodies( + gomock.Any(), + "remote.example.com", + map[string]string{ + "t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + nil, + ).Return( + map[string]core.Chunk{ + "t11111111111111111111111111@remote.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m11111111111111111111111111", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime, + }, + }, + }, + }, + nil, + ) + + mockKeeper := mock_timeline.NewMockKeeper(ctrl) + mockKeeper.EXPECT().GetRemoteSubs().Return([]string{"t00000000000000000000000000@remote.example.com"}).Times(2) + + repo := repository{ + db: db, + rdb: rdb, + mc: mc, + keeper: mockKeeper, + client: mockClient, + schema: mockSchema, + config: core.Config{ + FQDN: "local.example.com", + }, + } + + // Timelineを作成 + _, err := repo.UpsertTimeline(ctx, core.Timeline{ + ID: "t00000000000000000000000000", + Indexable: true, + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + Schema: "https://example.com/testschema.json", + Document: "{}", + }) + assert.NoError(t, err) + + // Itemを追加 + for i := 0; i < 40; i++ { + resourceID := fmt.Sprintf("m%026d", i) + _, err = repo.CreateItem(ctx, core.TimelineItem{ + ResourceID: resourceID, + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime.Add(-time.Minute * time.Duration(i)), + }) + } + + // 取得 + assert.Equal(t, repo.loadChunkBodiesCacheHits, int64(0)) + assert.Equal(t, repo.loadChunkBodiesCacheMisses, int64(0)) + bodies, err := repo.LoadChunkBodies( + ctx, + map[string]string{ + "t00000000000000000000000000@local.example.com": pivotEpoch, + "t00000000000000000000000000@remote.example.com": pivotEpoch, + "t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + ) + assert.NoError(t, err) + assert.Equal(t, repo.loadChunkBodiesCacheHits, int64(0)) + assert.Equal(t, 
repo.loadChunkBodiesCacheMisses, int64(3)) + + assert.Len(t, bodies, 3) + if assert.Contains(t, bodies, "t00000000000000000000000000@local.example.com") { + assert.Len(t, bodies["t00000000000000000000000000@local.example.com"].Items, 32) + } + if assert.Contains(t, bodies, "t00000000000000000000000000@remote.example.com") { + assert.Len(t, bodies["t00000000000000000000000000@remote.example.com"].Items, 1) + } + if assert.Contains(t, bodies, "t11111111111111111111111111@remote.example.com") { + assert.Len(t, bodies["t11111111111111111111111111@remote.example.com"].Items, 1) + } + + // ちゃんとキャッシュされているか確認 + bodies, err = repo.LoadChunkBodies( + ctx, + map[string]string{ + "t00000000000000000000000000@local.example.com": pivotEpoch, + "t00000000000000000000000000@remote.example.com": pivotEpoch, + "t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + ) + assert.NoError(t, err) + assert.Equal(t, repo.loadChunkBodiesCacheHits, int64(2)) + assert.Equal(t, repo.loadChunkBodiesCacheMisses, int64(4)) + + assert.Len(t, bodies, 3) + if assert.Contains(t, bodies, "t00000000000000000000000000@local.example.com") { + assert.Len(t, bodies["t00000000000000000000000000@local.example.com"].Items, 32) + } + if assert.Contains(t, bodies, "t00000000000000000000000000@remote.example.com") { + assert.Len(t, bodies["t00000000000000000000000000@remote.example.com"].Items, 1) + } + if assert.Contains(t, bodies, "t11111111111111111111111111@remote.example.com") { + assert.Len(t, bodies["t11111111111111111111111111@remote.example.com"].Items, 1) + } + +} + +func TestLookupChunkItrs(t *testing.T) { + var cleanup_db func() + db, cleanup_db := testutil.CreateDB() + defer cleanup_db() + + var cleanup_rdb func() + rdb, cleanup_rdb := testutil.CreateRDB() + defer cleanup_rdb() + + var cleanup_mc func() + mc, cleanup_mc := testutil.CreateMC() + defer cleanup_mc() + + pivotTime := time.Now() + pivotEpoch := core.Time2Chunk(pivotTime) + + ctrl := gomock.NewController(t) + defer 
ctrl.Finish() + + mockSchema := mock_core.NewMockSchemaService(ctrl) + mockSchema.EXPECT().UrlToID(gomock.Any(), gomock.Any()).Return(uint(0), nil).AnyTimes() + mockSchema.EXPECT().IDToUrl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + mockClient := mock_client.NewMockClient(ctrl) + mockClient.EXPECT().GetChunkItrs( + gomock.Any(), + "remote.example.com", + []string{ + "t00000000000000000000000000@remote.example.com", + "t11111111111111111111111111@remote.example.com", + }, + pivotEpoch, + gomock.Any(), + ).Return( + map[string]string{ + "t00000000000000000000000000@remote.example.com": pivotEpoch, + "t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + nil, + ) + + mockKeeper := mock_timeline.NewMockKeeper(ctrl) + mockKeeper.EXPECT().GetRemoteSubs().Return([]string{ + "t00000000000000000000000000@remote.example.com", + "t11111111111111111111111111@remote.example.com", + }) + + repo := repository{ + db: db, + rdb: rdb, + mc: mc, + keeper: mockKeeper, + client: mockClient, + schema: mockSchema, + config: core.Config{ + FQDN: "local.example.com", + }, } - // :: Timeline1を作成してItemを追加 :: + // Timelineを作成 + _, err := repo.UpsertTimeline(ctx, core.Timeline{ + ID: "t00000000000000000000000000", + Indexable: true, + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + Schema: "https://example.com/testschema.json", + Document: "{}", + }) + assert.NoError(t, err) + _, err = repo.UpsertTimeline(ctx, core.Timeline{ ID: "t11111111111111111111111111", Indexable: true, - Author: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", Schema: "https://example.com/testschema.json", Document: "{}", }) assert.NoError(t, err) + itemPivotTime := pivotTime.Add(-time.Minute * 0) + //itemPivotEpoch := core.Time2Chunk(itemPivotTime) + + // Itemを追加 _, err = repo.CreateItem(ctx, core.TimelineItem{ - ResourceID: "m5JY6724DKGDBCMP60676P2055M", - TimelineID: "t11111111111111111111111111", - Owner: 
"con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 0), + ResourceID: "m00000000000000000000000000", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: itemPivotTime, }) - assert.NoError(t, err) _, err = repo.CreateItem(ctx, core.TimelineItem{ - ResourceID: "m5KV37HA63HVE7KNP0676P228RM", + ResourceID: "m11111111111111111111111111", TimelineID: "t11111111111111111111111111", - Owner: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 10), + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: itemPivotTime, + }) + + // 取得 + assert.Equal(t, repo.lookupChunkItrsCacheHits, int64(0)) + assert.Equal(t, repo.lookupChunkItrsCacheMisses, int64(0)) + itrs, err := repo.LookupChunkItrs( + ctx, + []string{ + "t00000000000000000000000000@local.example.com", + "t11111111111111111111111111@local.example.com", + "t00000000000000000000000000@remote.example.com", + "t11111111111111111111111111@remote.example.com", + }, + pivotEpoch, + ) + assert.NoError(t, err) + assert.Equal(t, repo.lookupChunkItrsCacheHits, int64(0)) + assert.Equal(t, repo.lookupChunkItrsCacheMisses, int64(4)) + + assert.Len(t, itrs, 4) + if assert.Contains(t, itrs, "t00000000000000000000000000@local.example.com") { + assert.Equal(t, pivotEpoch, itrs["t00000000000000000000000000@local.example.com"]) + } + if assert.Contains(t, itrs, "t11111111111111111111111111@local.example.com") { + assert.Equal(t, pivotEpoch, itrs["t11111111111111111111111111@local.example.com"]) + } + if assert.Contains(t, itrs, "t00000000000000000000000000@remote.example.com") { + assert.Equal(t, pivotEpoch, itrs["t00000000000000000000000000@remote.example.com"]) + } + if assert.Contains(t, itrs, "t11111111111111111111111111@remote.example.com") { + assert.Equal(t, pivotEpoch, itrs["t11111111111111111111111111@remote.example.com"]) + } + + // use cache + itrs, err = repo.LookupChunkItrs( + ctx, + []string{ + 
"t00000000000000000000000000@local.example.com", + "t11111111111111111111111111@local.example.com", + "t00000000000000000000000000@remote.example.com", + "t11111111111111111111111111@remote.example.com", + }, + pivotEpoch, + ) + assert.NoError(t, err) + assert.Equal(t, repo.lookupChunkItrsCacheHits, int64(4)) + assert.Equal(t, repo.lookupChunkItrsCacheMisses, int64(4)) + + assert.Len(t, itrs, 4) + if assert.Contains(t, itrs, "t00000000000000000000000000@local.example.com") { + assert.Equal(t, pivotEpoch, itrs["t00000000000000000000000000@local.example.com"]) + } + if assert.Contains(t, itrs, "t11111111111111111111111111@local.example.com") { + assert.Equal(t, pivotEpoch, itrs["t11111111111111111111111111@local.example.com"]) + } + if assert.Contains(t, itrs, "t00000000000000000000000000@remote.example.com") { + assert.Equal(t, pivotEpoch, itrs["t00000000000000000000000000@remote.example.com"]) + } + if assert.Contains(t, itrs, "t11111111111111111111111111@remote.example.com") { + assert.Equal(t, pivotEpoch, itrs["t11111111111111111111111111@remote.example.com"]) + } +} + +func TestLoadRemoteBodies(t *testing.T) { + var cleanup_db func() + db, cleanup_db := testutil.CreateDB() + defer cleanup_db() + + var cleanup_rdb func() + rdb, cleanup_rdb := testutil.CreateRDB() + defer cleanup_rdb() + + var cleanup_mc func() + mc, cleanup_mc := testutil.CreateMC() + defer cleanup_mc() + + pivotTime := time.Now() + pivotEpoch := core.Time2Chunk(pivotTime) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSchema := mock_core.NewMockSchemaService(ctrl) + mockSchema.EXPECT().UrlToID(gomock.Any(), gomock.Any()).Return(uint(0), nil).AnyTimes() + mockSchema.EXPECT().IDToUrl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + mockClient := mock_client.NewMockClient(ctrl) + mockClient.EXPECT().GetChunkBodies( + gomock.Any(), + "remote.example.com", + map[string]string{ + "t00000000000000000000000000@remote.example.com": pivotEpoch, + 
"t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + nil, + ).Return( + map[string]core.Chunk{ + "t00000000000000000000000000@remote.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m00000000000000000000000000", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime, + }, + }, + }, + "t11111111111111111111111111@remote.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m11111111111111111111111111", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime, + }, + }, + }, + }, + nil, + ) + + mockKeeper := mock_timeline.NewMockKeeper(ctrl) + mockKeeper.EXPECT().GetRemoteSubs().Return([]string{ + "t00000000000000000000000000@remote.example.com", }) + + repo := repository{ + db: db, + rdb: rdb, + mc: mc, + keeper: mockKeeper, + client: mockClient, + schema: mockSchema, + config: core.Config{ + FQDN: "local.example.com", + }, + } + + bodies, err := repo.loadRemoteBodies( + ctx, + "remote.example.com", + map[string]string{ + "t00000000000000000000000000@remote.example.com": pivotEpoch, + "t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + ) assert.NoError(t, err) + assert.Len(t, bodies, 2) + if assert.Contains(t, bodies, "t00000000000000000000000000@remote.example.com") { + assert.Len(t, bodies["t00000000000000000000000000@remote.example.com"].Items, 1) + assert.Equal(t, "m00000000000000000000000000", bodies["t00000000000000000000000000@remote.example.com"].Items[0].ResourceID) + } + + if assert.Contains(t, bodies, "t11111111111111111111111111@remote.example.com") { + assert.Len(t, bodies["t11111111111111111111111111@remote.example.com"].Items, 1) + assert.Equal(t, "m11111111111111111111111111", bodies["t11111111111111111111111111@remote.example.com"].Items[0].ResourceID) + } + + // ちゃんとキャッシュされているか確認 + mcKey1 := tlBodyCachePrefix + 
"t00000000000000000000000000@remote.example.com:" + pivotEpoch + mcVal1, err := mc.Get(mcKey1) + if assert.NoError(t, err) { + cacheStr := string(mcVal1.Value) + cacheStr = cacheStr[1:] + cacheStr = "[" + cacheStr + "]" + + var items []core.TimelineItem + err = json.Unmarshal([]byte(cacheStr), &items) + if assert.NoError(t, err) { + assert.Len(t, items, 1) + assert.Equal(t, "m00000000000000000000000000", items[0].ResourceID) + } + } + + mcKey2 := tlBodyCachePrefix + "t11111111111111111111111111@remote.example.com:" + pivotEpoch + _, err = mc.Get(mcKey2) + assert.Error(t, err) // こっちはsubscribeされてないのでキャッシュされない +} + +func TestLookupRemoteItrs(t *testing.T) { + var cleanup_db func() + db, cleanup_db := testutil.CreateDB() + defer cleanup_db() + + var cleanup_rdb func() + rdb, cleanup_rdb := testutil.CreateRDB() + defer cleanup_rdb() + + var cleanup_mc func() + mc, cleanup_mc := testutil.CreateMC() + defer cleanup_mc() + + pivotTime := time.Now() + pivotEpoch := core.Time2Chunk(pivotTime) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSchema := mock_core.NewMockSchemaService(ctrl) + mockSchema.EXPECT().UrlToID(gomock.Any(), gomock.Any()).Return(uint(0), nil).AnyTimes() + mockSchema.EXPECT().IDToUrl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + mockClient := mock_client.NewMockClient(ctrl) + mockClient.EXPECT().GetChunkItrs( + gomock.Any(), + "remote.example.com", + []string{ + "t00000000000000000000000000@remote.example.com", + "t11111111111111111111111111@remote.example.com", + }, + pivotEpoch, + gomock.Any(), + ).Return( + map[string]string{ + "t00000000000000000000000000@remote.example.com": pivotEpoch, + "t11111111111111111111111111@remote.example.com": pivotEpoch, + }, + nil, + ) - // Insertによるキャッシュ更新を一旦クリア - mc.DeleteAll() + mockKeeper := mock_timeline.NewMockKeeper(ctrl) + mockKeeper.EXPECT().GetRemoteSubs().Return([]string{ + "t00000000000000000000000000@remote.example.com", + "t11111111111111111111111111@remote.example.com", + }) 
- // GetChunksFromCacheでキャッシュがないはずなので何も帰ってこないことを確認 - chunks, err := repo.GetChunksFromCache(ctx, []string{"t00000000000000000000000000", "t11111111111111111111111111"}, pivotChunk) + repo := repository{ + db: db, + rdb: rdb, + mc: mc, + keeper: mockKeeper, + client: mockClient, + schema: mockSchema, + config: core.Config{ + FQDN: "local.example.com", + }, + } + + itrs, err := repo.lookupRemoteItrs( + ctx, + "remote.example.com", + []string{ + "t00000000000000000000000000@remote.example.com", + "t11111111111111111111111111@remote.example.com", + }, + pivotEpoch, + ) assert.NoError(t, err) - assert.Len(t, chunks, 0) + assert.Len(t, itrs, 2) + if assert.Contains(t, itrs, "t00000000000000000000000000@remote.example.com") { + assert.Equal(t, pivotEpoch, itrs["t00000000000000000000000000@remote.example.com"]) + } + + if assert.Contains(t, itrs, "t11111111111111111111111111@remote.example.com") { + assert.Equal(t, pivotEpoch, itrs["t11111111111111111111111111@remote.example.com"]) + } - // GetChunksFromDBで要素を取得する - chunks, err = repo.GetChunksFromDB(ctx, []string{"t00000000000000000000000000", "t11111111111111111111111111"}, pivotChunk) + // ちゃんとキャッシュされているか確認 + mcKey1 := tlItrCachePrefix + "t00000000000000000000000000@remote.example.com:" + pivotEpoch + mcKey2 := tlItrCachePrefix + "t11111111111111111111111111@remote.example.com:" + pivotEpoch + mcVal1, err := mc.Get(mcKey1) if assert.NoError(t, err) { - assert.Len(t, chunks, 2) - assert.Len(t, chunks["t00000000000000000000000000"].Items, 2) - assert.Len(t, chunks["t11111111111111111111111111"].Items, 2) + assert.Equal(t, pivotEpoch, string(mcVal1.Value)) } - // GetChunksFromCacheでキャッシュがあるはずなのでキャッシュから取得する - chunks, err = repo.GetChunksFromCache(ctx, []string{"t00000000000000000000000000", "t11111111111111111111111111"}, pivotChunk) + mcVal2, err := mc.Get(mcKey2) if assert.NoError(t, err) { - assert.Len(t, chunks, 2) - assert.Len(t, chunks["t00000000000000000000000000"].Items, 2) - assert.Len(t, 
chunks["t11111111111111111111111111"].Items, 2) + assert.Equal(t, pivotEpoch, string(mcVal2.Value)) + } +} + +func TestLookupLocalItrs(t *testing.T) { + var cleanup_db func() + db, cleanup_db := testutil.CreateDB() + defer cleanup_db() + + var cleanup_rdb func() + rdb, cleanup_rdb := testutil.CreateRDB() + defer cleanup_rdb() + + var cleanup_mc func() + mc, cleanup_mc := testutil.CreateMC() + defer cleanup_mc() + + pivotTime := time.Now() + pivotEpoch := core.Time2Chunk(pivotTime) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSchema := mock_core.NewMockSchemaService(ctrl) + mockSchema.EXPECT().UrlToID(gomock.Any(), gomock.Any()).Return(uint(0), nil).AnyTimes() + mockSchema.EXPECT().IDToUrl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + mockClient := mock_client.NewMockClient(ctrl) + + mockKeeper := mock_timeline.NewMockKeeper(ctrl) + + repo := repository{ + db: db, + rdb: rdb, + mc: mc, + keeper: mockKeeper, + client: mockClient, + schema: mockSchema, + config: core.Config{ + FQDN: "local.example.com", + }, } - // TimelineItemの順番のテスト + // Timelineを作成 + _, err := repo.UpsertTimeline(ctx, core.Timeline{ + ID: "t00000000000000000000000000", + Indexable: true, + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + Schema: "https://example.com/testschema.json", + Document: "{}", + }) + assert.NoError(t, err) _, err = repo.UpsertTimeline(ctx, core.Timeline{ - ID: "t22222222222222222222222222", + ID: "t11111111111111111111111111", Indexable: true, - Author: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", Schema: "https://example.com/testschema.json", Document: "{}", }) assert.NoError(t, err) + itemPivotTime := pivotTime.Add(-time.Minute * 0) + itemPivotEpoch := core.Time2Chunk(itemPivotTime) + + // Itemを追加 _, err = repo.CreateItem(ctx, core.TimelineItem{ - ResourceID: "mA1HJCH9NK9MPMV7D0676P25PSR", - TimelineID: "t22222222222222222222222222", - Owner: 
"con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 10), + ResourceID: "m00000000000000000000000000", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: itemPivotTime, }) - assert.NoError(t, err) _, err = repo.CreateItem(ctx, core.TimelineItem{ - ResourceID: "mW4H1PZZ223D1B6ED0676P27J50", - TimelineID: "t22222222222222222222222222", - Owner: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 5), + ResourceID: "m11111111111111111111111111", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: itemPivotTime, }) - assert.NoError(t, err) - mc.DeleteAll() + // 取得 + itrs, err := repo.lookupLocalItrs( + ctx, + []string{"t00000000000000000000000000@local.example.com", "t11111111111111111111111111@local.example.com"}, + pivotEpoch, + ) + assert.NoError(t, err) + assert.Len(t, itrs, 2) + if assert.Contains(t, itrs, "t00000000000000000000000000@local.example.com") { + assert.Equal(t, itemPivotEpoch, itrs["t00000000000000000000000000@local.example.com"]) + } + if assert.Contains(t, itrs, "t11111111111111111111111111@local.example.com") { + assert.Equal(t, itemPivotEpoch, itrs["t11111111111111111111111111@local.example.com"]) + } - chunks, err = repo.GetChunksFromDB(ctx, []string{"t22222222222222222222222222"}, pivotChunk) + // ちゃんとキャッシュされているか確認 + mcKey1 := tlItrCachePrefix + "t00000000000000000000000000@local.example.com:" + pivotEpoch + mcKey2 := tlItrCachePrefix + "t11111111111111111111111111@local.example.com:" + pivotEpoch + mcVal1, err := mc.Get(mcKey1) if assert.NoError(t, err) { - assert.Len(t, chunks, 1) - assert.Len(t, chunks["t22222222222222222222222222"].Items, 2) - assert.Equal(t, "mW4H1PZZ223D1B6ED0676P27J50", chunks["t22222222222222222222222222"].Items[0].ResourceID) - assert.Equal(t, "mA1HJCH9NK9MPMV7D0676P25PSR", chunks["t22222222222222222222222222"].Items[1].ResourceID) + assert.Equal(t, 
itemPivotEpoch, string(mcVal1.Value)) } + mcVal2, err := mc.Get(mcKey2) + if assert.NoError(t, err) { + assert.Equal(t, itemPivotEpoch, string(mcVal2.Value)) + } +} - _, err = repo.CreateItem(ctx, core.TimelineItem{ - ResourceID: "mT46G7BT5TJQQS4WY0676P2A9ZM", - TimelineID: "t22222222222222222222222222", - Owner: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 1), - }) - assert.NoError(t, err) +func TestLoadLocalBody(t *testing.T) { + var cleanup_db func() + db, cleanup_db := testutil.CreateDB() + defer cleanup_db() - chunks, err = repo.GetChunksFromDB(ctx, []string{"t22222222222222222222222222"}, pivotChunk) - if assert.NoError(t, err) { - assert.Len(t, chunks, 1) - assert.Len(t, chunks["t22222222222222222222222222"].Items, 3) - assert.Equal(t, "mT46G7BT5TJQQS4WY0676P2A9ZM", chunks["t22222222222222222222222222"].Items[0].ResourceID) - assert.Equal(t, "mW4H1PZZ223D1B6ED0676P27J50", chunks["t22222222222222222222222222"].Items[1].ResourceID) - assert.Equal(t, "mA1HJCH9NK9MPMV7D0676P25PSR", chunks["t22222222222222222222222222"].Items[2].ResourceID) - } - - remoteKey0 := "timeline:body:all:t00000000000000000000000000@remote.com:" + core.Time2Chunk(pivot.Add(-time.Minute*10)) - remoteKey1 := "timeline:body:all:t11111111111111111111111111@remote.com:" + core.Time2Chunk(pivot.Add(-time.Minute*30)) - - // test SaveToCache - testchunks := make(map[string]core.Chunk) - testchunks["t00000000000000000000000000@remote.com"] = core.Chunk{ - Key: remoteKey0, - Items: []core.TimelineItem{ - { - ResourceID: "mDMZMRRS7N16E1PDN0676P2QH6C", - TimelineID: "t00000000000000000000000000@remote.com", - Owner: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 10), - }, + var cleanup_rdb func() + rdb, cleanup_rdb := testutil.CreateRDB() + defer cleanup_rdb() + + var cleanup_mc func() + mc, cleanup_mc := testutil.CreateMC() + defer cleanup_mc() + + pivotEpoch := core.Time2Chunk(time.Now()) + pivotTime := 
core.Chunk2RecentTime(pivotEpoch) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSchema := mock_core.NewMockSchemaService(ctrl) + mockSchema.EXPECT().UrlToID(gomock.Any(), gomock.Any()).Return(uint(0), nil).AnyTimes() + mockSchema.EXPECT().IDToUrl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + mockClient := mock_client.NewMockClient(ctrl) + + mockKeeper := mock_timeline.NewMockKeeper(ctrl) + + repo := repository{ + db: db, + rdb: rdb, + mc: mc, + keeper: mockKeeper, + client: mockClient, + schema: mockSchema, + config: core.Config{ + FQDN: "local.example.com", }, } - testJson0, err := json.Marshal(testchunks["t00000000000000000000000000@remote.com"].Items[0]) - testJson0 = append(testJson0, ',') - testchunks["t11111111111111111111111111@remote.com"] = core.Chunk{ - Key: remoteKey1, - Items: []core.TimelineItem{ - { - ResourceID: "mD895NMA837R0C6B90676P2S1J4", - TimelineID: "t11111111111111111111111111@remote.com", - Owner: "con18fyqn098jsf6cnw2r8hkjt7zeftfa0vqvjr6fe", - CDate: pivot.Add(-time.Minute * 30), - }, - }, + + // シナリオ1: 1チャンク内のアイテム数がdefaultChunkSizeより少ない場合 + // Timelineを作成 + _, err := repo.UpsertTimeline(ctx, core.Timeline{ + ID: "t00000000000000000000000000", + Indexable: true, + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + Schema: "https://example.com/testschema.json", + Document: "{}", + }) + assert.NoError(t, err) + + // Itemを追加 + for i := 0; i < 40; i++ { + resourceID := fmt.Sprintf("m%026d", i) + _, err = repo.CreateItem(ctx, core.TimelineItem{ + ResourceID: resourceID, + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime.Add(-time.Minute * time.Duration(i)), + }) } - testJson1, err := json.Marshal(testchunks["t11111111111111111111111111@remote.com"].Items[0]) - testJson1 = append(testJson1, ',') - err = repo.SaveToCache(ctx, testchunks, pivot) + // 取得 + chunk0, err := repo.loadLocalBody( + ctx, + 
"t00000000000000000000000000@local.example.com", + pivotEpoch, + ) + assert.NoError(t, err) + assert.Equal(t, chunk0.Epoch, pivotEpoch) + assert.Len(t, chunk0.Items, 32) // defaultChunkSizeの数入っているはず + + // ちゃんとキャッシュされているか確認 + mcKey0 := tlBodyCachePrefix + "t00000000000000000000000000@local.example.com:" + pivotEpoch + mcVal0, err := mc.Get(mcKey0) if assert.NoError(t, err) { - itrkey0 := "timeline:itr:all:t00000000000000000000000000@remote.com:" + pivotChunk - remoteCache0, err := mc.Get(itrkey0) - if assert.NoError(t, err) { - assert.Equal(t, remoteKey0, string(remoteCache0.Value)) - } - itrKey1 := "timeline:itr:all:t11111111111111111111111111@remote.com:" + pivotChunk - remoteCache1, err := mc.Get(itrKey1) - if assert.NoError(t, err) { - assert.Equal(t, remoteKey1, string(remoteCache1.Value)) - } + cacheStr := string(mcVal0.Value) + cacheStr = cacheStr[1:] + cacheStr = "[" + cacheStr + "]" - remoteCache0, err = mc.Get(remoteKey0) + var items []core.TimelineItem + err = json.Unmarshal([]byte(cacheStr), &items) if assert.NoError(t, err) { - assert.Equal(t, string(testJson0), string(remoteCache0.Value)) + assert.Len(t, items, 32) + assert.Equal(t, "m00000000000000000000000000", items[0].ResourceID) } + } - remoteCache1, err = mc.Get(remoteKey1) + // シナリオ2: 1チャンク内のアイテム数がdefaultChunkSizeより多い場合 + // Timelineを作成 + _, err = repo.UpsertTimeline(ctx, core.Timeline{ + ID: "t11111111111111111111111111", + Indexable: true, + Author: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + Schema: "https://example.com/testschema.json", + Document: "{}", + }) + assert.NoError(t, err) + + // Itemを追加 + for i := 0; i < 40; i++ { + resourceID := fmt.Sprintf("m%026d", i) + _, err = repo.CreateItem(ctx, core.TimelineItem{ + ResourceID: resourceID, + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: pivotTime.Add(-time.Minute * time.Duration(i) / 10), //ツメツメで入れる + }) + } + + // 取得 + chunk1, err := repo.loadLocalBody( + ctx, + 
"t11111111111111111111111111@local.example.com", + pivotEpoch, + ) + assert.NoError(t, err) + assert.Equal(t, chunk1.Epoch, pivotEpoch) + assert.Len(t, chunk1.Items, 40) // 全アイテムが入っているはず + + // ちゃんとキャッシュされているか確認 + mcKey1 := tlBodyCachePrefix + "t11111111111111111111111111@local.example.com:" + pivotEpoch + mcVal1, err := mc.Get(mcKey1) + if assert.NoError(t, err) { + + cacheStr := string(mcVal1.Value) + cacheStr = cacheStr[1:] + cacheStr = "[" + cacheStr + "]" + + var items []core.TimelineItem + err = json.Unmarshal([]byte(cacheStr), &items) if assert.NoError(t, err) { - assert.Equal(t, string(testJson1), string(remoteCache1.Value)) + assert.Len(t, items, 40) + assert.Equal(t, "m00000000000000000000000000", items[0].ResourceID) } } + } diff --git a/x/timeline/service.go b/x/timeline/service.go index aaa367cb..603c06dd 100644 --- a/x/timeline/service.go +++ b/x/timeline/service.go @@ -1,6 +1,7 @@ package timeline import ( + "container/heap" "context" "encoding/json" "fmt" @@ -11,6 +12,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -52,6 +54,11 @@ func NewService( } } +func jsonPrint(tag string, obj interface{}) { + b, _ := json.MarshalIndent(obj, "", " ") + fmt.Println(tag, string(b)) +} + // Count returns the count number of messages func (s *service) Count(ctx context.Context) (int64, error) { ctx, span := tracer.Start(ctx, "Timeline.Service.Count") @@ -67,13 +74,13 @@ func min(a, b int) int { return b } -func (s *service) LookupChunkItr(ctx context.Context, timeliens []string, epoch string) (map[string]string, error) { - ctx, span := tracer.Start(ctx, "Timeline.Service.LookupChunkItr") +func (s *service) GetChunks(ctx context.Context, timelines []string, epoch string) (map[string]core.Chunk, error) { + ctx, span := tracer.Start(ctx, "Timeline.Service.GetChunks") defer span.End() normalized := make([]string, 0) normtable := 
make(map[string]string) - for _, timeline := range timeliens { + for _, timeline := range timelines { normalizedTimeline, err := s.NormalizeTimelineID(ctx, timeline) if err != nil { slog.WarnContext( @@ -87,59 +94,33 @@ func (s *service) LookupChunkItr(ctx context.Context, timeliens []string, epoch normtable[normalizedTimeline] = timeline } - table, err := s.repository.GetChunkIterators(ctx, normalized, epoch) + query, err := s.repository.LookupChunkItrs(ctx, normalized, epoch) if err != nil { span.RecordError(err) return nil, err } - recovered := make(map[string]string) - for k, v := range table { - split := strings.Split(v, ":") - recovered[normtable[k]] = split[len(split)-1] + chunks, err := s.repository.LoadChunkBodies(ctx, query) + if err != nil { + span.RecordError(err) + return nil, err } - return recovered, nil -} - -func (s *service) LoadChunkBody(ctx context.Context, query map[string]string) (map[string]core.Chunk, error) { - ctx, span := tracer.Start(ctx, "Timeline.Service.LoadChunkBody") - defer span.End() - - result := make(map[string]core.Chunk) - for k, v := range query { - time := core.Chunk2RecentTime(v) - - chunks, err := s.GetChunks(ctx, []string{k}, time) - if err != nil { - span.RecordError(err) - continue - } - if len(chunks) == 0 { - continue - } - - var key string - for l := range chunks { - key = l - break - } + recovered := make(map[string]core.Chunk) + for k, v := range chunks { + recovered[normtable[k]] = v + } - result[k] = core.Chunk{ - Epoch: v, - Items: chunks[key].Items, + // for backward compatibility + for f, t := range normtable { + if chunk, ok := recovered[t]; !ok { + chunk.Key = strings.Replace(chunk.Key, "tl:body:", "timeline:body:all:", 1) + chunk.Key = strings.Replace(chunk.Key, t, f, 1) + recovered[f] = chunk } } - return result, nil -} - -func (s *service) CurrentRealtimeConnectionCount() int64 { - return atomic.LoadInt64(&s.socketCounter) -} - -func (s *service) GetChunksFromRemote(ctx context.Context, host string, 
timelines []string, pivot time.Time) (map[string]core.Chunk, error) { - return s.repository.GetChunksFromRemote(ctx, host, timelines, pivot) + return recovered, nil } // NormalizeTimelineID normalizes timelineID @@ -216,52 +197,74 @@ func (s *service) NormalizeTimelineID(ctx context.Context, timeline string) (str return normalized, nil } -// GetChunks returns chunks by timelineID and time -func (s *service) GetChunks(ctx context.Context, timelines []string, until time.Time) (map[string]core.Chunk, error) { - ctx, span := tracer.Start(ctx, "Timeline.Service.GetChunks") +func (s *service) LookupChunkItr(ctx context.Context, timeliens []string, epoch string) (map[string]string, error) { + ctx, span := tracer.Start(ctx, "Timeline.Service.LookupChunkItr") defer span.End() - // normalize timelineID and validate - for i, timeline := range timelines { - normalized, err := s.NormalizeTimelineID(ctx, timeline) + normalized := make([]string, 0) + normtable := make(map[string]string) + for _, timeline := range timeliens { + normalizedTimeline, err := s.NormalizeTimelineID(ctx, timeline) if err != nil { + slog.WarnContext( + ctx, + fmt.Sprintf("failed to normalize timeline: %s", timeline), + slog.String("module", "timeline"), + ) continue } - timelines[i] = normalized + normalized = append(normalized, normalizedTimeline) + normtable[normalizedTimeline] = timeline } - // first, try to get from cache - untilChunk := core.Time2Chunk(until) - items, err := s.repository.GetChunksFromCache(ctx, timelines, untilChunk) + table, err := s.repository.LookupChunkItrs(ctx, normalized, epoch) if err != nil { - slog.ErrorContext(ctx, "failed to get chunks from cache", slog.String("error", err.Error()), slog.String("module", "timeline")) span.RecordError(err) return nil, err } - // if not found in cache, get from db - missingTimelines := make([]string, 0) - for _, timeline := range timelines { - if _, ok := items[timeline]; !ok { - missingTimelines = append(missingTimelines, timeline) - } + 
recovered := make(map[string]string) + for k, v := range table { + split := strings.Split(v, ":") + recovered[normtable[k]] = split[len(split)-1] } - if len(missingTimelines) > 0 { - // get from db - dbItems, err := s.repository.GetChunksFromDB(ctx, missingTimelines, untilChunk) + return recovered, nil +} + +func (s *service) LoadChunkBody(ctx context.Context, query map[string]string) (map[string]core.Chunk, error) { + ctx, span := tracer.Start(ctx, "Timeline.Service.LoadChunkBody") + defer span.End() + + normalized := map[string]string{} + normtable := map[string]string{} + + for k, v := range query { + normalizedTimeline, err := s.NormalizeTimelineID(ctx, k) if err != nil { - slog.ErrorContext(ctx, "failed to get chunks from db", slog.String("error", err.Error()), slog.String("module", "timeline")) - span.RecordError(err) - return nil, err - } - // merge - for k, v := range dbItems { - items[k] = v + slog.WarnContext( + ctx, + fmt.Sprintf("failed to normalize timeline: %s", k), + slog.String("module", "timeline"), + ) + continue } + normalized[normalizedTimeline] = v + normtable[normalizedTimeline] = k } - return items, nil + result, err := s.repository.LoadChunkBodies(ctx, normalized) + if err != nil { + span.RecordError(err) + return nil, err + } + + recovered := map[string]core.Chunk{} + for k, v := range result { + recovered[normtable[k]] = v + } + + return recovered, nil } func (s *service) GetRecentItemsFromSubscription(ctx context.Context, subscription string, until time.Time, limit int) ([]core.TimelineItem, error) { @@ -281,96 +284,129 @@ func (s *service) GetRecentItemsFromSubscription(ctx context.Context, subscripti return s.GetRecentItems(ctx, timelines, until, limit) } +type QueueItem struct { + Timeline string + Epoch string + Item core.TimelineItem + Index int +} + +type PriorityQueue []*QueueItem + +func (pq PriorityQueue) Len() int { return len(pq) } +func (pq PriorityQueue) Less(i, j int) bool { + return pq[i].Item.CDate.After(pq[j].Item.CDate) 
+} +func (pq PriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} +func (pq *PriorityQueue) Push(x interface{}) { + item := x.(*QueueItem) + *pq = append(*pq, item) +} +func (pq *PriorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + *pq = old[0 : n-1] + return item +} + // GetRecentItems returns recent message from timelines func (s *service) GetRecentItems(ctx context.Context, timelines []string, until time.Time, limit int) ([]core.TimelineItem, error) { ctx, span := tracer.Start(ctx, "Timeline.Service.GetRecentItems") defer span.End() - var domainMap = make(map[string][]string) + span.SetAttributes(attribute.StringSlice("timelines", timelines)) - // normalize timelineID and validate - for i, timeline := range timelines { - normalized, err := s.NormalizeTimelineID(ctx, timeline) - if err != nil { + epoch := core.Time2Chunk(until) + chunks, err := s.GetChunks(ctx, timelines, epoch) + if err != nil { + span.RecordError(err) + return nil, err + } + + span.SetAttributes(attribute.Int("chunks", len(chunks))) + + pq := make(PriorityQueue, 0) + heap.Init(&pq) + + for timeline, chunk := range chunks { + + if len(chunk.Items) <= 0 { + span.AddEvent(fmt.Sprintf("empty chunk: %s", timeline)) continue } - split := strings.Split(normalized, "@") - domain := split[len(split)-1] - if len(split) >= 2 { - if _, ok := domainMap[domain]; !ok { - domainMap[domain] = make([]string, 0) - } - if domain == s.config.FQDN { - domainMap[domain] = append(domainMap[domain], split[0]) - } else { - domainMap[domain] = append(domainMap[domain], timeline) - } + index := sort.Search(len(chunk.Items), func(i int) bool { + return chunk.Items[i].CDate.Before(until) + }) + + if index >= len(chunk.Items) { + span.AddEvent(fmt.Sprintf("no item in target range: %s", timeline)) + continue } - timelines[i] = normalized - } + heap.Push(&pq, &QueueItem{ + Timeline: timeline, + Epoch: epoch, + Item: chunk.Items[index], + Index: index, + }) + } + + var result 
[]core.TimelineItem + var uniq = make(map[string]bool) + + var itrlimit = 1000 + for len(result) < limit && pq.Len() > 0 && itrlimit > 0 { + itrlimit-- + smallest := heap.Pop(&pq).(*QueueItem) + _, exists := uniq[smallest.Item.ResourceID] + if !exists { + result = append(result, smallest.Item) + uniq[smallest.Item.ResourceID] = true + } - // first, try to get from cache regardless of local or remote - untilChunk := core.Time2Chunk(until) - items, err := s.repository.GetChunksFromCache(ctx, timelines, untilChunk) - if err != nil { - slog.ErrorContext(ctx, "failed to get chunks from cache", slog.String("error", err.Error()), slog.String("module", "timeline")) - span.RecordError(err) - return nil, err - } + nextIndex := smallest.Index + 1 + timeline := smallest.Timeline - for host, timelines := range domainMap { - if host == s.config.FQDN { - chunks, err := s.repository.GetChunksFromDB(ctx, timelines, untilChunk) - if err != nil { - slog.ErrorContext(ctx, "failed to get chunks from db", slog.String("error", err.Error()), slog.String("module", "timeline")) - span.RecordError(err) - return nil, err - } - for timeline, chunk := range chunks { - items[timeline] = chunk - } + if nextIndex < len(chunks[timeline].Items) { + heap.Push(&pq, &QueueItem{ + Timeline: timeline, + Epoch: smallest.Epoch, + Item: chunks[timeline].Items[nextIndex], + Index: nextIndex, + }) } else { - chunks, err := s.repository.GetChunksFromRemote(ctx, host, timelines, until) + prevEpoch := core.Time2Chunk(smallest.Item.CDate) + if prevEpoch == smallest.Epoch { + prevEpoch = core.PrevChunk(prevEpoch) + } + prevChunks, err := s.GetChunks(ctx, []string{timeline}, prevEpoch) if err != nil { - slog.ErrorContext(ctx, "failed to get chunks from remote", slog.String("error", err.Error()), slog.String("module", "timeline")) span.RecordError(err) continue } - for timeline, chunk := range chunks { - items[timeline] = chunk + if prevChunk, ok := prevChunks[timeline]; ok { + if len(prevChunk.Items) <= 0 { + 
span.AddEvent("empty chunk") + continue + } + chunks[timeline] = prevChunk + heap.Push(&pq, &QueueItem{ + Timeline: timeline, + Epoch: prevEpoch, + Item: prevChunk.Items[0], + Index: 0, + }) } } } - // summary messages and remove earlier than until - var messages []core.TimelineItem - for _, item := range items { - for _, timelineItem := range item.Items { - if timelineItem.CDate.After(until) { - continue - } - messages = append(messages, timelineItem) - } - } + span.SetAttributes(attribute.Int("iterating", 1000-itrlimit)) - var uniq []core.TimelineItem - m := make(map[string]bool) - for _, elem := range messages { - if !m[elem.ResourceID] { - m[elem.ResourceID] = true - uniq = append(uniq, elem) - } - } - - sort.Slice(uniq, func(l, r int) bool { - return uniq[l].CDate.After(uniq[r].CDate) - }) - - chopped := uniq[:min(len(uniq), limit)] - - return chopped, nil + return result, nil } func (s *service) GetImmediateItemsFromSubscription(ctx context.Context, subscription string, since time.Time, limit int) ([]core.TimelineItem, error) { @@ -541,10 +577,10 @@ func (s *service) Event(ctx context.Context, mode core.CommitMode, document, sig event := core.Event{ Timeline: doc.Timeline, - Item: doc.Item, + Item: &doc.Item, Document: doc.Document, Signature: doc.Signature, - Resource: doc.Resource, + Resource: &doc.Resource, } return event, s.repository.PublishEvent(ctx, event) @@ -1044,3 +1080,63 @@ func (s *service) Query(ctx context.Context, timelineID, schema, owner, author s return items, nil } + +var ( + lookupChunkItrsTotal *prometheus.GaugeVec + loadChunkBodiesTotal *prometheus.GaugeVec + timelineRealtimeConnectionMetrics prometheus.Gauge + outerConnection *prometheus.GaugeVec +) + +func (s *service) UpdateMetrics() { + + metrics := s.repository.GetMetrics() + + if lookupChunkItrsTotal == nil { + lookupChunkItrsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "cc_timeline_lookup_chunk_itr_total", + Help: "Total number of lookup chunk iterators", + }, 
[]string{"status"}) + prometheus.MustRegister(lookupChunkItrsTotal) + } + + lookupChunkItrsTotal.WithLabelValues("hit").Set(float64(metrics["lookup_chunk_itr_cache_hits"])) + lookupChunkItrsTotal.WithLabelValues("miss").Set(float64(metrics["lookup_chunk_itr_cache_misses"])) + + if loadChunkBodiesTotal == nil { + loadChunkBodiesTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "cc_timeline_load_chunk_bodies_total", + Help: "Total number of load chunk bodies", + }, []string{"status"}) + prometheus.MustRegister(loadChunkBodiesTotal) + } + + loadChunkBodiesTotal.WithLabelValues("hit").Set(float64(metrics["load_chunk_bodies_cache_hits"])) + loadChunkBodiesTotal.WithLabelValues("miss").Set(float64(metrics["load_chunk_bodies_cache_misses"])) + + if timelineRealtimeConnectionMetrics == nil { + timelineRealtimeConnectionMetrics = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "cc_timeline_realtime_connections", + Help: "Number of realtime connections", + }, + ) + prometheus.MustRegister(timelineRealtimeConnectionMetrics) + } + + timelineRealtimeConnectionMetrics.Set(float64(atomic.LoadInt64(&s.socketCounter))) + + if outerConnection == nil { + outerConnection = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cc_timeline_outer_connections", + Help: "Number of outer connections", + }, + []string{"type"}, + ) + prometheus.MustRegister(outerConnection) + } + + outerConnection.WithLabelValues("desired").Set(float64(metrics["remoteSubs"])) + outerConnection.WithLabelValues("current").Set(float64(metrics["remoteConns"])) +} diff --git a/x/timeline/service_test.go b/x/timeline/service_test.go new file mode 100644 index 00000000..d6de5d5f --- /dev/null +++ b/x/timeline/service_test.go @@ -0,0 +1,445 @@ +package timeline + +import ( + "context" + "fmt" + "strconv" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/totegamma/concurrent/core" + "github.com/totegamma/concurrent/core/mock" + 
"github.com/totegamma/concurrent/x/timeline/mock" + "go.uber.org/mock/gomock" +) + +func TestGetRecentItemsSimple(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + pivotEpoch := "6000" + pivotTime := core.EpochTime("6300") + prevEpoch := "5400" + + mockRepo := mock_timeline.NewMockRepository(ctrl) + mockRepo.EXPECT(). + GetNormalizationCache(gomock.Any(), "t00000000000000000000000000"). + Return("t00000000000000000000000000@local.example.com", nil).AnyTimes() + mockRepo.EXPECT(). + LookupChunkItrs(gomock.Any(), []string{"t00000000000000000000000000@local.example.com"}, pivotEpoch). + Return(map[string]string{"t00000000000000000000000000@local.example.com": pivotEpoch}, nil) + mockRepo.EXPECT(). + LoadChunkBodies(gomock.Any(), map[string]string{"t00000000000000000000000000@local.example.com": pivotEpoch}). + Return(map[string]core.Chunk{ + "t00000000000000000000000000@local.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m00000000000000000000006302", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6302"), + }, + { + ResourceID: "m00000000000000000000006301", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6301"), + }, + { + ResourceID: "m00000000000000000000006300", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6300"), + }, + { + ResourceID: "m00000000000000000000006299", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6299"), + }, + { + ResourceID: "m00000000000000000000006298", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6298"), + }, + }, + }, + }, nil) + mockRepo.EXPECT(). 
+ LookupChunkItrs(gomock.Any(), []string{"t00000000000000000000000000@local.example.com"}, prevEpoch). + Return(map[string]string{"t00000000000000000000000000@local.example.com": prevEpoch}, nil) + mockRepo.EXPECT(). + LoadChunkBodies(gomock.Any(), map[string]string{"t00000000000000000000000000@local.example.com": prevEpoch}). + Return(nil, errors.New("not found")) + + mockEntity := mock_core.NewMockEntityService(ctrl) + mockDomain := mock_core.NewMockDomainService(ctrl) + mockSemantic := mock_core.NewMockSemanticIDService(ctrl) + mockSubscription := mock_core.NewMockSubscriptionService(ctrl) + mockPolicy := mock_core.NewMockPolicyService(ctrl) + + service := NewService( + mockRepo, + mockEntity, + mockDomain, + mockSemantic, + mockSubscription, + mockPolicy, + core.Config{ + FQDN: "local.example.com", + }, + ) + + ctx := context.Background() + + items, err := service.GetRecentItems(ctx, []string{"t00000000000000000000000000"}, pivotTime, 16) + assert.NoError(t, err) + + assert.Len(t, items, 2) +} + +func TestGetRecentItemsLoadMore(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + pivotEpoch := "6000" + pivotTime := core.EpochTime("6300") + prevEpoch := "5400" + + mockRepo := mock_timeline.NewMockRepository(ctrl) + mockRepo.EXPECT(). + GetNormalizationCache(gomock.Any(), "t00000000000000000000000000"). + Return("t00000000000000000000000000@local.example.com", nil).AnyTimes() + mockRepo.EXPECT(). + LookupChunkItrs(gomock.Any(), []string{"t00000000000000000000000000@local.example.com"}, pivotEpoch). 
+ Return(map[string]string{"t00000000000000000000000000@local.example.com": pivotEpoch}, nil) + + chunk6000 := []core.TimelineItem{} + for i := 0; i < 16; i++ { + epoch := 6308 - i + chunk6000 = append(chunk6000, core.TimelineItem{ + ResourceID: fmt.Sprintf("m0000000000000000000000%04d", epoch), + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime(strconv.Itoa(epoch)), + }) + } + + chunk5400 := []core.TimelineItem{} + for i := 0; i < 16; i++ { + epoch := 5399 - i + chunk5400 = append(chunk5400, core.TimelineItem{ + ResourceID: fmt.Sprintf("m0000000000000000000000%04d", epoch), + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime(strconv.Itoa(epoch)), + }) + } + + mockRepo.EXPECT(). + LoadChunkBodies(gomock.Any(), map[string]string{"t00000000000000000000000000@local.example.com": pivotEpoch}). + Return(map[string]core.Chunk{ + "t00000000000000000000000000@local.example.com": { + Epoch: pivotEpoch, + Items: chunk6000, + }, + }, nil) + + mockRepo.EXPECT(). + LookupChunkItrs(gomock.Any(), []string{"t00000000000000000000000000@local.example.com"}, prevEpoch). + Return(map[string]string{"t00000000000000000000000000@local.example.com": prevEpoch}, nil) + mockRepo.EXPECT(). + LoadChunkBodies(gomock.Any(), map[string]string{"t00000000000000000000000000@local.example.com": prevEpoch}). 
+ Return(map[string]core.Chunk{ + "t00000000000000000000000000@local.example.com": { + Epoch: pivotEpoch, + Items: chunk5400, + }, + }, nil) + + mockEntity := mock_core.NewMockEntityService(ctrl) + mockDomain := mock_core.NewMockDomainService(ctrl) + mockSemantic := mock_core.NewMockSemanticIDService(ctrl) + mockSubscription := mock_core.NewMockSubscriptionService(ctrl) + mockPolicy := mock_core.NewMockPolicyService(ctrl) + + service := NewService( + mockRepo, + mockEntity, + mockDomain, + mockSemantic, + mockSubscription, + mockPolicy, + core.Config{ + FQDN: "local.example.com", + }, + ) + + ctx := context.Background() + + items, err := service.GetRecentItems(ctx, []string{"t00000000000000000000000000"}, pivotTime, 16) + assert.NoError(t, err) + + assert.Len(t, items, 16) + + expected := []string{} + for i := 0; i < 7; i++ { + epoch := 6299 - i + expected = append(expected, fmt.Sprintf("m0000000000000000000000%04d", epoch)) + } + for i := 0; i < 9; i++ { + epoch := 5399 - i + expected = append(expected, fmt.Sprintf("m0000000000000000000000%04d", epoch)) + } + + for i, item := range items { + assert.Equal(t, expected[i], item.ResourceID) + } +} + +func TestGetRecentItemsWide(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + pivotEpoch := "6000" + pivotTime := core.EpochTime("6300") + + mockRepo := mock_timeline.NewMockRepository(ctrl) + mockRepo.EXPECT(). + GetNormalizationCache(gomock.Any(), "t00000000000000000000000000"). + Return("t00000000000000000000000000@local.example.com", nil).AnyTimes() + mockRepo.EXPECT(). + GetNormalizationCache(gomock.Any(), "test@con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2"). + Return("t11111111111111111111111111@local.example.com", nil).AnyTimes() + mockRepo.EXPECT(). + GetNormalizationCache(gomock.Any(), "taaaaaaaaaaaaaaaaaaaaaaaaaa@remote.example.com"). + Return("taaaaaaaaaaaaaaaaaaaaaaaaaa@remote.example.com", nil).AnyTimes() + mockRepo.EXPECT(). 
+ GetNormalizationCache(gomock.Any(), "test@con1jmcread5dear85emug5gh3wvaf6st9av0kuxaj"). + Return("tbbbbbbbbbbbbbbbbbbbbbbbbbb@remote.example.com", nil).AnyTimes() + + mockRepo.EXPECT(). + LookupChunkItrs(gomock.Any(), []string{ + "t00000000000000000000000000@local.example.com", + "t11111111111111111111111111@local.example.com", + "taaaaaaaaaaaaaaaaaaaaaaaaaa@remote.example.com", + "tbbbbbbbbbbbbbbbbbbbbbbbbbb@remote.example.com", + }, pivotEpoch). + Return(map[string]string{ + "t00000000000000000000000000@local.example.com": pivotEpoch, + "t11111111111111111111111111@local.example.com": pivotEpoch, + "taaaaaaaaaaaaaaaaaaaaaaaaaa@remote.example.com": pivotEpoch, + "tbbbbbbbbbbbbbbbbbbbbbbbbbb@remote.example.com": pivotEpoch, + }, nil) + + mockRepo.EXPECT(). + LoadChunkBodies(gomock.Any(), map[string]string{ + "t00000000000000000000000000@local.example.com": pivotEpoch, + "t11111111111111111111111111@local.example.com": pivotEpoch, + "taaaaaaaaaaaaaaaaaaaaaaaaaa@remote.example.com": pivotEpoch, + "tbbbbbbbbbbbbbbbbbbbbbbbbbb@remote.example.com": pivotEpoch, + }). 
+ Return(map[string]core.Chunk{ + "t00000000000000000000000000@local.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m00000000000000000000000000", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6099"), + }, + { + ResourceID: "m00000000000000000000000001", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6098"), + }, + { + ResourceID: "m00000000000000000000000002", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6097"), + }, + { + ResourceID: "m00000000000000000000000003", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6096"), + }, + { + ResourceID: "m00000000000000000000000099", + TimelineID: "t00000000000000000000000000", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6004"), + }, + }, + }, + "t11111111111111111111111111@local.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "m11111111111111111111111110", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6089"), + }, + { + ResourceID: "m11111111111111111111111111", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6088"), + }, + { + ResourceID: "m11111111111111111111111112", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6087"), + }, + { + ResourceID: "m11111111111111111111111113", + TimelineID: "t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6086"), + }, + { + ResourceID: "m11111111111111111111111199", + TimelineID: 
"t11111111111111111111111111", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6003"), + }, + }, + }, + "taaaaaaaaaaaaaaaaaaaaaaaaaa@remote.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "maaaaaaaaaaaaaaaaaaaaaaaaa0", + TimelineID: "taaaaaaaaaaaaaaaaaaaaaaaaaa", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6079"), + }, + { + ResourceID: "maaaaaaaaaaaaaaaaaaaaaaaaa1", + TimelineID: "taaaaaaaaaaaaaaaaaaaaaaaaaa", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6078"), + }, + { + ResourceID: "maaaaaaaaaaaaaaaaaaaaaaaaa2", + TimelineID: "taaaaaaaaaaaaaaaaaaaaaaaaaa", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6077"), + }, + { + ResourceID: "maaaaaaaaaaaaaaaaaaaaaaaaa3", + TimelineID: "taaaaaaaaaaaaaaaaaaaaaaaaaa", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6076"), + }, + { + ResourceID: "maaaaaaaaaaaaaaaaaaaaaaaa99", + TimelineID: "taaaaaaaaaaaaaaaaaaaaaaaaaa", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6002"), + }, + }, + }, + "tbbbbbbbbbbbbbbbbbbbbbbbbbb@remote.example.com": { + Epoch: pivotEpoch, + Items: []core.TimelineItem{ + { + ResourceID: "mbbbbbbbbbbbbbbbbbbbbbbbbb0", + TimelineID: "tbbbbbbbbbbbbbbbbbbbbbbbbbb", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6069"), + }, + { + ResourceID: "mbbbbbbbbbbbbbbbbbbbbbbbbb1", + TimelineID: "tbbbbbbbbbbbbbbbbbbbbbbbbbb", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6068"), + }, + { + ResourceID: "mbbbbbbbbbbbbbbbbbbbbbbbbb2", + TimelineID: "tbbbbbbbbbbbbbbbbbbbbbbbbbb", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6067"), + }, + { + ResourceID: "mbbbbbbbbbbbbbbbbbbbbbbbbb3", + TimelineID: "tbbbbbbbbbbbbbbbbbbbbbbbbbb", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", 
+ CDate: core.EpochTime("6066"), + }, + { + ResourceID: "mbbbbbbbbbbbbbbbbbbbbbbbb99", + TimelineID: "tbbbbbbbbbbbbbbbbbbbbbbbbbb", + Owner: "con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + CDate: core.EpochTime("6001"), + }, + }, + }, + }, nil) + + mockEntity := mock_core.NewMockEntityService(ctrl) + mockDomain := mock_core.NewMockDomainService(ctrl) + mockSemantic := mock_core.NewMockSemanticIDService(ctrl) + mockSubscription := mock_core.NewMockSubscriptionService(ctrl) + mockPolicy := mock_core.NewMockPolicyService(ctrl) + + service := NewService( + mockRepo, + mockEntity, + mockDomain, + mockSemantic, + mockSubscription, + mockPolicy, + core.Config{ + FQDN: "local.example.com", + }, + ) + + ctx := context.Background() + + items, err := service.GetRecentItems( + ctx, + []string{ + "t00000000000000000000000000", + "test@con1t0tey8uxhkqkd4wcp4hd4jedt7f0vfhk29xdd2", + "taaaaaaaaaaaaaaaaaaaaaaaaaa@remote.example.com", + "test@con1jmcread5dear85emug5gh3wvaf6st9av0kuxaj", + }, + pivotTime, + 16, + ) + assert.NoError(t, err) + + assert.Len(t, items, 16) + + expected := []string{ + "m00000000000000000000000000", + "m00000000000000000000000001", + "m00000000000000000000000002", + "m00000000000000000000000003", + "m11111111111111111111111110", + "m11111111111111111111111111", + "m11111111111111111111111112", + "m11111111111111111111111113", + "maaaaaaaaaaaaaaaaaaaaaaaaa0", + "maaaaaaaaaaaaaaaaaaaaaaaaa1", + "maaaaaaaaaaaaaaaaaaaaaaaaa2", + "maaaaaaaaaaaaaaaaaaaaaaaaa3", + "mbbbbbbbbbbbbbbbbbbbbbbbbb0", + "mbbbbbbbbbbbbbbbbbbbbbbbbb1", + "mbbbbbbbbbbbbbbbbbbbbbbbbb2", + "mbbbbbbbbbbbbbbbbbbbbbbbbb3", + } + + for i, item := range items { + assert.Equal(t, expected[i], item.ResourceID) + } +}