From a7674b2e1ca0ee9519e35ca48c68db60874e78f2 Mon Sep 17 00:00:00 2001
From: ahonn
Date: Wed, 25 Sep 2024 20:26:44 +1000
Subject: [PATCH 1/3] feat: use cluster and add redis for bullmq queue

---
 backend/redis-queue.conf         | 80 ++++++++++++++++++++++++++++++++
 backend/src/app.module.ts        |  4 +-
 backend/src/bootstrap.service.ts |  9 ++++
 backend/src/env.ts               |  3 +-
 backend/src/main.ts              |  5 +-
 docker-compose-preview.yaml      | 24 ++++++++--
 docker-compose.yaml              | 21 ++++++++-
 7 files changed, 136 insertions(+), 10 deletions(-)
 create mode 100644 backend/redis-queue.conf

diff --git a/backend/redis-queue.conf b/backend/redis-queue.conf
new file mode 100644
index 00000000..06175924
--- /dev/null
+++ b/backend/redis-queue.conf
@@ -0,0 +1,80 @@
+# Redis configuration
+#
+# Example: https://raw.githubusercontent.com/redis/redis/7.4/redis.conf
+
+################################## NETWORK #####################################
+bind 0.0.0.0
+
+################################ SNAPSHOTTING ################################
+
+# Save the DB to disk.
+#
+# save <seconds> <changes> [<seconds> <changes> ...]
+#
+# Redis will save the DB if the given number of seconds elapsed and it
+# surpassed the given number of write operations against the DB.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+# save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+#   * After 3600 seconds (an hour) if at least 1 change was performed
+#   * After 300 seconds (5 minutes) if at least 100 changes were performed
+#   * After 60 seconds if at least 10000 changes were performed
+#
+# You can set these explicitly by uncommenting the following line.
+#
+save 3600 1 300 100 60 10000
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check https://redis.io/topics/persistence for more information.
+appendonly yes
+
+# Redis can create append-only base files in either RDB or AOF formats. Using
+# the RDB format is always faster and more efficient, and disabling it is only
+# supported for backward compatibility purposes.
+aof-use-rdb-preamble yes
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+maxmemory 2gb
+maxmemory-policy noeviction
diff --git a/backend/src/app.module.ts b/backend/src/app.module.ts
index 0c11cbd8..ae754497 100644
--- a/backend/src/app.module.ts
+++ b/backend/src/app.module.ts
@@ -24,7 +24,7 @@ import { BootstrapService } from './bootstrap.service';
       imports: [ConfigModule],
       useFactory: async (configService: ConfigService) => {
         const store = (await redisStore({
-          url: configService.get('REDIS_URL'),
+          url: configService.get('REDIS_CACHE_URL'),
           isCacheable: (value) => value !== undefined,
         })) as unknown as CacheStore;
         return {
@@ -36,7 +36,7 @@ import { BootstrapService } from './bootstrap.service';
     BullModule.forRootAsync({
       imports: [ConfigModule],
       useFactory: async (configService: ConfigService) => {
-        const url = new URL(configService.get('REDIS_URL')!);
+        const url = new URL(configService.get('REDIS_QUEUE_URL')!);
         return {
           connection: {
             host: url.hostname,
diff --git a/backend/src/bootstrap.service.ts b/backend/src/bootstrap.service.ts
index 7a01c186..d40f0212 100644
--- a/backend/src/bootstrap.service.ts
+++ b/backend/src/bootstrap.service.ts
@@ -1,6 +1,7 @@
 import { Injectable, Logger } from '@nestjs/common';
 import { PrismaService } from './core/database/prisma/prisma.service';
 import { IndexerServiceFactory } from './core/indexer/indexer.factory';
+import cluster from 'node:cluster';
 
 @Injectable()
 export class BootstrapService {
@@ -11,6 +12,14 @@ export class BootstrapService {
     private IndexerServiceFactory: IndexerServiceFactory,
   ) {}
 
+  public async bootstrap() {
+    if (cluster.isPrimary) {
+      cluster.fork();
+    } else {
+      await this.bootstrapAssetsIndex();
+    }
+  }
+
   public async bootstrapAssetsIndex() {
     const chains = await this.prismaService.chain.findMany();
     for (const chain of chains) {
diff --git a/backend/src/env.ts b/backend/src/env.ts
index 1ab52b6b..348dfdb4 100644
--- a/backend/src/env.ts
+++ b/backend/src/env.ts
@@ -24,7 +24,8 @@ export const envSchema = z
     }),
 
     DATABASE_URL: z.string(),
-    REDIS_URL: z.string(),
+    REDIS_CACHE_URL: z.string(),
+    REDIS_QUEUE_URL: z.string(),
 
     BITCOIN_PRIMARY_DATA_PROVIDER: z.enum(['mempool', 'electrs']).default('mempool'),
 
diff --git a/backend/src/main.ts b/backend/src/main.ts
index acb704c2..86dde450 100644
--- a/backend/src/main.ts
+++ b/backend/src/main.ts
@@ -5,6 +5,7 @@ import { FastifyAdapter, NestFastifyApplication } from '@nestjs/platform-fastify';
 import { envSchema } from './env';
 import { BootstrapService } from './bootstrap.service';
 import { LogLevel } from '@nestjs/common';
+import cluster from 'node:cluster';
 
 const env = envSchema.parse(process.env);
 const LOGGER_LEVELS: LogLevel[] = ['verbose', 'debug', 'log', 'warn', 'error'];
@@ -39,6 +40,8 @@ async function bootstrap() {
     });
   }
 
-  await app.listen(3000, '0.0.0.0');
+  if (cluster.isPrimary) {
+    await app.listen(3000, '0.0.0.0');
+  }
 }
 bootstrap();
diff --git a/docker-compose-preview.yaml b/docker-compose-preview.yaml
index 6bf1b731..6c755de4 100644
--- a/docker-compose-preview.yaml
+++ b/docker-compose-preview.yaml
@@ -1,7 +1,9 @@
 services:
   preview-explorer-backend:
     depends_on:
-      preview-redis:
+      preview-redis-cache:
+        condition: service_started
+      preview-redis-queue:
         condition: service_started
       preview-postgres:
         condition: service_healthy
@@ -19,18 +21,31 @@ services:
     networks:
       - preview
 
-  preview-redis:
+  preview-redis-cache:
     # https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
     image: redis:7-alpine
     restart: unless-stopped
     volumes:
       # Redis' WORKDIR is /data
-      - preview-redis-data:/data
+      - preview-redis-cache-data:/data
       - ./backend/redis.conf:/usr/local/etc/redis/redis.conf:ro
     command: /usr/local/etc/redis/redis.conf
     networks:
       - preview
 
+  preview-redis-queue:
+    # https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
+    image: redis:7-alpine
+    restart: unless-stopped
+    volumes:
+      # Redis' WORKDIR is /data
+      - preview-redis-queue-data:/data
+      - ./backend/redis-queue.conf:/usr/local/etc/redis/redis.conf:ro
+    command: /usr/local/etc/redis/redis.conf
+    networks:
+      - preview
+
   preview-postgres:
     image: postgres:13
     env_file:
@@ -47,7 +62,8 @@ services:
     - preview
 
 volumes:
-  preview-redis-data:
+  preview-redis-cache-data:
+  preview-redis-queue-data:
   preview-pg-volume:
 
 networks:
diff --git a/docker-compose.yaml b/docker-compose.yaml
index cc7b5705..e0318143 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -8,7 +8,9 @@ services:
     ports:
       - '3000:3000'
     depends_on:
-      redis:
+      redis-cache:
+        condition: service_started
+      redis-queue:
         condition: service_started
       postgres:
         condition: service_healthy
@@ -21,7 +23,7 @@ services:
     networks:
       - internal
 
-  redis:
+  redis-cache:
     # https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
     image: redis:7-alpine
     restart: unless-stopped
@@ -35,6 +37,20 @@ services:
     networks:
       - internal
 
+  redis-queue:
+    # https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
+    image: redis:7-alpine
+    restart: unless-stopped
+    ports:
+      - '127.0.0.1:6380:6379'
+    command: /usr/local/etc/redis/redis.conf
+    volumes:
+      # Redis' WORKDIR is /data
+      - redis-queue-data:/data
+      - ./backend/redis-queue.conf:/usr/local/etc/redis/redis.conf:ro
+    networks:
+      - internal
+
   postgres:
     image: postgres:13
     env_file:
@@ -54,6 +70,7 @@ services:
 
 volumes:
   redis-data:
+  redis-queue-data:
   postgres_volume:
 
 networks:
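Note on patch 1: BullMQ keeps all job state in Redis, and its documentation recommends `maxmemory-policy noeviction` for that instance, which is why the new `redis-queue.conf` pins `noeviction` (with AOF enabled) while the cache instance keeps its own config and remains free to evict. Below is a minimal standalone sketch of the URL-to-connection parsing that `app.module.ts` now applies to `REDIS_QUEUE_URL`; the `demo` queue name, `enqueueExample` helper, and fallback URL are illustrative, not part of the patch:

```ts
import { Queue } from 'bullmq';

// Parse the queue endpoint the same way BullModule.forRootAsync does above.
// The fallback URL is hypothetical; the app requires REDIS_QUEUE_URL via zod.
const url = new URL(process.env.REDIS_QUEUE_URL ?? 'redis://localhost:6380');

// A standalone queue using the parsed host/port (illustrative queue name).
const demoQueue = new Queue('demo', {
  connection: {
    host: url.hostname,
    port: Number(url.port) || 6379,
  },
});

async function enqueueExample(): Promise<void> {
  // Jobs land in the dedicated queue instance (host port 6380 in docker-compose).
  await demoQueue.add('ping', { at: Date.now() });
}
```

Splitting cache and queue onto separate instances lets cached values be dropped under memory pressure without risking loss of queued jobs.
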
From 3fe6e2f418334e7379cd90c5f0fb3a7d78b3bc19 Mon Sep 17 00:00:00 2001
From: ahonn
Date: Wed, 25 Sep 2024 21:04:28 +1000
Subject: [PATCH 2/3] fix: remove scheduler registry

---
 backend/src/bootstrap.service.ts                   | 9 ++++++++-
 backend/src/core/indexer/flow/assets.flow.ts       | 4 +---
 backend/src/core/indexer/flow/transactions.flow.ts | 4 +---
 backend/src/core/indexer/indexer.service.ts        | 1 +
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/backend/src/bootstrap.service.ts b/backend/src/bootstrap.service.ts
index d40f0212..98b14cf6 100644
--- a/backend/src/bootstrap.service.ts
+++ b/backend/src/bootstrap.service.ts
@@ -10,11 +10,18 @@ export class BootstrapService {
   constructor(
     private prismaService: PrismaService,
     private IndexerServiceFactory: IndexerServiceFactory,
-  ) {}
+  ) { }
 
   public async bootstrap() {
     if (cluster.isPrimary) {
       cluster.fork();
+      cluster.on('exit', (worker, code, signal) => {
+        this.logger.error(
+          `Worker ${worker.process.pid} died with code ${code} and signal ${signal}`,
+        );
+        this.logger.log('Starting a new worker');
+        cluster.fork();
+      });
     } else {
       await this.bootstrapAssetsIndex();
     }
diff --git a/backend/src/core/indexer/flow/assets.flow.ts b/backend/src/core/indexer/flow/assets.flow.ts
index c020a51a..1327d561 100644
--- a/backend/src/core/indexer/flow/assets.flow.ts
+++ b/backend/src/core/indexer/flow/assets.flow.ts
@@ -88,9 +88,7 @@ export class IndexerAssetsFlow extends EventEmitter {
 
   private setupBlockAssetsIndexedListener() {
     this.on(IndexerAssetsEvent.BlockAssetsIndexed, () => {
-      setTimeout(() => {
-        this.startBlockAssetsIndexing();
-      }, 1000 * 10);
+      setTimeout(this.startBlockAssetsIndexing.bind(this), 1000 * 10);
     });
   }
 }
diff --git a/backend/src/core/indexer/flow/transactions.flow.ts b/backend/src/core/indexer/flow/transactions.flow.ts
index a34b6354..4ec9f41d 100644
--- a/backend/src/core/indexer/flow/transactions.flow.ts
+++ b/backend/src/core/indexer/flow/transactions.flow.ts
@@ -57,9 +57,7 @@ export class IndexerTransactionsFlow extends EventEmitter {
 
   private setupBlockAssetsIndexedListener() {
     this.on(IndexerTransactionsEvent.BlockIndexed, () => {
-      setTimeout(() => {
-        this.startBlockAssetsIndexing();
-      }, 1000 * 10);
+      setTimeout(this.startBlockAssetsIndexing.bind(this), 1000 * 10);
    });
   }
 }
diff --git a/backend/src/core/indexer/indexer.service.ts b/backend/src/core/indexer/indexer.service.ts
index af8da99d..96e7b0ba 100644
--- a/backend/src/core/indexer/indexer.service.ts
+++ b/backend/src/core/indexer/indexer.service.ts
@@ -4,6 +4,7 @@ import { BlockchainService } from '../blockchain/blockchain.service';
 import { PrismaService } from '../database/prisma/prisma.service';
 import { IndexerQueueService } from './indexer.queue';
 import { IndexerTransactionsFlow } from './flow/transactions.flow';
+import { SchedulerRegistry } from '@nestjs/schedule';
 
 export class IndexerService {
   public assetsFlow: IndexerAssetsFlow;
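Note on patch 2: the added `exit` handler completes a standard primary/worker supervision loop — the primary serves HTTP (patch 1 gates `app.listen` on `cluster.isPrimary`), a single forked worker runs the indexer, and a crashed worker is replaced. The `setTimeout(fn.bind(this), …)` change in the two flow classes is behavior-preserving: `bind` keeps `this` intact without the arrow-function wrapper. A self-contained sketch of the supervision pattern follows; `runIndexer` is a hypothetical stand-in for `bootstrapAssetsIndex`:

```ts
import cluster from 'node:cluster';
import process from 'node:process';

async function runIndexer(): Promise<void> {
  // Hypothetical stand-in for BootstrapService.bootstrapAssetsIndex().
}

if (cluster.isPrimary) {
  // Primary: fork one worker for background indexing and supervise it.
  cluster.fork();
  cluster.on('exit', (worker, code, signal) => {
    console.error(`Worker ${worker.process.pid} died (code=${code}, signal=${signal})`);
    console.log('Starting a new worker');
    cluster.fork(); // replace the crashed worker
  });
} else {
  // Worker: run the long-lived indexing task.
  runIndexer().catch((err) => {
    console.error(err);
    process.exit(1); // the primary's 'exit' handler will refork
  });
}
```
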
From 01830d04d932b1b222c6b3a9a08f68318a42c4c7 Mon Sep 17 00:00:00 2001
From: ahonn
Date: Wed, 25 Sep 2024 21:08:05 +1000
Subject: [PATCH 3/3] ci: update backend test

---
 .github/workflows/backend-test.yml          | 14 ++++++++++++--
 backend/src/core/indexer/indexer.service.ts |  1 -
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/backend-test.yml b/.github/workflows/backend-test.yml
index f1890091..7d354d48 100644
--- a/.github/workflows/backend-test.yml
+++ b/.github/workflows/backend-test.yml
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
 
     services:
-      redis:
+      redis-cache:
         image: redis
         options: >-
           --health-cmd "redis-cli ping"
           --health-interval 10s
           --health-timeout 5s
           --health-retries 5
         ports:
           - 6379:6379
+      redis-queue:
+        image: redis
+        options: >-
+          --health-cmd "redis-cli ping"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+        ports:
+          - 6380:6379
 
     steps:
       - name: Checkout code
@@ -57,7 +66,8 @@
           echo BITCOIN_ELECTRS_API_URL="{{ secrets.BITCOIN_ELECTRS_API_URL }}" >> .env
           echo CKB_EXPLORER_API_URL="${{ secrets.CKB_EXPLORER_API_URL }}" >> .env
           echo CKB_RPC_WEBSOCKET_URL="${{ secrets.CKB_RPC_WEBSOCKET_URL }}" >> .env
-          echo REDIS_URL="redis://localhost:6379" >> .env
+          echo REDIS_CACHE_URL="redis://localhost:6379" >> .env
+          echo REDIS_QUEUE_URL="redis://localhost:6380" >> .env
           echo DATABASE_URL="postgres://postgres:postgres@postgres:5432/explorer?sslmode=disable" >> .env
           cat .env
           pnpm run test
diff --git a/backend/src/core/indexer/indexer.service.ts b/backend/src/core/indexer/indexer.service.ts
index 96e7b0ba..af8da99d 100644
--- a/backend/src/core/indexer/indexer.service.ts
+++ b/backend/src/core/indexer/indexer.service.ts
@@ -4,7 +4,6 @@ import { BlockchainService } from '../blockchain/blockchain.service';
 import { PrismaService } from '../database/prisma/prisma.service';
 import { IndexerQueueService } from './indexer.queue';
 import { IndexerTransactionsFlow } from './flow/transactions.flow';
-import { SchedulerRegistry } from '@nestjs/schedule';
 
 export class IndexerService {
   public assetsFlow: IndexerAssetsFlow;
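Taken together, the series leaves the backend expecting two Redis endpoints, validated in `env.ts` and wired up in docker-compose and CI. A minimal sketch of the resulting contract; the sample URLs mirror the compose/CI port mappings, and the schema fragment assumes the same zod style as `env.ts`:

```ts
import { z } from 'zod';

// Mirrors the REDIS_* portion of envSchema in backend/src/env.ts.
const redisEnvSchema = z.object({
  REDIS_CACHE_URL: z.string(), // cache-manager store -> redis-cache (host port 6379)
  REDIS_QUEUE_URL: z.string(), // BullMQ connection   -> redis-queue (host port 6380)
});

// Example values matching docker-compose.yaml and the CI workflow above.
const env = redisEnvSchema.parse({
  REDIS_CACHE_URL: 'redis://localhost:6379',
  REDIS_QUEUE_URL: 'redis://localhost:6380',
});

console.log(env.REDIS_QUEUE_URL); // redis://localhost:6380
```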