Skip to content

Commit

Permalink
Merge pull request #198 from utxostack/feat/cluster-n-redis
Browse files Browse the repository at this point in the history
feat: use cluster and add redis for bullmq queue
  • Loading branch information
ahonn authored Sep 25, 2024
2 parents bbc3172 + 01830d0 commit 143021d
Show file tree
Hide file tree
Showing 10 changed files with 158 additions and 19 deletions.
14 changes: 12 additions & 2 deletions .github/workflows/backend-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest

services:
redis:
redis-cache:
image: redis
options: >-
--health-cmd "redis-cli ping"
Expand All @@ -23,6 +23,15 @@ jobs:
--health-retries 5
ports:
- 6379:6379
redis-queue:
image: redis
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 6380:6379

steps:
- name: Checkout code
Expand Down Expand Up @@ -57,7 +66,8 @@ jobs:
          echo BITCOIN_ELECTRS_API_URL="${{ secrets.BITCOIN_ELECTRS_API_URL }}" >> .env
echo CKB_EXPLORER_API_URL="${{ secrets.CKB_EXPLORER_API_URL }}" >> .env
echo CKB_RPC_WEBSOCKET_URL="${{ secrets.CKB_RPC_WEBSOCKET_URL }}" >> .env
echo REDIS_URL="redis://localhost:6379" >> .env
echo REDIS_CACHE_URL="redis://localhost:6379" >> .env
echo REDIS_QUEUE_URL="redis://localhost:6380" >> .env
echo DATABASE_URL="postgres://postgres:postgres@postgres:5432/explorer?sslmode=disable" >> .env
cat .env
pnpm run test
Expand Down
80 changes: 80 additions & 0 deletions backend/redis-queue.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
# Redis configuration
#
# Example: https://raw.githubusercontent.com/redis/redis/7.4/redis.conf

################################## NETWORK #####################################
bind 0.0.0.0

################################ SNAPSHOTTING ################################

# Save the DB to disk.
#
# save <seconds> <changes> [<seconds> <changes> ...]
#
# Redis will save the DB if the given number of seconds elapsed and it
# surpassed the given number of write operations against the DB.
#
# Snapshotting can be completely disabled with a single empty string argument
# as in following example:
#
# save ""
#
# Unless specified otherwise, by default Redis will save the DB:
# * After 3600 seconds (an hour) if at least 1 change was performed
# * After 300 seconds (5 minutes) if at least 100 changes were performed
# * After 60 seconds if at least 10000 changes were performed
#
# You can set these explicitly by uncommenting the following line.
#
save 3600 1 300 100 60 10000

############################## APPEND ONLY MODE ###############################

# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes being lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# goes wrong with the Redis process itself, while the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check https://redis.io/topics/persistence for more information.
appendonly yes

# Redis can create append-only base files in either RDB or AOF formats. Using
# the RDB format is always faster and more efficient, and disabling it is only
# supported for backward compatibility purposes.
aof-use-rdb-preamble yes

# Set a memory usage limit to the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU or LFU cache, or to
# set a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have replicas attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the replicas are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of replicas is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have replicas attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for replica
# output buffers (but this is not needed if the policy is 'noeviction').
maxmemory 2gb
maxmemory-policy noeviction
4 changes: 2 additions & 2 deletions backend/src/app.module.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ import { BootstrapService } from './bootstrap.service';
imports: [ConfigModule],
useFactory: async (configService: ConfigService<Env>) => {
const store = (await redisStore({
url: configService.get('REDIS_URL'),
url: configService.get('REDIS_CACHE_URL'),
isCacheable: (value) => value !== undefined,
})) as unknown as CacheStore;
return {
Expand All @@ -36,7 +36,7 @@ import { BootstrapService } from './bootstrap.service';
BullModule.forRootAsync({
imports: [ConfigModule],
useFactory: async (configService: ConfigService<Env>) => {
const url = new URL(configService.get('REDIS_URL')!);
const url = new URL(configService.get('REDIS_QUEUE_URL')!);
return {
connection: {
host: url.hostname,
Expand Down
18 changes: 17 additions & 1 deletion backend/src/bootstrap.service.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import { Injectable, Logger } from '@nestjs/common';
import { PrismaService } from './core/database/prisma/prisma.service';
import { IndexerServiceFactory } from './core/indexer/indexer.factory';
import cluster from 'node:cluster';

@Injectable()
export class BootstrapService {
Expand All @@ -9,7 +10,22 @@ export class BootstrapService {
constructor(
private prismaService: PrismaService,
private IndexerServiceFactory: IndexerServiceFactory,
) {}
) { }

public async bootstrap() {
if (cluster.isPrimary) {
cluster.fork();
cluster.on('exit', (worker, code, signal) => {
this.logger.error(
`Worker ${worker.process.pid} died with code ${code} and signal ${signal}`,
);
this.logger.log('Starting a new worker');
cluster.fork();
});
} else {
await this.bootstrapAssetsIndex();
}
}

public async bootstrapAssetsIndex() {
const chains = await this.prismaService.chain.findMany();
Expand Down
4 changes: 1 addition & 3 deletions backend/src/core/indexer/flow/assets.flow.ts
Original file line number Diff line number Diff line change
Expand Up @@ -88,9 +88,7 @@ export class IndexerAssetsFlow extends EventEmitter {

private setupBlockAssetsIndexedListener() {
this.on(IndexerAssetsEvent.BlockAssetsIndexed, () => {
setTimeout(() => {
this.startBlockAssetsIndexing();
}, 1000 * 10);
setTimeout(this.startBlockAssetsIndexing.bind(this), 1000 * 10);
});
}
}
4 changes: 1 addition & 3 deletions backend/src/core/indexer/flow/transactions.flow.ts
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,7 @@ export class IndexerTransactionsFlow extends EventEmitter {

private setupBlockAssetsIndexedListener() {
this.on(IndexerTransactionsEvent.BlockIndexed, () => {
setTimeout(() => {
this.startBlockAssetsIndexing();
}, 1000 * 10);
setTimeout(this.startBlockAssetsIndexing.bind(this), 1000 * 10);
});
}
}
3 changes: 2 additions & 1 deletion backend/src/env.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,8 @@ export const envSchema = z
}),

DATABASE_URL: z.string(),
REDIS_URL: z.string(),
REDIS_CACHE_URL: z.string(),
REDIS_QUEUE_URL: z.string(),

BITCOIN_PRIMARY_DATA_PROVIDER: z.enum(['mempool', 'electrs']).default('mempool'),

Expand Down
5 changes: 4 additions & 1 deletion backend/src/main.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import { FastifyAdapter, NestFastifyApplication } from '@nestjs/platform-fastify
import { envSchema } from './env';
import { BootstrapService } from './bootstrap.service';
import { LogLevel } from '@nestjs/common';
import cluster from 'node:cluster';

const env = envSchema.parse(process.env);
const LOGGER_LEVELS: LogLevel[] = ['verbose', 'debug', 'log', 'warn', 'error'];
Expand Down Expand Up @@ -39,6 +40,8 @@ async function bootstrap() {
});
}

await app.listen(3000, '0.0.0.0');
if (cluster.isPrimary) {
await app.listen(3000, '0.0.0.0');
}
}
bootstrap();
24 changes: 20 additions & 4 deletions docker-compose-preview.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
services:
preview-explorer-backend:
depends_on:
preview-redis:
preview-redis-cache:
condition: service_started
preview-redis-queue:
condition: service_started
preview-postgres:
condition: service_healthy
Expand All @@ -19,18 +21,31 @@ services:
networks:
- preview

preview-redis:
preview-redis-cache:
# https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
image: redis:7-alpine
restart: unless-stopped
volumes:
# Redis' WORKDIR is /data
- preview-redis-data:/data
- preview-redis-cache-data:/data
- ./backend/redis.conf:/usr/local/etc/redis/redis.conf:ro
command: /usr/local/etc/redis/redis.conf
networks:
- preview


preview-redis-queue:
# https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
image: redis:7-alpine
restart: unless-stopped
volumes:
# Redis' WORKDIR is /data
- preview-redis-queue-data:/data
- ./backend/redis-queue.conf:/usr/local/etc/redis/redis.conf:ro
command: /usr/local/etc/redis/redis.conf
networks:
- preview

preview-postgres:
image: postgres:13
env_file:
Expand All @@ -47,7 +62,8 @@ services:
- preview

volumes:
preview-redis-data:
preview-redis-cache-data:
preview-redis-queue-data:
preview-pg-volume:

networks:
Expand Down
21 changes: 19 additions & 2 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,9 @@ services:
ports:
- '3000:3000'
depends_on:
redis:
redis-cache:
condition: service_started
redis-queue:
condition: service_started
postgres:
condition: service_healthy
Expand All @@ -21,7 +23,7 @@ services:
networks:
- internal

redis:
redis-cache:
# https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
image: redis:7-alpine
restart: unless-stopped
Expand All @@ -35,6 +37,20 @@ services:
networks:
- internal

redis-queue:
# https://github.com/docker-library/redis/blob/b77450d/7.4/alpine/Dockerfile
image: redis:7-alpine
restart: unless-stopped
ports:
- '127.0.0.1:6380:6379'
command: /usr/local/etc/redis/redis.conf
volumes:
# Redis' WORKDIR is /data
- redis-queue-data:/data
- ./backend/redis-queue.conf:/usr/local/etc/redis/redis.conf:ro
networks:
- internal

postgres:
image: postgres:13
env_file:
Expand All @@ -54,6 +70,7 @@ services:

volumes:
redis-data:
redis-queue-data:
postgres_volume:

networks:
Expand Down

0 comments on commit 143021d

Please sign in to comment.