# .env.example — environment configuration template for the GenLayer Studio stack.
# Copy this file to .env and fill in the placeholder values before running.
# Postgres Database Configuration
DBHOST = 'postgres'
DBNAME = 'genlayer_state'
DBUSER = 'postgres'
DBPASSWORD = 'postgres'
DBPORT = '5432'
# Logging Configuration
LOGCONFIG = 'dev' # dev/prod
FLASK_LOG_LEVEL = 'ERROR' # DEBUG/INFO/WARNING/ERROR/CRITICAL
DISABLE_INFO_LOGS_ENDPOINTS = '["ping", "eth_getTransactionByHash","gen_getContractSchemaForCode","gen_getContractSchema"]'
# JsonRPC Server Configuration
RPCPROTOCOL = 'http'
RPCHOST = 'jsonrpc'
RPCPORT = '4000'
RPCDEBUGPORT = '4678' # debugpy listening port
JSONRPC_REPLICAS = '1' # number of JsonRPC container replicas to run, used to scale up for production
# GenVM Configuration
GENVM_BIN = "/genvm/bin"
# VSCode Debug Configuration
VSCODEDEBUG = "false" # "true" or "false"
# Ollama Server Configuration
OLAMAPROTOCOL = 'http'
OLAMAHOST = 'ollama'
OLAMAPORT = '11434'
# WebRequest Server Configuration
WEBREQUESTPROTOCOL = 'http'
WEBREQUESTHOST = 'webrequest'
WEBREQUESTPORT = '5000'
WEBREQUESTSELENIUMPORT = '5001'
# NGINX Server Configuration
SERVER_NAME = 'studio.genlayer.com'
# Frontend Configuration
# If you want to run the frontend in production, change http to https and ws to wss
VITE_JSON_RPC_SERVER_URL = 'http://127.0.0.1:4000/api' # if VITE_PROXY_ENABLED = 'true' change to '/api'
VITE_WS_SERVER_URL = 'ws://127.0.0.1:4000' # if VITE_PROXY_ENABLED = 'true' change to '/'
VITE_PLAUSIBLE_DOMAIN = 'studio.genlayer.com'
FRONTEND_PORT = '8080'
FRONTEND_BUILD_TARGET = 'final' # change to 'dev' to run in dev mode
# Vite Proxy Configuration (for local development)
VITE_PROXY_ENABLED = 'false'
VITE_PROXY_JSON_RPC_SERVER_URL = 'http://jsonrpc:4000'
VITE_PROXY_WS_SERVER_URL = 'ws://jsonrpc:4000'
VITE_IS_HOSTED = 'false'
# Backend Configuration
BACKEND_BUILD_TARGET = 'debug' # change to 'prod' or remove to run in prod mode
# Hardhat Configuration
HARDHAT_URL = 'http://hardhat'
HARDHAT_PORT = '8545'
HARDHAT_PRIVATE_KEY = '0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'
# LLM Providers Configuration
# If you want to use OpenAI LLMs, add your key here
OPENAIKEY = '<add_your_openai_api_key_here>'
# If you want to use Anthropic AI LLMs, add your key here
ANTHROPIC_API_KEY = '<add_your_anthropic_api_key_here>'
# If you want to use Heurist AI LLMs, add your key here
HEURISTAIURL = 'https://llm-gateway.heurist.xyz'
HEURISTAIMODELSURL = 'https://raw.githubusercontent.com/heurist-network/heurist-models/main/models.json'
HEURISTAIAPIKEY = '<add_your_heuristai_api_key_here>'
# Validator Configuration
# JSON array of initial validators to be created on startup.
# Example: VALIDATORS_CONFIG_JSON = '[{"stake": 100, "provider": "openai", "model": "gpt-4o", "amount": 2}, {"stake": 200, "provider": "anthropic", "model": "claude-3-haiku-20240307", "amount": 1}]'
VALIDATORS_CONFIG_JSON = ''
# Consensus mechanism
VITE_FINALITY_WINDOW = 1800 # in seconds