feat: Change the env variable OPENAI_API_KEY to `OPENAI_API_KEY_CONFIG` (#136)

* feat: Change the env variable `OPENAI_API_KEY` to `OPENAI_API_KEY_CONFIG`

* chore: update .github conf
KenyonY authored Jun 8, 2024
1 parent 10eceb6 commit d0fab50
Showing 9 changed files with 32 additions and 37 deletions.
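
Since this rename is breaking for existing deployments, a hedged sketch of how the renamed variables are consumed after this commit may help. `env2dict` below is a hypothetical stand-in for the helper of the same name used in `openai_forward/settings.py`; the JSON values mirror the `.env` examples in this diff, mapping each sk- key to the permission levels it serves.

import json
import os

# Values are JSON objects, as in the .env examples in this diff;
# the sk-/fk- keys are placeholders.
os.environ["OPENAI_API_KEY_CONFIG"] = '{"sk-xxx": [0, 1]}'
os.environ["FORWARD_KEY_CONFIG"] = '{"fk-0": 0}'

def env2dict(name: str) -> dict:
    """Hypothetical stand-in for openai_forward's env2dict helper."""
    raw = os.environ.get(name, "").strip()
    return json.loads(raw) if raw else {}

OPENAI_API_KEY = env2dict("OPENAI_API_KEY_CONFIG")
FWD_KEY = env2dict("FORWARD_KEY_CONFIG")
print(OPENAI_API_KEY)  # {'sk-xxx': [0, 1]}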
4 changes: 2 additions & 2 deletions .env
@@ -23,8 +23,8 @@ BENCHMARK_MODE=true
 FORWARD_CONFIG=[{"base_url":"https://api.openai.com","route":"/","type":"openai"}]
 
 #LEVEL_MODELS={"1": ["gpt-4"], "2": ["gpt-3.5-turbo"]}
-#OPENAI_API_KEY={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
-#FORWARD_KEY={"fk-0": 0, "fk-1": 1, "fk-2": 2, "default": 1}
+#OPENAI_API_KEY_CONFIG={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
+#FORWARD_KEY_CONFIG={"fk-0": 0, "fk-1": 1, "fk-2": 2, "default": 1}
 
 # `REQ_RATE_LIMIT`: i.e., Request rate limit for specified routes, user specific
 # format: {route: ratelimit-string}
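
The comment above describes `REQ_RATE_LIMIT` as a {route: ratelimit-string} mapping. A minimal sketch of composing such a value, using rate strings that appear in `openai_forward/config/interface.py` further down in this diff (the routes shown are illustrative):

import json

# {route: ratelimit-string}, serialized to JSON for the .env file.
req_rate_limit = {
    "/v1/chat/completions": "100/2minutes",
    "/v1/completions": "60/minute",
}
print("REQ_RATE_LIMIT=" + json.dumps(req_rate_limit))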
4 changes: 2 additions & 2 deletions .env.example
@@ -18,8 +18,8 @@ BENCHMARK_MODE=true
 FORWARD_CONFIG=[{"base_url":"https://api.openai.com","route":"/","type":"openai"},{"base_url":"https://generativelanguage.googleapis.com","route":"/gemini","type":"general"}]
 
 #LEVEL_MODELS={"1": ["gpt-4"], "2": ["gpt-3.5-turbo"]}
-#OPENAI_API_KEY={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
-#FORWARD_KEY={"fk-0": 0, "fk-1": 1, "fk-2": 2}
+#OPENAI_API_KEY_CONFIG={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
+#FORWARD_KEY_CONFIG={"fk-0": 0, "fk-1": 1, "fk-2": 2}
 
 # `REQ_RATE_LIMIT`: Request rate limit for specified routes (user-specific)
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/bug-report-zh.yml
@@ -14,7 +14,7 @@ body:
       description: |
         Please confirm that all of the following are satisfied
       options:
-        - label: The `openai-forward` version I am running is not lower than v0.7.0
+        - label: The `openai-forward` version I am running is not lower than v0.8.0
          required: true
 
   - type: textarea
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/bug-report.yml
@@ -15,7 +15,7 @@ body:
      description: |
        Just a few checks to make sure you need to create a bug report.
      options:
-        - label: My `openai-forward` version is not lower than v0.7.0
+        - label: My `openai-forward` version is not lower than v0.8.0
          required: true
 
  - type: textarea
16 changes: 1 addition & 15 deletions openai_forward/__init__.py
@@ -1,20 +1,6 @@
-__version__ = "0.8.0"
+__version__ = "0.8.1"
 
 from dotenv import load_dotenv
-from yaml import load
 
-
-def yaml_load(filepath):
-
-    try:
-        from yaml import CLoader as Loader
-    except ImportError:
-        from yaml import Loader
-    with open(filepath, mode='r', encoding="utf-8") as stream:
-        # stream = stream.read()
-        content = load(stream, Loader=Loader)
-        return content
-
 
-# yaml_load()
 load_dotenv('.env', override=False)
14 changes: 7 additions & 7 deletions openai_forward/config/interface.py
@@ -84,22 +84,22 @@ class RateLimit(Base):
     token_rate_limit: List[RateLimitType] = [
         RateLimitType(
             route="/v1/chat/completions",
-            value=[{"level": '0', "rate_limit": "60/second"}],
+            value=[{"level": '0', "limit": "60/second"}],
         ),
         RateLimitType(
-            route="/v1/completions", value=[{"level": '0', "rate_limit": "60/second"}]
+            route="/v1/completions", value=[{"level": '0', "limit": "60/second"}]
         ),
     ]
     req_rate_limit: List[RateLimitType] = [
         RateLimitType(
             route="/v1/chat/completions",
-            value=[{"level": '0', "rate_limit": "100/2minutes"}],
+            value=[{"level": '0', "limit": "100/2minutes"}],
         ),
         RateLimitType(
-            route="/v1/completions", value=[{"level": '0', "rate_limit": "60/minute"}]
+            route="/v1/completions", value=[{"level": '0', "limit": "60/minute"}]
         ),
         RateLimitType(
-            route="/v1/embeddings", value=[{"level": '0', "rate_limit": "100/2minutes"}]
+            route="/v1/embeddings", value=[{"level": '0', "limit": "100/2minutes"}]
         ),
     ]
     iter_chunk: Literal['one-by-one', 'efficiency'] = 'one-by-one'

@@ -136,8 +136,8 @@ def convert_to_env(self, set_env=False):
             value: str
             values = value.strip().replace('，', ',').split(',')
             openai_key_dict[key] = [int(i) for i in values]
-        env_dict['OPENAI_API_KEY'] = json.dumps(openai_key_dict)
-        env_dict['FORWARD_KEY'] = json.dumps(self.forward_key)
+        env_dict['OPENAI_API_KEY_CONFIG'] = json.dumps(openai_key_dict)
+        env_dict['FORWARD_KEY_CONFIG'] = json.dumps(self.forward_key)
         env_dict['LEVEL_MODELS'] = json.dumps(self.level)
         if set_env:
             os.environ.update(env_dict)
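
A small sketch of the key-to-levels parsing visible in the convert_to_env() context above. The full-width '，' in the replace() call is reconstructed here, since a literal replace(',', ',') would be a no-op; the input string is illustrative:

# Mirrors the parsing in convert_to_env(): normalize full-width commas,
# split, and coerce each level to int.
value = "0，1, 2"
values = value.strip().replace('，', ',').split(',')
levels = [int(i) for i in values]
print(levels)  # [0, 1, 2]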
2 changes: 1 addition & 1 deletion openai_forward/console.py
@@ -68,7 +68,7 @@ def print_rate_limit_info(
     """
     Print rate limit information.
     """
-    table = Table(title="", box=None, width=61)
+    table = Table(title="", box=None)
     table.add_column("")
     table.add_column("", justify='left')
     backend = backend or "memory"
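
For context on the console.py change above: dropping width=61 lets rich size the table to its content instead of a fixed 61 cells. A minimal sketch, assuming the rich library as the Table import suggests (the row values are illustrative):

from rich.console import Console
from rich.table import Table

# Without an explicit width, rich fits the table to its content.
table = Table(title="", box=None)
table.add_column("")
table.add_column("", justify="left")
table.add_row("backend", "memory")
Console().print(table)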
4 changes: 2 additions & 2 deletions openai_forward/settings.py
@@ -119,8 +119,8 @@
 IP_WHITELIST = env2list("IP_WHITELIST", sep=ENV_VAR_SEP)
 IP_BLACKLIST = env2list("IP_BLACKLIST", sep=ENV_VAR_SEP)
 
-OPENAI_API_KEY = env2dict("OPENAI_API_KEY")
-FWD_KEY = env2dict("FORWARD_KEY")
+OPENAI_API_KEY = env2dict("OPENAI_API_KEY_CONFIG")
+FWD_KEY = env2dict("FORWARD_KEY_CONFIG")
 LEVEL_MODELS = {int(key): value for key, value in env2dict("LEVEL_MODELS").items()}
 
 PROXY = os.environ.get("PROXY", "").strip() or None
21 changes: 15 additions & 6 deletions openai_forward/webui/run.py
@@ -1,5 +1,6 @@
 import ast
 import pickle
+import secrets
 import threading
 
 import orjson

@@ -78,6 +79,8 @@ def worker(log_socket: zmq.Socket, q: SimpleQueue):
         ),
     )
 
+    st.write("---")
+
     if st.button(
         "Apply and Restart", help="Saving configuration and reloading openai forward"
     ):

@@ -94,6 +97,12 @@ def generate_env_content():
         env_content = "\n".join([f"{key}={value}" for key, value in env_dict.items()])
         return env_content
 
+    if st.button("Save to .env", help="Saving configuration to .env file"):
+        with st.spinner("Saving configuration to .env file."):
+            with open(".env", "w") as f:
+                f.write(generate_env_content())
+        st.success("Configuration saved to .env file")
+
     if st.button(
         "Export to .env file",
     ):

@@ -142,7 +151,7 @@ def display_forward_configuration():
         "> - Services forwarded under type=general can be any service (websocket is not yet supported)"
     )
 
-    st.write("#")
+    # st.write("#")
 
     submitted = st.form_submit_button("Save", use_container_width=True)
     if submitted:

@@ -152,7 +161,7 @@ def display_forward_configuration():
            if row["route"] is not None and row["base_url"] is not None
        ]
 
-        print(forward_config.convert_to_env())
+        print("save forward config success")
 
 
 def display_api_key_configuration():

@@ -284,7 +293,7 @@ def display_api_key_configuration():
 
        api_key.level = level_model_map
 
-        print(api_key.convert_to_env())
+        print("save api key success")
 
 
 def display_cache_configuration():

@@ -334,7 +343,7 @@ def display_cache_configuration():
            if row["cache_route"] is not None
        ]
 
-        print(cache.convert_to_env())
+        print("save cache success")
 
 
 def display_rate_limit_configuration():

@@ -398,7 +407,7 @@ def display_rate_limit_configuration():
            for _, row in edited_req_rate_limit_df.iterrows()
        ]
 
-        print(rate_limit.convert_to_env())
+        print("save rate limit success")
 
 
 def display_other_configuration():

@@ -420,7 +429,7 @@ def display_other_configuration():
        config.proxy = proxy
        config.benchmark_mode = benchmark_mode
 
-        print(config.convert_to_env())
+        print("save other config success")
 
 
 if selected_section == "Forward":
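
The webui's new "Save to .env" button writes the generated env content straight to the .env file. A condensed sketch of that flow outside Streamlit; generate_env_content here is a simplified stand-in that joins KEY=VALUE pairs the same way the diff above shows, and the dict values are placeholders:

env_dict = {
    "OPENAI_API_KEY_CONFIG": '{"sk-xxx": [0]}',
    "FORWARD_KEY_CONFIG": '{"fk-0": 0}',
}

def generate_env_content() -> str:
    # Same joining scheme as in the diff: one KEY=VALUE per line.
    return "\n".join(f"{key}={value}" for key, value in env_dict.items())

with open(".env", "w") as f:
    f.write(generate_env_content())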
