From 1020ff7f6a2952da334924736adf3b4694e49c8e Mon Sep 17 00:00:00 2001
From: Pratham Dubey <134331217+prathamdby@users.noreply.github.com>
Date: Tue, 26 Mar 2024 15:54:22 +0530
Subject: [PATCH 1/6] modules: ask: Switch to the GPT-4 model as the default
---
modules/ask.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/ask.py b/modules/ask.py
index 3c40533..dcafd2b 100644
--- a/modules/ask.py
+++ b/modules/ask.py
@@ -21,7 +21,7 @@
import random
from g4f.client import Client as g4fClient
-from g4f.models import default
+from g4f.models import gpt_4
from g4f.Provider import Bing, FreeChatgpt, RetryProvider, You
from g4f.stubs import ChatCompletion
from pyrogram import filters
@@ -81,7 +81,7 @@ async def generate_response(user_prompts: list[dict[str, str]]) -> str:
None,
client.chat.completions.create,
resultant_prompt,
- default,
+ gpt_4,
)
return response.choices[0].message.content
except Exception as e:
From 5dc5f4f956189c1d741278289870c24e449b9d29 Mon Sep 17 00:00:00 2001
From: Pratham Dubey <134331217+prathamdby@users.noreply.github.com>
Date: Tue, 26 Mar 2024 16:25:09 +0530
Subject: [PATCH 2/6] modules: ask: Reply with a placeholder message while the
 response is being generated
---
modules/ask.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/ask.py b/modules/ask.py
index dcafd2b..107c2c1 100644
--- a/modules/ask.py
+++ b/modules/ask.py
@@ -111,5 +111,7 @@ async def cmd_ask(app: Client, message: Message):
previous_prompts.reverse()
previous_prompts.append({"role": "user", "content": message.text})
+ to_edit = await message.reply("Generating response...", reply_to_message_id=message.id)
+
response: str = await generate_response(previous_prompts)
- await message.reply(response)
+ await to_edit.edit_text(response)
From b5f9f938520dc1f406c1cf5695747fd2ed95c65a Mon Sep 17 00:00:00 2001
From: Pratham Dubey <134331217+prathamdby@users.noreply.github.com>
Date: Tue, 26 Mar 2024 16:27:16 +0530
Subject: [PATCH 3/6] tests: Drop the FreeChatgpt test
The FreeChatgpt provider gets rate-limited too easily, so drop it.
---
tests/test_freechatgpt.py | 48 ---------------------------------------
1 file changed, 48 deletions(-)
delete mode 100644 tests/test_freechatgpt.py
diff --git a/tests/test_freechatgpt.py b/tests/test_freechatgpt.py
deleted file mode 100644
index b4ea2d9..0000000
--- a/tests/test_freechatgpt.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-only
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, version 3 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-#
-# Copyright (c) 2024, YeetCode Developers
-
-from g4f.client import Client
-from g4f.models import default
-from g4f.Provider import FreeChatgpt
-
-
-def generate_response() -> str:
- client = Client(provider=FreeChatgpt)
-
- try:
- response = client.chat.completions.create(
- model=default,
- messages=[
- {"role": "user", "content": "Say hi, with your response starting with START and ending with END"}
- ],
- )
- except:
- print("ERROR: Could not create a prompt!")
- raise
-
- return response.choices[0].message.content
-
-
-class TestOutput:
- def test_output(self):
- response = generate_response()
-
- if len(response) > 0:
- print("✅ FreeChatgpt is up!")
- else:
- print("❌ FreeChatgpt is down...")
-
- assert response.startswith("START") and response.endswith("END")
From 1fbb9b47cf01f7c6e7dc0797c28c733adb1d4587 Mon Sep 17 00:00:00 2001
From: Pratham Dubey <134331217+prathamdby@users.noreply.github.com>
Date: Tue, 26 Mar 2024 16:28:25 +0530
Subject: [PATCH 4/6] tests: bing: Move the return statement into the
 try-except block
---
tests/test_bing.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/tests/test_bing.py b/tests/test_bing.py
index 62e5026..7ea4417 100644
--- a/tests/test_bing.py
+++ b/tests/test_bing.py
@@ -29,12 +29,11 @@ def generate_response() -> str:
{"role": "user", "content": "Say hi, with your response starting with START and ending with END"}
],
)
+ return response.choices[0].message.content
except:
print("ERROR: Could not create a prompt!")
raise
- return response.choices[0].message.content
-
class TestOutput:
def test_output(self):
From 5ce4169372ca012e167cc34d7285ae943f117bec Mon Sep 17 00:00:00 2001
From: Pratham Dubey <134331217+prathamdby@users.noreply.github.com>
Date: Tue, 26 Mar 2024 16:29:07 +0530
Subject: [PATCH 5/6] tests: bing: Switch to the GPT-4 model as the default
---
tests/test_bing.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/test_bing.py b/tests/test_bing.py
index 7ea4417..161e983 100644
--- a/tests/test_bing.py
+++ b/tests/test_bing.py
@@ -15,7 +15,7 @@
# Copyright (c) 2024, YeetCode Developers
from g4f.client import Client
-from g4f.models import default
+from g4f.models import gpt_4
from g4f.Provider import Bing
@@ -24,7 +24,7 @@ def generate_response() -> str:
try:
response = client.chat.completions.create(
- model=default,
+ model=gpt_4,
messages=[
{"role": "user", "content": "Say hi, with your response starting with START and ending with END"}
],
From 62d66b7daf71f9bbe68111deec2d657f9d15504b Mon Sep 17 00:00:00 2001
From: Pratham Dubey <134331217+prathamdby@users.noreply.github.com>
Date: Tue, 26 Mar 2024 16:37:38 +0530
Subject: [PATCH 6/6] src: Bot: Guard the platform-specific attribute of the
asyncio package
---
src/Bot.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/Bot.py b/src/Bot.py
index 812c259..36d510d 100644
--- a/src/Bot.py
+++ b/src/Bot.py
@@ -44,8 +44,9 @@ def main() -> None:
default_event_loop_policy = asyncio.get_event_loop_policy()
import g4f # Trigger g4f event loop policy set # noqa: F401 # pylint: disable=unused-import # isort:skip
- if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsSelectorEventLoopPolicy):
- asyncio.set_event_loop_policy(default_event_loop_policy)
+ if hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
+ if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsSelectorEventLoopPolicy):
+ asyncio.set_event_loop_policy(default_event_loop_policy)
loaded_modules = load_modules(app)
app.run()