From c8a93d08f6025b27477f2fe4df93f58b8dde5297 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sat, 18 Nov 2023 14:17:58 -0800 Subject: [PATCH 01/22] Create autotemp.py --- playground/structs/autotemp.py | 67 ++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 playground/structs/autotemp.py diff --git a/playground/structs/autotemp.py b/playground/structs/autotemp.py new file mode 100644 index 000000000..ed38a6216 --- /dev/null +++ b/playground/structs/autotemp.py @@ -0,0 +1,67 @@ +import re +from swarms.models.openai_models import OpenAIChat + +class AutoTemp: + """ + AutoTemp is a tool for automatically selecting the best temperature setting for a given task. + It generates responses at different temperatures, evaluates them, and ranks them based on quality. + """ + + def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6): + self.api_key = api_key + self.default_temp = default_temp + self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] + self.auto_select = auto_select + self.max_workers = max_workers + self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) + + def evaluate_output(self, output, temperature): + print(f"Evaluating output at temperature {temperature}...") + eval_prompt = f""" + Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria: + + - Relevance: How well does the output address the prompt or task at hand? + - Clarity: Is the output easy to understand and free of ambiguity? + - Utility: How useful is the output for its intended purpose? + - Pride: If the user had to submit this output to the world for their career, would they be proud? + - Delight: Is the output likely to delight or positively surprise the user? + + Be sure to comprehensively evaluate the output, it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical. 
+ + Output to evaluate: + --- + {output} + --- + """ + score_text = self.llm(eval_prompt, temperature=0.5) + score_match = re.search(r'\b\d+(\.\d)?\b', score_text) + return round(float(score_match.group()), 1) if score_match else 0.0 + + def run(self, prompt, temperature_string): + print("Starting generation process...") + temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()] + outputs = {} + scores = {} + for temp in temperature_list: + print(f"Generating at temperature {temp}...") + output_text = self.llm(prompt, temperature=temp) + if output_text: + outputs[temp] = output_text + scores[temp] = self.evaluate_output(output_text, temp) + + print("Generation process complete.") + if not scores: + return "No valid outputs generated.", None + + sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) + best_temp, best_score = sorted_scores[0] + best_output = outputs[best_temp] + + return ( + f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}" + if self.auto_select + else "\n".join( + f"Temp {temp} | Score: {score}:\n{outputs[temp]}" + for temp, score in sorted_scores + ) + ) From 1f8adceebc0366df99ae46394906e69af0d8aaac Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sat, 18 Nov 2023 14:19:16 -0800 Subject: [PATCH 02/22] Create autotemp_example.py --- playground/autotemp_example.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 playground/autotemp_example.py diff --git a/playground/autotemp_example.py b/playground/autotemp_example.py new file mode 100644 index 000000000..9047268d7 --- /dev/null +++ b/playground/autotemp_example.py @@ -0,0 +1,22 @@ +from swarms.models import OpenAIChat +from swarms.models.autotemp import AutoTemp + +# Your OpenAI API key +api_key = "" + +autotemp_agent = AutoTemp( + api_key=api_key, + alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2], + auto_select=False, + # model_version="gpt-3.5-turbo" # Specify the model version if needed +) + +# Define the task and temperature string +task = "Generate a short story about a lost civilization." +temperature_string = "0.4,0.6,0.8,1.0,1.2," + +# Run the AutoTempAgent +result = autotemp_agent.run(task, temperature_string) + +# Print the result +print(result) From d86d8ec1902292f2a69eee3555513b8e087c2561 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sat, 18 Nov 2023 14:30:51 -0800 Subject: [PATCH 03/22] Delete swarms/models/autotemp.py --- swarms/models/autotemp.py | 101 -------------------------------------- 1 file changed, 101 deletions(-) delete mode 100644 swarms/models/autotemp.py diff --git a/swarms/models/autotemp.py b/swarms/models/autotemp.py deleted file mode 100644 index c3abb8944..000000000 --- a/swarms/models/autotemp.py +++ /dev/null @@ -1,101 +0,0 @@ -import re -from concurrent.futures import ThreadPoolExecutor, as_completed -from swarms.models.openai_models import OpenAIChat - - -class AutoTempAgent: - """ - AutoTemp is a tool for automatically selecting the best temperature setting for a given task. - - Flow: - 1. Generate outputs at a range of temperature settings. - 2. Evaluate each output using the default temperature setting. - 3. Select the best output based on the evaluation score. - 4. Return the best output. - - - Args: - temperature (float, optional): The default temperature setting to use. Defaults to 0.5. - api_key (str, optional): Your OpenAI API key. Defaults to None. 
- alt_temps ([type], optional): A list of alternative temperature settings to try. Defaults to None. - auto_select (bool, optional): If True, the best temperature setting will be automatically selected. Defaults to True. - max_workers (int, optional): The maximum number of workers to use when generating outputs. Defaults to 6. - - Returns: - [type]: [description] - - Examples: - >>> from swarms.demos.autotemp import AutoTemp - >>> autotemp = AutoTemp() - >>> autotemp.run("Generate a 10,000 word blog on mental clarity and the benefits of meditation.", "0.4,0.6,0.8,1.0,1.2,1.4") - Best AutoTemp Output (Temp 0.4 | Score: 100.0): - Generate a 10,000 word blog on mental clarity and the benefits of meditation. - - """ - - def __init__( - self, - temperature: float = 0.5, - api_key: str = None, - alt_temps=None, - auto_select=True, - max_workers=6, - ): - self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] - self.auto_select = auto_select - self.max_workers = max_workers - self.temperature = temperature - self.alt_temps = alt_temps - self.llm = OpenAIChat( - openai_api_key=api_key, - temperature=temperature, - ) - - def evaluate_output(self, output: str): - """Evaluate the output using the default temperature setting.""" - eval_prompt = f""" - Evaluate the following output which was generated at a temperature setting of {self.temperature}. - Provide a precise score from 0.0 to 100.0, considering the criteria of relevance, clarity, utility, pride, and delight. - - Output to evaluate: - --- - {output} - --- - """ - score_text = self.llm(prompt=eval_prompt) - score_match = re.search(r"\b\d+(\.\d)?\b", score_text) - return round(float(score_match.group()), 1) if score_match else 0.0 - - def run(self, task: str, temperature_string): - """Run the AutoTemp agent.""" - temperature_list = [ - float(temp.strip()) for temp in temperature_string.split(",") - ] - outputs = {} - scores = {} - with ThreadPoolExecutor(max_workers=self.max_workers) as executor: - future_to_temp = { - executor.submit(self.llm.generate, task, temp): temp - for temp in temperature_list - } - for future in as_completed(future_to_temp): - temp = future_to_temp[future] - output_text = future.result() - outputs[temp] = output_text - scores[temp] = self.evaluate_output(output_text, temp) - - if not scores: - return "No valid outputs generated.", None - - sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) - best_temp, best_score = sorted_scores[0] - best_output = outputs[best_temp] - - return ( - f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}" - if self.auto_select - else "\n".join( - f"Temp {temp} | Score: {score}:\n{outputs[temp]}" - for temp, score in sorted_scores - ) - ) From e132dbf7f03ba8d59a09f4b970e265c2c51d3992 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sat, 18 Nov 2023 14:31:57 -0800 Subject: [PATCH 04/22] Create AutoTemp.py --- swarms/models/AutoTemp.py | 67 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 swarms/models/AutoTemp.py diff --git a/swarms/models/AutoTemp.py b/swarms/models/AutoTemp.py new file mode 100644 index 000000000..ed38a6216 --- /dev/null +++ b/swarms/models/AutoTemp.py @@ -0,0 +1,67 @@ +import re +from swarms.models.openai_models import OpenAIChat + +class AutoTemp: + """ + AutoTemp is a tool for automatically selecting the best temperature setting for a given task. 
+ It generates responses at different temperatures, evaluates them, and ranks them based on quality. + """ + + def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6): + self.api_key = api_key + self.default_temp = default_temp + self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] + self.auto_select = auto_select + self.max_workers = max_workers + self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) + + def evaluate_output(self, output, temperature): + print(f"Evaluating output at temperature {temperature}...") + eval_prompt = f""" + Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria: + + - Relevance: How well does the output address the prompt or task at hand? + - Clarity: Is the output easy to understand and free of ambiguity? + - Utility: How useful is the output for its intended purpose? + - Pride: If the user had to submit this output to the world for their career, would they be proud? + - Delight: Is the output likely to delight or positively surprise the user? + + Be sure to comprehensively evaluate the output, it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical. + + Output to evaluate: + --- + {output} + --- + """ + score_text = self.llm(eval_prompt, temperature=0.5) + score_match = re.search(r'\b\d+(\.\d)?\b', score_text) + return round(float(score_match.group()), 1) if score_match else 0.0 + + def run(self, prompt, temperature_string): + print("Starting generation process...") + temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()] + outputs = {} + scores = {} + for temp in temperature_list: + print(f"Generating at temperature {temp}...") + output_text = self.llm(prompt, temperature=temp) + if output_text: + outputs[temp] = output_text + scores[temp] = self.evaluate_output(output_text, temp) + + print("Generation process complete.") + if not scores: + return "No valid outputs generated.", None + + sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) + best_temp, best_score = sorted_scores[0] + best_output = outputs[best_temp] + + return ( + f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}" + if self.auto_select + else "\n".join( + f"Temp {temp} | Score: {score}:\n{outputs[temp]}" + for temp, score in sorted_scores + ) + ) From aa7fa3238862584920176558eb136d6cec5d66f4 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sat, 18 Nov 2023 14:32:40 -0800 Subject: [PATCH 05/22] Delete playground/autotemp_example.py --- playground/autotemp_example.py | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 playground/autotemp_example.py diff --git a/playground/autotemp_example.py b/playground/autotemp_example.py deleted file mode 100644 index 9047268d7..000000000 --- a/playground/autotemp_example.py +++ /dev/null @@ -1,22 +0,0 @@ -from swarms.models import OpenAIChat -from swarms.models.autotemp import AutoTemp - -# Your OpenAI API key -api_key = "" - -autotemp_agent = AutoTemp( - api_key=api_key, - alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2], - auto_select=False, - # model_version="gpt-3.5-turbo" # Specify the model version if needed -) - -# Define the task and temperature string -task = "Generate a short story about a lost 
civilization." -temperature_string = "0.4,0.6,0.8,1.0,1.2," - -# Run the AutoTempAgent -result = autotemp_agent.run(task, temperature_string) - -# Print the result -print(result) From 1af9bf996768ee914ea6c8528db72426b4578f81 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sat, 18 Nov 2023 14:32:53 -0800 Subject: [PATCH 06/22] Delete playground/structs/autotemp.py --- playground/structs/autotemp.py | 67 ---------------------------------- 1 file changed, 67 deletions(-) delete mode 100644 playground/structs/autotemp.py diff --git a/playground/structs/autotemp.py b/playground/structs/autotemp.py deleted file mode 100644 index ed38a6216..000000000 --- a/playground/structs/autotemp.py +++ /dev/null @@ -1,67 +0,0 @@ -import re -from swarms.models.openai_models import OpenAIChat - -class AutoTemp: - """ - AutoTemp is a tool for automatically selecting the best temperature setting for a given task. - It generates responses at different temperatures, evaluates them, and ranks them based on quality. - """ - - def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6): - self.api_key = api_key - self.default_temp = default_temp - self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] - self.auto_select = auto_select - self.max_workers = max_workers - self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) - - def evaluate_output(self, output, temperature): - print(f"Evaluating output at temperature {temperature}...") - eval_prompt = f""" - Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria: - - - Relevance: How well does the output address the prompt or task at hand? - - Clarity: Is the output easy to understand and free of ambiguity? - - Utility: How useful is the output for its intended purpose? - - Pride: If the user had to submit this output to the world for their career, would they be proud? - - Delight: Is the output likely to delight or positively surprise the user? - - Be sure to comprehensively evaluate the output, it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical. 
- - Output to evaluate: - --- - {output} - --- - """ - score_text = self.llm(eval_prompt, temperature=0.5) - score_match = re.search(r'\b\d+(\.\d)?\b', score_text) - return round(float(score_match.group()), 1) if score_match else 0.0 - - def run(self, prompt, temperature_string): - print("Starting generation process...") - temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()] - outputs = {} - scores = {} - for temp in temperature_list: - print(f"Generating at temperature {temp}...") - output_text = self.llm(prompt, temperature=temp) - if output_text: - outputs[temp] = output_text - scores[temp] = self.evaluate_output(output_text, temp) - - print("Generation process complete.") - if not scores: - return "No valid outputs generated.", None - - sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) - best_temp, best_score = sorted_scores[0] - best_output = outputs[best_temp] - - return ( - f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}" - if self.auto_select - else "\n".join( - f"Temp {temp} | Score: {score}:\n{outputs[temp]}" - for temp, score in sorted_scores - ) - ) From 9c04b62f8e0869dc197cea453a00963b81a20fc0 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sat, 18 Nov 2023 14:33:45 -0800 Subject: [PATCH 07/22] Create AutoTemp_example.py --- AutoTemp_example.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 AutoTemp_example.py diff --git a/AutoTemp_example.py b/AutoTemp_example.py new file mode 100644 index 000000000..30a46e1d9 --- /dev/null +++ b/AutoTemp_example.py @@ -0,0 +1,22 @@ +from swarms.models import OpenAIChat +from swarms.models.AutoTemp import AutoTemp + +# Your OpenAI API key +api_key = "" + +autotemp_agent = AutoTemp( + api_key=api_key, + alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2], + auto_select=False, + # model_version="gpt-3.5-turbo" # Specify the model version if needed +) + +# Define the task and temperature string +task = "Generate a short story about a lost civilization." +temperature_string = "0.4,0.6,0.8,1.0,1.2," + +# Run the AutoTempAgent +result = autotemp_agent.run(task, temperature_string) + +# Print the result +print(result) From c85275266a2cd7bbd355e5ee1f6f8e5b7f8d23a7 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sun, 19 Nov 2023 12:21:53 -0800 Subject: [PATCH 08/22] Create blog_gen.py --- swarms/swarms/blog_gen.py | 110 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 swarms/swarms/blog_gen.py diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py new file mode 100644 index 000000000..fa526a256 --- /dev/null +++ b/swarms/swarms/blog_gen.py @@ -0,0 +1,110 @@ +import os +from termcolor import colored +from swarms.models import OpenAIChat +from swarms.models.AutoTemp import AutoTemp +from swarms.structs import SequentialWorkflow + + +class BlogGen: + def __init__( + self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2,1.4" + ): # Add blog_topic as an argument + self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.7) + self.auto_temp = AutoTemp(api_key) + self.temperature_range = temperature_range + self.workflow = SequentialWorkflow(max_loops=5) + + # Formatting the topic selection prompt with the user's topic + self.TOPIC_SELECTION_SYSTEM_PROMPT = f""" + Given the topic '{blog_topic}', generate an engaging and versatile blog topic. 
This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'. + """ + + self.DRAFT_WRITER_SYSTEM_PROMPT = """ + Create an engaging and comprehensive blog article of at least 5,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms. + """ + + self.REVIEW_AGENT_SYSTEM_PROMPT = """ + Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author’s voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing. + """ + + self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """ + Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels. 
+ """ + + def run_workflow(self): + try: + # Topic generation using OpenAIChat + topic_result = self.openai_chat.generate( + [self.TOPIC_SELECTION_SYSTEM_PROMPT] + ) + topic_output = topic_result.generations[0][0].text + print( + colored( + f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n", + "white", + ) + ) + + chosen_topic = topic_output.split("\n")[0] + print(colored("Selected topic: " + chosen_topic, "yellow")) + + # Initial draft generation with AutoTemp + initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace( + "{{CHOSEN_TOPIC}}", chosen_topic + ) + auto_temp_output = self.auto_temp.run( + initial_draft_prompt, self.temperature_range + ) + initial_draft_output = auto_temp_output # Assuming AutoTemp.run returns the best output directly + print( + colored( + f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n", + "white", + ) + ) + + # Review process using OpenAIChat + review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace( + "{{ARTICLE_TOPIC}}", chosen_topic + ) + review_result = self.openai_chat.generate([review_prompt]) + review_output = review_result.generations[0][0].text + print( + colored( + f"\nReview Output:\n----------------------------\n{review_output}\n", + "white", + ) + ) + + # Distribution preparation using OpenAIChat + distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace( + "{{ARTICLE_TOPIC}}", chosen_topic + ) + distribution_result = self.openai_chat.generate([distribution_prompt]) + distribution_output = distribution_result.generations[0][0].text + print( + colored( + f"\nDistribution Output:\n----------------------------\n{distribution_output}\n", + "white", + ) + ) + + # Final compilation of the blog + final_blog_content = ( + f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}" + ) + print( + colored( + f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n", + "green", + ) + ) + + except Exception as e: + print(colored(f"An error occurred: {str(e)}", "red")) + + +if __name__ == "__main__": + api_key = os.environ["OPENAI_API_KEY"] + blog_generator = BlogGen(api_key) + blog_generator.run_workflow() From 7554fbd0cd7d572dd21ee2f322ce09c5f3599794 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sun, 19 Nov 2023 12:22:27 -0800 Subject: [PATCH 09/22] Create blog_gen_example.py --- blog_gen_example.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 blog_gen_example.py diff --git a/blog_gen_example.py b/blog_gen_example.py new file mode 100644 index 000000000..7cf955358 --- /dev/null +++ b/blog_gen_example.py @@ -0,0 +1,23 @@ +import os +from swarms.swarms.blog_gen import BlogGen + + +def main(): + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError("OPENAI_API_KEY environment variable not set.") + + blog_topic = input("Enter the topic for the blog generation: ") + + blog_generator = BlogGen(api_key, blog_topic) + blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT = ( + blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT.replace( + "{{BLOG_TOPIC}}", blog_topic + ) + ) + + blog_generator.run_workflow() + + +if __name__ == "__main__": + main() From 367ecfcbbad471435d4372ee525b4cc72ccfe93a Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sun, 19 Nov 2023 16:11:26 -0800 Subject: [PATCH 10/22] Update and rename AutoTemp_example.py to autotemp_example.py --- AutoTemp_example.py => autotemp_example.py | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) rename AutoTemp_example.py => autotemp_example.py (92%) diff --git a/AutoTemp_example.py b/autotemp_example.py similarity index 92% rename from AutoTemp_example.py rename to autotemp_example.py index 30a46e1d9..9047268d7 100644 --- a/AutoTemp_example.py +++ b/autotemp_example.py @@ -1,5 +1,5 @@ from swarms.models import OpenAIChat -from swarms.models.AutoTemp import AutoTemp +from swarms.models.autotemp import AutoTemp # Your OpenAI API key api_key = "" From 8403cfa35075dfc25f1b44633783d183d1bfb07f Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sun, 19 Nov 2023 16:12:38 -0800 Subject: [PATCH 11/22] Rename AutoTemp.py to autotemp.py --- swarms/models/{AutoTemp.py => autotemp.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename swarms/models/{AutoTemp.py => autotemp.py} (100%) diff --git a/swarms/models/AutoTemp.py b/swarms/models/autotemp.py similarity index 100% rename from swarms/models/AutoTemp.py rename to swarms/models/autotemp.py From dd777fa2fbb25daa563b16f7feb8a9ae85f1a4bd Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sun, 19 Nov 2023 18:33:20 -0800 Subject: [PATCH 12/22] Update blog_gen.py --- swarms/swarms/blog_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py index fa526a256..4c285e40d 100644 --- a/swarms/swarms/blog_gen.py +++ b/swarms/swarms/blog_gen.py @@ -1,7 +1,7 @@ import os from termcolor import colored from swarms.models import OpenAIChat -from swarms.models.AutoTemp import AutoTemp +from swarms.models.autotemp import AutoTemp from swarms.structs import SequentialWorkflow From 8ea786e41367160c2d8a057f324f8874f4cf53d5 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sun, 19 Nov 2023 18:44:42 -0800 Subject: [PATCH 13/22] Update blog_gen.py --- swarms/swarms/blog_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py index 4c285e40d..93d44c3dd 100644 --- a/swarms/swarms/blog_gen.py +++ b/swarms/swarms/blog_gen.py @@ -20,7 +20,7 @@ def __init__( """ self.DRAFT_WRITER_SYSTEM_PROMPT = """ - Create an engaging and comprehensive blog article of at least 5,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms. + Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. 
The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms. """ self.REVIEW_AGENT_SYSTEM_PROMPT = """ From 07fbf42ec2bfddca0267e630bf9e9f772770616e Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Sun, 19 Nov 2023 19:56:38 -0800 Subject: [PATCH 14/22] Update blog_gen.py --- swarms/swarms/blog_gen.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py index 93d44c3dd..3781d895e 100644 --- a/swarms/swarms/blog_gen.py +++ b/swarms/swarms/blog_gen.py @@ -7,9 +7,9 @@ class BlogGen: def __init__( - self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2,1.4" + self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2" ): # Add blog_topic as an argument - self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.7) + self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8) self.auto_temp = AutoTemp(api_key) self.temperature_range = temperature_range self.workflow = SequentialWorkflow(max_loops=5) @@ -20,11 +20,11 @@ def __init__( """ self.DRAFT_WRITER_SYSTEM_PROMPT = """ - Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms. + Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences. """ self.REVIEW_AGENT_SYSTEM_PROMPT = """ - Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author’s voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing. 
+ Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the authors voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing. """ self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """ From 00325e5af4bceadc6315814dd5a435268e2e2671 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:07:56 -0800 Subject: [PATCH 15/22] Create autotemp.py --- playground/demos/autotemp/autotemp.py | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 playground/demos/autotemp/autotemp.py diff --git a/playground/demos/autotemp/autotemp.py b/playground/demos/autotemp/autotemp.py new file mode 100644 index 000000000..ed38a6216 --- /dev/null +++ b/playground/demos/autotemp/autotemp.py @@ -0,0 +1,67 @@ +import re +from swarms.models.openai_models import OpenAIChat + +class AutoTemp: + """ + AutoTemp is a tool for automatically selecting the best temperature setting for a given task. + It generates responses at different temperatures, evaluates them, and ranks them based on quality. + """ + + def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6): + self.api_key = api_key + self.default_temp = default_temp + self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] + self.auto_select = auto_select + self.max_workers = max_workers + self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) + + def evaluate_output(self, output, temperature): + print(f"Evaluating output at temperature {temperature}...") + eval_prompt = f""" + Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria: + + - Relevance: How well does the output address the prompt or task at hand? + - Clarity: Is the output easy to understand and free of ambiguity? + - Utility: How useful is the output for its intended purpose? + - Pride: If the user had to submit this output to the world for their career, would they be proud? + - Delight: Is the output likely to delight or positively surprise the user? + + Be sure to comprehensively evaluate the output, it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical. 
+ + Output to evaluate: + --- + {output} + --- + """ + score_text = self.llm(eval_prompt, temperature=0.5) + score_match = re.search(r'\b\d+(\.\d)?\b', score_text) + return round(float(score_match.group()), 1) if score_match else 0.0 + + def run(self, prompt, temperature_string): + print("Starting generation process...") + temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()] + outputs = {} + scores = {} + for temp in temperature_list: + print(f"Generating at temperature {temp}...") + output_text = self.llm(prompt, temperature=temp) + if output_text: + outputs[temp] = output_text + scores[temp] = self.evaluate_output(output_text, temp) + + print("Generation process complete.") + if not scores: + return "No valid outputs generated.", None + + sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) + best_temp, best_score = sorted_scores[0] + best_output = outputs[best_temp] + + return ( + f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}" + if self.auto_select + else "\n".join( + f"Temp {temp} | Score: {score}:\n{outputs[temp]}" + for temp, score in sorted_scores + ) + ) From d61ba72deeaefff074c9eb226785492d91e57c30 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:10:05 -0800 Subject: [PATCH 16/22] Create autotemp_example.py --- playground/demos/autotemp/autotemp_example.py | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 playground/demos/autotemp/autotemp_example.py diff --git a/playground/demos/autotemp/autotemp_example.py b/playground/demos/autotemp/autotemp_example.py new file mode 100644 index 000000000..9047268d7 --- /dev/null +++ b/playground/demos/autotemp/autotemp_example.py @@ -0,0 +1,22 @@ +from swarms.models import OpenAIChat +from swarms.models.autotemp import AutoTemp + +# Your OpenAI API key +api_key = "" + +autotemp_agent = AutoTemp( + api_key=api_key, + alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2], + auto_select=False, + # model_version="gpt-3.5-turbo" # Specify the model version if needed +) + +# Define the task and temperature string +task = "Generate a short story about a lost civilization." 
+temperature_string = "0.4,0.6,0.8,1.0,1.2," + +# Run the AutoTempAgent +result = autotemp_agent.run(task, temperature_string) + +# Print the result +print(result) From 1b25553dff6659d70b44be8f18978c6bf90c1875 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:10:55 -0800 Subject: [PATCH 17/22] Create blog_gen.py --- playground/demos/blog_gen/blog_gen.py | 110 ++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 playground/demos/blog_gen/blog_gen.py diff --git a/playground/demos/blog_gen/blog_gen.py b/playground/demos/blog_gen/blog_gen.py new file mode 100644 index 000000000..3781d895e --- /dev/null +++ b/playground/demos/blog_gen/blog_gen.py @@ -0,0 +1,110 @@ +import os +from termcolor import colored +from swarms.models import OpenAIChat +from swarms.models.autotemp import AutoTemp +from swarms.structs import SequentialWorkflow + + +class BlogGen: + def __init__( + self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2" + ): # Add blog_topic as an argument + self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8) + self.auto_temp = AutoTemp(api_key) + self.temperature_range = temperature_range + self.workflow = SequentialWorkflow(max_loops=5) + + # Formatting the topic selection prompt with the user's topic + self.TOPIC_SELECTION_SYSTEM_PROMPT = f""" + Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'. + """ + + self.DRAFT_WRITER_SYSTEM_PROMPT = """ + Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences. + """ + + self.REVIEW_AGENT_SYSTEM_PROMPT = """ + Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the authors voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing. + """ + + self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """ + Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. 
Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels. + """ + + def run_workflow(self): + try: + # Topic generation using OpenAIChat + topic_result = self.openai_chat.generate( + [self.TOPIC_SELECTION_SYSTEM_PROMPT] + ) + topic_output = topic_result.generations[0][0].text + print( + colored( + f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n", + "white", + ) + ) + + chosen_topic = topic_output.split("\n")[0] + print(colored("Selected topic: " + chosen_topic, "yellow")) + + # Initial draft generation with AutoTemp + initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace( + "{{CHOSEN_TOPIC}}", chosen_topic + ) + auto_temp_output = self.auto_temp.run( + initial_draft_prompt, self.temperature_range + ) + initial_draft_output = auto_temp_output # Assuming AutoTemp.run returns the best output directly + print( + colored( + f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n", + "white", + ) + ) + + # Review process using OpenAIChat + review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace( + "{{ARTICLE_TOPIC}}", chosen_topic + ) + review_result = self.openai_chat.generate([review_prompt]) + review_output = review_result.generations[0][0].text + print( + colored( + f"\nReview Output:\n----------------------------\n{review_output}\n", + "white", + ) + ) + + # Distribution preparation using OpenAIChat + distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace( + "{{ARTICLE_TOPIC}}", chosen_topic + ) + distribution_result = self.openai_chat.generate([distribution_prompt]) + distribution_output = distribution_result.generations[0][0].text + print( + colored( + f"\nDistribution Output:\n----------------------------\n{distribution_output}\n", + "white", + ) + ) + + # Final compilation of the blog + final_blog_content = ( + f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}" + ) + print( + colored( + f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n", + "green", + ) + ) + + except Exception as e: + print(colored(f"An error occurred: {str(e)}", "red")) + + +if __name__ == "__main__": + api_key = os.environ["OPENAI_API_KEY"] + blog_generator = BlogGen(api_key) + blog_generator.run_workflow() From 76a1d599350b4032a9be341e6b444101ea7191e0 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:11:20 -0800 Subject: [PATCH 18/22] Create blog_gen_example.py --- playground/demos/blog_gen/blog_gen_example.py | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 playground/demos/blog_gen/blog_gen_example.py diff --git a/playground/demos/blog_gen/blog_gen_example.py b/playground/demos/blog_gen/blog_gen_example.py new file mode 100644 index 000000000..7cf955358 --- /dev/null +++ b/playground/demos/blog_gen/blog_gen_example.py @@ -0,0 +1,23 @@ +import os +from swarms.swarms.blog_gen import BlogGen + + +def main(): + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError("OPENAI_API_KEY environment variable not set.") 
+ + blog_topic = input("Enter the topic for the blog generation: ") + + blog_generator = BlogGen(api_key, blog_topic) + blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT = ( + blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT.replace( + "{{BLOG_TOPIC}}", blog_topic + ) + ) + + blog_generator.run_workflow() + + +if __name__ == "__main__": + main() From 9fa2255e56476de3925a862556587914d75f2cdb Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:12:03 -0800 Subject: [PATCH 19/22] Delete autotemp_example.py --- autotemp_example.py | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 autotemp_example.py diff --git a/autotemp_example.py b/autotemp_example.py deleted file mode 100644 index 9047268d7..000000000 --- a/autotemp_example.py +++ /dev/null @@ -1,22 +0,0 @@ -from swarms.models import OpenAIChat -from swarms.models.autotemp import AutoTemp - -# Your OpenAI API key -api_key = "" - -autotemp_agent = AutoTemp( - api_key=api_key, - alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2], - auto_select=False, - # model_version="gpt-3.5-turbo" # Specify the model version if needed -) - -# Define the task and temperature string -task = "Generate a short story about a lost civilization." -temperature_string = "0.4,0.6,0.8,1.0,1.2," - -# Run the AutoTempAgent -result = autotemp_agent.run(task, temperature_string) - -# Print the result -print(result) From 90f4edd22877a2e973fad888b4c81c5738a3b009 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:12:26 -0800 Subject: [PATCH 20/22] Delete blog_gen_example.py --- blog_gen_example.py | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 blog_gen_example.py diff --git a/blog_gen_example.py b/blog_gen_example.py deleted file mode 100644 index 7cf955358..000000000 --- a/blog_gen_example.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -from swarms.swarms.blog_gen import BlogGen - - -def main(): - api_key = os.getenv("OPENAI_API_KEY") - if not api_key: - raise ValueError("OPENAI_API_KEY environment variable not set.") - - blog_topic = input("Enter the topic for the blog generation: ") - - blog_generator = BlogGen(api_key, blog_topic) - blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT = ( - blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT.replace( - "{{BLOG_TOPIC}}", blog_topic - ) - ) - - blog_generator.run_workflow() - - -if __name__ == "__main__": - main() From b88ac057a011d472acc3d36f681f073ba0d89546 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:20:02 -0800 Subject: [PATCH 21/22] Delete swarms/models/autotemp.py --- swarms/models/autotemp.py | 67 --------------------------------------- 1 file changed, 67 deletions(-) delete mode 100644 swarms/models/autotemp.py diff --git a/swarms/models/autotemp.py b/swarms/models/autotemp.py deleted file mode 100644 index ed38a6216..000000000 --- a/swarms/models/autotemp.py +++ /dev/null @@ -1,67 +0,0 @@ -import re -from swarms.models.openai_models import OpenAIChat - -class AutoTemp: - """ - AutoTemp is a tool for automatically selecting the best temperature setting for a given task. - It generates responses at different temperatures, evaluates them, and ranks them based on quality. 
- """ - - def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6): - self.api_key = api_key - self.default_temp = default_temp - self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] - self.auto_select = auto_select - self.max_workers = max_workers - self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) - - def evaluate_output(self, output, temperature): - print(f"Evaluating output at temperature {temperature}...") - eval_prompt = f""" - Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria: - - - Relevance: How well does the output address the prompt or task at hand? - - Clarity: Is the output easy to understand and free of ambiguity? - - Utility: How useful is the output for its intended purpose? - - Pride: If the user had to submit this output to the world for their career, would they be proud? - - Delight: Is the output likely to delight or positively surprise the user? - - Be sure to comprehensively evaluate the output, it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical. - - Output to evaluate: - --- - {output} - --- - """ - score_text = self.llm(eval_prompt, temperature=0.5) - score_match = re.search(r'\b\d+(\.\d)?\b', score_text) - return round(float(score_match.group()), 1) if score_match else 0.0 - - def run(self, prompt, temperature_string): - print("Starting generation process...") - temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()] - outputs = {} - scores = {} - for temp in temperature_list: - print(f"Generating at temperature {temp}...") - output_text = self.llm(prompt, temperature=temp) - if output_text: - outputs[temp] = output_text - scores[temp] = self.evaluate_output(output_text, temp) - - print("Generation process complete.") - if not scores: - return "No valid outputs generated.", None - - sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) - best_temp, best_score = sorted_scores[0] - best_output = outputs[best_temp] - - return ( - f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}" - if self.auto_select - else "\n".join( - f"Temp {temp} | Score: {score}:\n{outputs[temp]}" - for temp, score in sorted_scores - ) - ) From 5c94fd2dd01db510b8a5a85abdd01f3ab299a030 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:20:27 -0800 Subject: [PATCH 22/22] Delete swarms/swarms/blog_gen.py --- swarms/swarms/blog_gen.py | 110 -------------------------------------- 1 file changed, 110 deletions(-) delete mode 100644 swarms/swarms/blog_gen.py diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py deleted file mode 100644 index 3781d895e..000000000 --- a/swarms/swarms/blog_gen.py +++ /dev/null @@ -1,110 +0,0 @@ -import os -from termcolor import colored -from swarms.models import OpenAIChat -from swarms.models.autotemp import AutoTemp -from swarms.structs import SequentialWorkflow - - -class BlogGen: - def __init__( - self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2" - ): # Add blog_topic as an argument - self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8) - self.auto_temp = AutoTemp(api_key) - self.temperature_range = temperature_range - self.workflow = 
SequentialWorkflow(max_loops=5) - - # Formatting the topic selection prompt with the user's topic - self.TOPIC_SELECTION_SYSTEM_PROMPT = f""" - Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'. - """ - - self.DRAFT_WRITER_SYSTEM_PROMPT = """ - Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences. - """ - - self.REVIEW_AGENT_SYSTEM_PROMPT = """ - Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the authors voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing. - """ - - self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """ - Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels. 
- """ - - def run_workflow(self): - try: - # Topic generation using OpenAIChat - topic_result = self.openai_chat.generate( - [self.TOPIC_SELECTION_SYSTEM_PROMPT] - ) - topic_output = topic_result.generations[0][0].text - print( - colored( - f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n", - "white", - ) - ) - - chosen_topic = topic_output.split("\n")[0] - print(colored("Selected topic: " + chosen_topic, "yellow")) - - # Initial draft generation with AutoTemp - initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace( - "{{CHOSEN_TOPIC}}", chosen_topic - ) - auto_temp_output = self.auto_temp.run( - initial_draft_prompt, self.temperature_range - ) - initial_draft_output = auto_temp_output # Assuming AutoTemp.run returns the best output directly - print( - colored( - f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n", - "white", - ) - ) - - # Review process using OpenAIChat - review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace( - "{{ARTICLE_TOPIC}}", chosen_topic - ) - review_result = self.openai_chat.generate([review_prompt]) - review_output = review_result.generations[0][0].text - print( - colored( - f"\nReview Output:\n----------------------------\n{review_output}\n", - "white", - ) - ) - - # Distribution preparation using OpenAIChat - distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace( - "{{ARTICLE_TOPIC}}", chosen_topic - ) - distribution_result = self.openai_chat.generate([distribution_prompt]) - distribution_output = distribution_result.generations[0][0].text - print( - colored( - f"\nDistribution Output:\n----------------------------\n{distribution_output}\n", - "white", - ) - ) - - # Final compilation of the blog - final_blog_content = ( - f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}" - ) - print( - colored( - f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n", - "green", - ) - ) - - except Exception as e: - print(colored(f"An error occurred: {str(e)}", "red")) - - -if __name__ == "__main__": - api_key = os.environ["OPENAI_API_KEY"] - blog_generator = BlogGen(api_key) - blog_generator.run_workflow()
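
A note on the scoring step that every AutoTemp variant in this series shares: the evaluator's reply is parsed with a single regex and the candidate temperatures are then ranked by the extracted score. Below is a minimal, self-contained sketch of just that step, in plain Python with no OpenAI call; the sample evaluator replies are invented for illustration and are not taken from the patches.

import re

def parse_score(score_text):
    # Same pattern used in AutoTemp.evaluate_output: first number in the reply,
    # optionally with one decimal place, rounded to one decimal.
    score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
    return round(float(score_match.group()), 1) if score_match else 0.0

# Hypothetical evaluator replies keyed by generation temperature (illustrative only).
raw_replies = {
    0.4: "Score: 72.5",
    0.8: "88.0 - strong draft",
    1.2: "I would rate this 64",
}

scores = {temp: parse_score(text) for temp, text in raw_replies.items()}

# Same ranking as AutoTemp.run: highest score first, best temperature reported.
sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
best_temp, best_score = sorted_scores[0]
print(f"Best temperature: {best_temp} (score {best_score})")  # Best temperature: 0.8 (score 88.0)

In every version of the class, the evaluation call itself is made at a fixed temperature of 0.5 regardless of the temperature used for generation, so ranking differences come only from the generated outputs, not from variability in the judge.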