From 65fe2382a933a8134d1604493757b49d2066f33d Mon Sep 17 00:00:00 2001
From: Ayush Suryavanshi <113771722+AYUSHSURYAVANSHI@users.noreply.github.com>
Date: Thu, 12 Oct 2023 00:05:29 +0530
Subject: [PATCH] Create chat_bot.py

---
 chat_bot.py | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 chat_bot.py

diff --git a/chat_bot.py b/chat_bot.py
new file mode 100644
index 0000000..c608f65
--- /dev/null
+++ b/chat_bot.py
@@ -0,0 +1,36 @@
+# Import the required libraries
+import torch
+from transformers import GPT2LMHeadModel, GPT2Tokenizer
+
+# Load the pre-trained GPT-2 model and tokenizer
+model_name = "gpt2"  # You can choose a different model size if needed
+model = GPT2LMHeadModel.from_pretrained(model_name)
+tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+
+# Set the device (CPU or GPU) and switch to inference mode
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model.to(device)
+model.eval()
+
+# Define a function to generate a response to one user message
+def generate_response(input_text, max_new_tokens=100):
+    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)
+    with torch.no_grad():
+        output = model.generate(
+            input_ids,
+            max_new_tokens=max_new_tokens,  # bound the reply length, not prompt + reply
+            num_return_sequences=1,
+            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
+        )
+    # generate() returns the prompt followed by the continuation; keep only the new tokens
+    new_tokens = output[0][input_ids.shape[-1]:]
+    response = tokenizer.decode(new_tokens, skip_special_tokens=True)
+    return response
+
+# Example conversation loop; type "exit" to quit
+while True:
+    user_input = input("You: ")
+    if user_input.lower() == "exit":
+        break
+    response = generate_response(user_input)
+    print("Bot:", response)
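
To try the new script locally: assuming the torch and transformers packages are installed (for example via "pip install torch transformers"), run "python chat_bot.py". The GPT-2 weights are downloaded on first use, and typing "exit" at the prompt ends the conversation loop. Note that plain GPT-2 is a text-continuation model with no chat fine-tuning, so replies are free-form continuations of the user's text rather than dialogue turns.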