Update app.py
app.py CHANGED
@@ -121,64 +121,7 @@ class SearchEngine:
         return results
 
 
-class Chatbot:
-    def __init__(self, model_name="EleutherAI/gpt-neo-125M"):
-        """
-        Initializes the chatbot with GPT-Neo.
-        """
-        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-        self.model = AutoModelForCausalLM.from_pretrained(model_name)
-
-        # Set pad_token to eos_token if not already defined
-        if self.tokenizer.pad_token is None:
-            self.tokenizer.pad_token = self.tokenizer.eos_token
 
-    def generate_response(self, prompt, max_length=100):
-        """
-        Generates a response to a user query using GPT-Neo.
-        """
-        # Tokenize the input prompt
-        inputs = self.tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
-
-        # Generate the response
-        outputs = self.model.generate(
-            inputs.input_ids,
-            attention_mask=inputs.attention_mask,  # Pass the attention mask
-            max_length=max_length,
-            num_return_sequences=1,
-            pad_token_id=self.tokenizer.pad_token_id,  # Use the defined pad_token_id
-        )
-
-        # Decode the generated response
-        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
-        return response
-
-    def handle_request(self, prompt):
-        """
-        Handles user requests by determining the intent and delegating to the appropriate function.
-        """
-        # Check if the user wants to search for something
-        if "search" in prompt.lower():
-            query = prompt.lower().replace("search", "").strip()
-            results = search_engine.search(query)
-            return {"type": "search", "results": results}
-
-        # Check if the user wants a summary
-        elif "summarize" in prompt.lower() or "summary" in prompt.lower():
-            text = prompt.lower().replace("summarize", "").replace("summary", "").strip()
-            summary = summarizer.summarize(text)
-            return {"type": "summary", "summary": summary}
-
-        # Check if the user wants to extract topics
-        elif "topics" in prompt.lower() or "topic" in prompt.lower():
-            text = prompt.lower().replace("topics", "").replace("topic", "").strip()
-            topics = topic_extractor.extract_topics([text])
-            return {"type": "topics", "topics": topics.to_dict()}
-
-        # Default to generating a conversational response
-        else:
-            response = self.generate_response(prompt)
-            return {"type": "chat", "response": response}
 
 
 # Initialize models
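For reference, a minimal sketch of how the removed Chatbot class would have been exercised, assuming transformers is installed and that search_engine, summarizer, and topic_extractor are the module-level instances handle_request refers to (defined elsewhere in app.py; not shown in this diff):

# Hypothetical usage sketch, not part of the commit.
bot = Chatbot()  # downloads EleutherAI/gpt-neo-125M on first use

# Keyword-based intent dispatch: "search" -> search_engine,
# "summarize"/"summary" -> summarizer, "topics"/"topic" -> topic_extractor,
# anything else -> a GPT-Neo conversational reply.
print(bot.handle_request("search open source language models"))  # {"type": "search", ...}
print(bot.handle_request("hello, how are you?"))                 # {"type": "chat", ...}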