#!/usr/bin/env python3
"""
Gradio Chatbot for vLLM

A simple web-based chatbot interface that connects to vLLM using the
OpenAI-compatible API.

Requirements:
    pip install gradio openai

Usage:
    python chatbot.py

Then open http://localhost:7860 in your browser.
"""
import os
import sys

import gradio as gr
from openai import OpenAI

# Configuration from environment variables
VLLM_BASE_URL = os.getenv("VLLM_BASE_URL", "http://localhost:8009/v1")
MODEL_NAME = os.getenv("MODEL_NAME", "HuggingFaceTB/SmolLM2-360M-Instruct")
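
# Both values above can be overridden at launch without editing this file,
# for example (the server URL and model name below are illustrative):
#
#   VLLM_BASE_URL=http://my-server:8000/v1 MODEL_NAME=my-org/my-model python chatbot.py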

# Initialize OpenAI client pointing to vLLM
try:
    client = OpenAI(
        base_url=VLLM_BASE_URL,
        api_key="dummy"  # vLLM doesn't require authentication
    )
except Exception as e:
    print(f"Error initializing OpenAI client: {e}")
    sys.exit(1)
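
# Quick sanity check from a shell, assuming the default endpoint above
# (vLLM's OpenAI-compatible server exposes the standard /v1/models route):
#
#   curl http://localhost:8009/v1/models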


def chat(message, history, temperature, max_tokens, system_prompt):
    """
    Chat function that processes user messages and returns AI responses.

    Args:
        message: Current user message
        history: List of (user_msg, bot_msg) tuples
        temperature: Sampling temperature (0.0 - 2.0)
        max_tokens: Maximum tokens to generate
        system_prompt: System message to set chatbot behavior

    Yields:
        Partial responses as they're generated (streaming)
    """
    # Build messages list for OpenAI API
    messages = []

    # Add system prompt if provided
    if system_prompt and system_prompt.strip():
        messages.append({
            "role": "system",
            "content": system_prompt
        })

    # Convert history from tuples to message format
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})

    # Add current message
    messages.append({"role": "user", "content": message})

    try:
        # Create streaming completion
        stream = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=True
        )

        # Stream response token by token
        response = ""
        for chunk in stream:
            if chunk.choices[0].delta.content:
                response += chunk.choices[0].delta.content
                yield response
    except Exception as e:
        error_msg = f"Error: {str(e)}\n\nMake sure vLLM is running at {VLLM_BASE_URL}"
        yield error_msg
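
# Note: chat() above is not wired into the Blocks UI below (the UI uses
# respond() instead). Its (message, history, ...) signature matches
# gr.ChatInterface, so a minimal alternative front end could be sketched as
# follows (kept commented out; the slider/textbox defaults simply mirror the
# Blocks settings panel below):
#
#   gr.ChatInterface(
#       fn=chat,
#       additional_inputs=[
#           gr.Slider(0.0, 2.0, value=0.7, label="Temperature"),
#           gr.Slider(50, 500, value=200, step=10, label="Max Tokens"),
#           gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt"),
#       ],
#   ).launch()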


# Create Gradio interface
with gr.Blocks(title="vLLM Chatbot") as demo:
    gr.Markdown(
        """
        # 🤖 vLLM Chatbot

        Chat with SmolLM2 running on your local vLLM server.

        **Model:** {model}
        **Endpoint:** {endpoint}
        """.format(model=MODEL_NAME, endpoint=VLLM_BASE_URL)
    )
    with gr.Row():
        with gr.Column(scale=3):
            # Main chat interface
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                type="messages"  # respond() below builds openai-style dict messages
            )
            msg = gr.Textbox(
                label="Your message",
                placeholder="Type your message here and press Enter...",
                lines=2
            )
            with gr.Row():
                submit = gr.Button("Send", variant="primary")
                clear = gr.Button("Clear")
        with gr.Column(scale=1):
            # Settings panel
            gr.Markdown("### Settings")
            system_prompt = gr.Textbox(
                label="System Prompt",
                placeholder="You are a helpful assistant.",
                value="You are a helpful AI assistant.",
                lines=3
            )
            temperature = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=0.7,
                step=0.1,
                label="Temperature",
                info="Higher = more creative, Lower = more focused"
            )
            max_tokens = gr.Slider(
                minimum=50,
                maximum=500,
                value=200,
                step=10,
                label="Max Tokens",
                info="Maximum length of response"
            )

            gr.Markdown("### Quick Actions")
            example_btn1 = gr.Button("💡 Explain Docker", size="sm")
            example_btn2 = gr.Button("💻 Write Python code", size="sm")
            example_btn3 = gr.Button("📝 Write a poem", size="sm")

    # Examples section
    gr.Markdown("### Example Questions")
    gr.Examples(
        examples=[
            ["What is vLLM and how does it work?"],
            ["Explain the difference between Docker and Kubernetes."],
            ["Write a Python function to check if a number is prime."],
            ["What are the benefits of using containers?"],
            ["How do I optimize Docker images for production?"],
        ],
        inputs=msg,
    )

    # Info section
    with gr.Accordion("ℹ️ About", open=False):
        gr.Markdown(
            """
            This chatbot demonstrates local LLM inference using:

            - **vLLM**: High-performance inference engine
            - **SmolLM2**: Small but capable language model
            - **Gradio**: Interactive web interface
            - **OpenAI API**: Compatible endpoint

            ### Tips:
            - Adjust temperature for creativity vs. consistency
            - Use system prompts to guide the chatbot's behavior
            - Lower max_tokens for faster responses
            - Higher max_tokens for detailed answers

            ### Troubleshooting:
            If you get connection errors:
            1. Check vLLM is running: `docker compose ps`
            2. Verify health: `curl http://localhost:8009/health`
            3. Check logs: `docker compose logs -f vllm-cpu`
            """
        )

    # Event handlers
    def respond(message, history, temp, tokens, sys_prompt):
        """Handle a chat submission and stream back the updated history."""
        # Guard against a None history (e.g. right after the Clear button)
        history = history or []

        # Build messages for the API
        messages = []
        if sys_prompt and sys_prompt.strip():
            messages.append({"role": "system", "content": sys_prompt})

        # Add history (already in dict format from the Chatbot component)
        if history:
            messages.extend(history)

        # Add the current message
        messages.append({"role": "user", "content": message})

        try:
            # Call the API with streaming enabled
            stream = client.chat.completions.create(
                model=MODEL_NAME,
                messages=messages,
                temperature=temp,
                max_tokens=tokens,
                stream=True
            )

            # Build new history with the user message and streaming response
            new_history = history + [{"role": "user", "content": message}]
            response = ""
            for chunk in stream:
                if chunk.choices[0].delta.content:
                    response += chunk.choices[0].delta.content
                    # Yield updated history with the partial assistant response
                    yield new_history + [{"role": "assistant", "content": response}]
        except Exception as e:
            # Log error to console for debugging
            print(f"ERROR: {type(e).__name__}: {str(e)}")
            # Provide a helpful error message to the user
            error_msg = (
                f"Connection Error: {str(e)}\n\n"
                f"Please check:\n"
                f"1. vLLM is running: docker compose ps\n"
                f"2. API endpoint: {VLLM_BASE_URL}"
            )
            new_history = history + [{"role": "user", "content": message}]
            yield new_history + [{"role": "assistant", "content": error_msg}]

    # Submit on button click or Enter key
    msg.submit(
        respond,
        inputs=[msg, chatbot, temperature, max_tokens, system_prompt],
        outputs=chatbot
    ).then(
        lambda: "",  # Clear input after submission
        outputs=msg
    )
    submit.click(
        respond,
        inputs=[msg, chatbot, temperature, max_tokens, system_prompt],
        outputs=chatbot
    ).then(
        lambda: "",
        outputs=msg
    )

    # Clear conversation (return an empty list so the messages-format
    # history stays a list rather than becoming None)
    clear.click(lambda: [], outputs=chatbot)

    # Example button handlers
    example_btn1.click(
        lambda: "Explain what Docker is and how it works.",
        outputs=msg
    )
    example_btn2.click(
        lambda: "Write a Python function to calculate the factorial of a number with error handling.",
        outputs=msg
    )
    example_btn3.click(
        lambda: "Write a haiku about artificial intelligence and containers.",
        outputs=msg
    )


if __name__ == "__main__":
    print("=" * 60)
    print("Starting vLLM Chatbot Interface")
    print("=" * 60)
    print(f"Model: {MODEL_NAME}")
    print(f"Endpoint: {VLLM_BASE_URL}")
    print("\nChecking vLLM connection...")

    # Test connection before starting
    try:
        models = client.models.list()
        print("✓ Connected successfully!")
        print(f"✓ Available models: {[m.id for m in models.data]}")
    except Exception as e:
        print("✗ Warning: Could not connect to vLLM")
        print(f"  Error: {e}")
        print("\n  Make sure vLLM is running:")
        print("  docker compose ps")
        print(f"  curl {VLLM_BASE_URL.replace('/v1', '')}/health")
        print("\n  Continuing anyway - you can start vLLM later.")

    print("\n" + "=" * 60)
    print("Starting Gradio interface...")
    print("=" * 60)

    # Launch Gradio
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,  # Set to True to create a public link
        show_error=True
    )
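
# With server_name="0.0.0.0" the interface is reachable from other machines
# on the local network at http://<host-ip>:7860; keep share=False unless you
# explicitly want Gradio to create a public tunnel link.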