DSDUDEd committed on
Commit
785e38d
·
verified ·
1 Parent(s): 3f2324f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -0
app.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py
import time

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Quantized GPTQ build of Wizard-Vicuna 30B on the Hugging Face Hub.
MODEL_ID = "TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ"

# Load tokenizer and model once at startup.  device_map="auto" lets
# accelerate place the weights on the available device(s); float16
# halves the activation memory footprint.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",
    torch_dtype=torch.float16,
)
# Function to generate AI response
def generate_response(prompt, chat_history=None):
    """Generate one AI reply for *prompt*, conditioned on the chat so far.

    Args:
        prompt: The latest user message.
        chat_history: List of (user_msg, ai_msg) pairs; None means no history.
            (None replaces the original mutable default ``[]``, which would
            have been shared across calls.)

    Returns:
        The model's reply text with the prompt scaffolding stripped off.
    """
    if chat_history is None:
        chat_history = []

    # Rebuild the running transcript the model conditions on.
    # str.join avoids the quadratic += string-building loop.
    parts = [f"User: {u}\nAI: {a}\n" for u, a in chat_history]
    parts.append(f"User: {prompt}\nAI:")
    context = "".join(parts)

    # Move inputs to wherever device_map actually placed the model,
    # instead of hard-coding "cuda" (which breaks on CPU/offloaded setups).
    inputs = tokenizer(context, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # The decoded text echoes the whole prompt; keep only the final reply.
    return response.split("AI:")[-1].strip()
# Simulate live typing in the UI.
def live_typing(prompt, chat_history):
    """Yield the AI reply as progressively longer prefixes, one char per step."""
    full_reply = generate_response(prompt, chat_history)
    for end in range(1, len(full_reply) + 1):
        time.sleep(0.02)  # typing speed
        yield full_reply[:end]
+
40
+ # Gradio UI
41
+ with gr.Blocks() as demo:
42
+ gr.Markdown("## 🤖 Wizard-Vicuna Chatbot with Avatars and Live Typing")
43
+
44
+ with gr.Row():
45
+ with gr.Column(scale=1):
46
+ user_avatar = gr.Image("user_avatar.png", elem_id="user-avatar")
47
+ with gr.Column(scale=4):
48
+ user_input = gr.Textbox(label="Your Message", placeholder="Type something...")
49
+ chat_state = gr.State([]) # Keep chat history
50
+
51
+ with gr.Row():
52
+ with gr.Column(scale=1):
53
+ ai_avatar = gr.Image("ai_avatar.png", elem_id="ai-avatar")
54
+ with gr.Column(scale=4):
55
+ output_box = gr.Textbox(label="AI is typing...", lines=8)
56
+
57
+ def chat(user_message, history):
58
+ # Append AI response after live typing
59
+ for partial in live_typing(user_message, history):
60
+ yield partial, history
61
+ history.append((user_message, partial))
62
+ yield partial, history
63
+
64
+ user_input.submit(chat, inputs=[user_input, chat_state], outputs=[output_box, chat_state])
65
+
66
+ demo.launch()