Spaces:
Sleeping
Sleeping
claude suggestions
Browse files
app.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
|
|
|
| 3 |
|
| 4 |
interviewer_system_prompt = """
|
| 5 |
# Task
|
|
@@ -114,7 +115,7 @@ with gr.Blocks() as demo:
|
|
| 114 |
def user_submit(user_message, history):
|
| 115 |
return "", history + [{"role": "user", "content": user_message}]
|
| 116 |
|
| 117 |
-
def bot_respond(history, system_msg, max_tok, temp, top_p_val, hf_token):
|
| 118 |
if not history or history[-1]["role"] != "user":
|
| 119 |
return history
|
| 120 |
|
|
@@ -126,7 +127,7 @@ with gr.Blocks() as demo:
|
|
| 126 |
bot_response = response
|
| 127 |
yield history + [{"role": "assistant", "content": bot_response}]
|
| 128 |
|
| 129 |
-
def handle_auto_respond(history, max_tok, temp, top_p_val, hf_token):
|
| 130 |
if not history:
|
| 131 |
return history
|
| 132 |
|
|
@@ -137,7 +138,7 @@ with gr.Blocks() as demo:
|
|
| 137 |
updated_history = history + [{"role": "user", "content": auto_response}]
|
| 138 |
return updated_history
|
| 139 |
|
| 140 |
-
def bot_respond_after_auto(history, system_msg, max_tok, temp, top_p_val, hf_token):
|
| 141 |
# This is called after auto-respond to get the interviewer's response
|
| 142 |
if not history or history[-1]["role"] != "user":
|
| 143 |
return history
|
|
@@ -158,7 +159,7 @@ with gr.Blocks() as demo:
|
|
| 158 |
queue=False
|
| 159 |
).then(
|
| 160 |
bot_respond,
|
| 161 |
-
[chatbot, system_prompt, max_tokens, temperature, top_p
|
| 162 |
chatbot
|
| 163 |
)
|
| 164 |
|
|
@@ -169,19 +170,19 @@ with gr.Blocks() as demo:
|
|
| 169 |
queue=False
|
| 170 |
).then(
|
| 171 |
bot_respond,
|
| 172 |
-
[chatbot, system_prompt, max_tokens, temperature, top_p
|
| 173 |
chatbot
|
| 174 |
)
|
| 175 |
|
| 176 |
# Wire up the auto-respond button
|
| 177 |
auto_respond_btn.click(
|
| 178 |
handle_auto_respond,
|
| 179 |
-
[chatbot, max_tokens, temperature, top_p
|
| 180 |
chatbot,
|
| 181 |
queue=False
|
| 182 |
).then(
|
| 183 |
bot_respond_after_auto,
|
| 184 |
-
[chatbot, system_prompt, max_tokens, temperature, top_p
|
| 185 |
chatbot
|
| 186 |
)
|
| 187 |
|
|
@@ -193,12 +194,13 @@ def respond(
|
|
| 193 |
max_tokens,
|
| 194 |
temperature,
|
| 195 |
top_p,
|
| 196 |
-
hf_token: gr.OAuthToken,
|
| 197 |
):
|
| 198 |
"""
|
| 199 |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
| 200 |
"""
|
| 201 |
-
|
|
|
|
| 202 |
|
| 203 |
messages = [{"role": "system", "content": system_message}]
|
| 204 |
|
|
@@ -229,7 +231,7 @@ def generate_auto_response(
|
|
| 229 |
max_tokens,
|
| 230 |
temperature,
|
| 231 |
top_p,
|
| 232 |
-
hf_token: gr.OAuthToken,
|
| 233 |
):
|
| 234 |
"""
|
| 235 |
Generates an automatic candidate response based on the interviewer's last question.
|
|
@@ -243,7 +245,8 @@ def generate_auto_response(
|
|
| 243 |
if not last_message or last_message.get("role") != "assistant":
|
| 244 |
return "Thank you for your question. I'm happy to answer."
|
| 245 |
|
| 246 |
-
|
|
|
|
| 247 |
|
| 248 |
# Create a system prompt for the candidate
|
| 249 |
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
+
from typing import Optional
|
| 4 |
|
| 5 |
interviewer_system_prompt = """
|
| 6 |
# Task
|
|
|
|
def user_submit(user_message, history):
    """Handle a chat submission: clear the textbox and append the user's turn.

    Returns a tuple of ("", new_history) where new_history is a fresh list —
    the incoming history is never mutated in place.
    """
    updated = list(history)
    updated.append({"role": "user", "content": user_message})
    return "", updated
|
| 117 |
|
| 118 |
+
def bot_respond(history, system_msg, max_tok, temp, top_p_val, hf_token: Optional[gr.OAuthToken] = None):
|
| 119 |
if not history or history[-1]["role"] != "user":
|
| 120 |
return history
|
| 121 |
|
|
|
|
| 127 |
bot_response = response
|
| 128 |
yield history + [{"role": "assistant", "content": bot_response}]
|
| 129 |
|
| 130 |
+
def handle_auto_respond(history, max_tok, temp, top_p_val, hf_token: Optional[gr.OAuthToken] = None):
|
| 131 |
if not history:
|
| 132 |
return history
|
| 133 |
|
|
|
|
| 138 |
updated_history = history + [{"role": "user", "content": auto_response}]
|
| 139 |
return updated_history
|
| 140 |
|
| 141 |
+
def bot_respond_after_auto(history, system_msg, max_tok, temp, top_p_val, hf_token: Optional[gr.OAuthToken] = None):
|
| 142 |
# This is called after auto-respond to get the interviewer's response
|
| 143 |
if not history or history[-1]["role"] != "user":
|
| 144 |
return history
|
|
|
|
| 159 |
queue=False
|
| 160 |
).then(
|
| 161 |
bot_respond,
|
| 162 |
+
[chatbot, system_prompt, max_tokens, temperature, top_p],
|
| 163 |
chatbot
|
| 164 |
)
|
| 165 |
|
|
|
|
| 170 |
queue=False
|
| 171 |
).then(
|
| 172 |
bot_respond,
|
| 173 |
+
[chatbot, system_prompt, max_tokens, temperature, top_p],
|
| 174 |
chatbot
|
| 175 |
)
|
| 176 |
|
| 177 |
# Wire up the auto-respond button
|
| 178 |
auto_respond_btn.click(
|
| 179 |
handle_auto_respond,
|
| 180 |
+
[chatbot, max_tokens, temperature, top_p],
|
| 181 |
chatbot,
|
| 182 |
queue=False
|
| 183 |
).then(
|
| 184 |
bot_respond_after_auto,
|
| 185 |
+
[chatbot, system_prompt, max_tokens, temperature, top_p],
|
| 186 |
chatbot
|
| 187 |
)
|
| 188 |
|
|
|
|
| 194 |
max_tokens,
|
| 195 |
temperature,
|
| 196 |
top_p,
|
| 197 |
+
hf_token: Optional[gr.OAuthToken] = None,
|
| 198 |
):
|
| 199 |
"""
|
| 200 |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
| 201 |
"""
|
| 202 |
+
token = hf_token.token if hf_token else None
|
| 203 |
+
client = InferenceClient(token=token, model="CohereForAI/aya-23-35B")
|
| 204 |
|
| 205 |
messages = [{"role": "system", "content": system_message}]
|
| 206 |
|
|
|
|
| 231 |
max_tokens,
|
| 232 |
temperature,
|
| 233 |
top_p,
|
| 234 |
+
hf_token: Optional[gr.OAuthToken] = None,
|
| 235 |
):
|
| 236 |
"""
|
| 237 |
Generates an automatic candidate response based on the interviewer's last question.
|
|
|
|
| 245 |
if not last_message or last_message.get("role") != "assistant":
|
| 246 |
return "Thank you for your question. I'm happy to answer."
|
| 247 |
|
| 248 |
+
token = hf_token.token if hf_token else None
|
| 249 |
+
client = InferenceClient(token=token, model="CohereForAI/aya-23-35B")
|
| 250 |
|
| 251 |
# Create a system prompt for the candidate
|
| 252 |
|