Update app.py

app.py CHANGED
@@ -1,588 +1,26 @@
-#!/usr/bin/env python3
-"""
-Improved Gradio Interface with MCP Client and SmolAgents
-"""
-
 import gradio as gr
 import os
-import asyncio
-import logging
-from typing import Optional
-from concurrent.futures import ThreadPoolExecutor
-from pathlib import Path
 
-# MCP and SmolAgents imports
 from smolagents import InferenceClientModel, CodeAgent, MCPClient
 
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# ============================================================================
-# ENVIRONMENT CONFIGURATION
-# ============================================================================
-
-def load_environment_variables():
-    """
-    Load environment variables with platform-specific handling.
-
-    For Hugging Face Spaces: Uses os.environ directly
-    For local development: Loads from .env file with fallbacks
-    """
-
-    def is_huggingface_space():
-        """Detect if running on Hugging Face Spaces platform"""
-        return (
-            os.getenv("SPACE_ID") is not None or
-            os.getenv("SPACE_AUTHOR_NAME") is not None or
-            os.getenv("GRADIO_SERVER_NAME") is not None
-        )
-
-    def load_dotenv_file():
-        """Load variables from .env file for local development"""
-        env_file = Path(".env")
-        env_vars = {}
-
-        if env_file.exists():
-            try:
-                with open(env_file, 'r', encoding='utf-8') as f:
-                    for line in f:
-                        line = line.strip()
-                        if line and not line.startswith('#') and '=' in line:
-                            key, value = line.split('=', 1)
-                            # Remove quotes if present
-                            value = value.strip().strip('"').strip("'")
-                            env_vars[key.strip()] = value
-                logger.info(f"Loaded {len(env_vars)} variables from .env file")
-            except Exception as e:
-                logger.warning(f"Failed to load .env file: {e}")
-        else:
-            logger.info("No .env file found, using system environment variables")
-
-        return env_vars
-
-    # Platform detection
-    on_huggingface = is_huggingface_space()
-    logger.info(f"Platform detected: {'Hugging Face Spaces' if on_huggingface else 'Local Development'}")
-
-    # Load environment variables
-    if on_huggingface:
-        # On Hugging Face Spaces: use environment variables directly
-        hf_token = os.getenv("HF_TOKEN")
-        mcp_server_url = os.getenv("MCP_SERVER_URL", "http://localhost:7860/gradio_api/mcp/sse")
-
-        logger.info("Using Hugging Face Spaces environment variables")
-
-    else:
-        # Local development: try .env file first, then system environment
-        env_vars = load_dotenv_file()
-
-        # Get HF_TOKEN
-        hf_token = (
-            env_vars.get("HF_TOKEN") or
-            os.getenv("HF_TOKEN")
-        )
-
-        # Get MCP_SERVER_URL
-        mcp_server_url = (
-            env_vars.get("MCP_SERVER_URL") or
-            os.getenv("MCP_SERVER_URL") or
-            "http://localhost:7860/gradio_api/mcp/sse"  # Default for local development
-        )
-
-        logger.info("Using local development environment configuration")
-
-    # Validate required variables
-    if not hf_token:
-        logger.error("HF_TOKEN not found in environment variables!")
-        logger.error("Please set HF_TOKEN in:")
-        if on_huggingface:
-            logger.error("- Hugging Face Spaces secrets/environment variables")
-        else:
-            logger.error("- .env file (HF_TOKEN=your_token_here)")
-            logger.error("- System environment variables")
-        raise ValueError("HF_TOKEN is required but not found")
-
-    # Set environment variables for the application
-    os.environ["HF_TOKEN"] = hf_token
-
-    # Log configuration (without exposing sensitive data)
-    logger.info(f"HF_TOKEN configured: {'✓' if hf_token else '✗'}")
-    logger.info(f"MCP_SERVER_URL: {mcp_server_url}")
-
-    return hf_token, mcp_server_url
 
-# Load environment configuration
 try:
-    HF_TOKEN, MCP_SERVER_URL = load_environment_variables()
-except Exception as e:
-    logger.error(f"Failed to load environment configuration: {e}")
-    # Fallback values for development/testing
-    HF_TOKEN = os.getenv("HF_TOKEN", "")
-    MCP_SERVER_URL = "http://localhost:7860/gradio_api/mcp/sse"
-
-if not HF_TOKEN:
-    logger.warning("Running without HF_TOKEN - some features may not work")
-
-# Global variables
-agent: Optional[CodeAgent] = None
-tools: Optional[list] = None
-mcp_client: Optional[MCPClient] = None
-executor = ThreadPoolExecutor(max_workers=4)
-
-# ============================================================================
-# ASYNC UTILITIES
-# ============================================================================
-
-def run_async_in_thread(coro):
-    """Run async coroutine in a separate thread with its own event loop"""
-    def run_in_thread():
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-        try:
-            return loop.run_until_complete(coro)
-        finally:
-            loop.close()
-
-    future = executor.submit(run_in_thread)
-    return future.result(timeout=60)
-
-# ============================================================================
-# MCP CLIENT AND AGENT INITIALIZATION
-# ============================================================================
-
-async def _initialize_mcp_client():
-    """Async helper for MCP client initialization"""
-    global mcp_client, tools, agent
-
-    logger.info(f"Connecting to MCP server at: {MCP_SERVER_URL}")
-
-    # Create MCP Client connection
     mcp_client = MCPClient(
-        {"url": MCP_SERVER_URL
+        {"url": os.environ["MCP_SERVER_URL"]}
     )
-
-    # Get available tools from the MCP server
-    logger.info("Retrieving tools from MCP server...")
-    all_tools = mcp_client.get_tools()
-    logger.info(f"Retrieved {len(all_tools)} tools from MCP server")
-
-    # Filter out duplicate tool names
-    seen_names = set()
-    tools = []
-    for tool in all_tools:
-        tool_name = getattr(tool, 'name', f'tool_{len(tools)}')
-        if tool_name not in seen_names and not tool_name.startswith('lambda'):
-            seen_names.add(tool_name)
-            tools.append(tool)
-
-    logger.info(f"Using {len(tools)} unique tools (filtered out duplicates and lambda functions)")
-    for i, tool in enumerate(tools, 1):
-        tool_name = getattr(tool, 'name', f'tool_{i}')
-        tool_desc = getattr(tool, 'description', 'No description available')
-        logger.info(f" {i}. {tool_name}: {tool_desc}")
-
-    # Initialize the Hugging Face model
-    logger.info("Initializing InferenceClientModel...")
-    try:
-        # Try different free models (current as of 2024-2025)
-        models_to_try = [
-            "facebook/blenderbot-400M-distill",
-            "google/flan-t5-small",
-            "huggingface/CodeBERTa-small-v1",
-            "bigscience/bloom-560m"
-        ]
-
-        model = None
-        for model_name in models_to_try:
-            try:
-                logger.info(f"Trying model: {model_name}")
-                model = InferenceClientModel(
-                    model=model_name,
-                    token=os.getenv("HF_TOKEN"),
-                    timeout=30
-                )
-                logger.info(f"Successfully initialized with {model_name}")
-                break
-            except Exception as model_error:
-                logger.warning(f"Failed with {model_name}: {model_error}")
-                continue
-
-        if model is None:
-            # Last resort - try without specifying model
-            logger.info("Trying default model configuration...")
-            model = InferenceClientModel(
-                token=os.getenv("HF_TOKEN"),
-                timeout=30
-            )
-
-    except Exception as e:
-        logger.error(f"All model initialization attempts failed: {e}")
-        # Create a mock model for testing
-        logger.info("Creating mock model for testing...")
-
-        class MockModel:
-            def __init__(self):
-                self.name = "MockModel"
-
-            async def __call__(self, messages, **kwargs):
-                # Simple mock response
-                if messages and len(messages) > 0:
-                    user_message = str(messages[-1]).lower()
-                    if "sentiment" in user_message or "analyze" in user_message:
-                        return "I'll help you analyze sentiment. Please use the analyze_sentiment tool."
-                    elif "health" in user_message:
-                        return "I'll check the system health for you using the health_check tool."
-                    elif "tools" in user_message:
-                        return "Let me show you the available tools using the get_backend_info tool."
-                    else:
-                        return "I can help you with sentiment analysis. What would you like to analyze?"
-                return "How can I help you with sentiment analysis?"
-
-        model = MockModel()
-        logger.info("Mock model created successfully")
-
-    # Create the CodeAgent with discovered tools
-    logger.info("Creating CodeAgent with MCP tools...")
-    try:
-        agent = CodeAgent(
-            tools=[*tools],
-            model=model,
-            max_steps=3
-        )
-        logger.info("CodeAgent created successfully")
-    except Exception as e:
-        logger.error(f"Failed to create CodeAgent: {e}")
-        # Create a simple agent wrapper
-        logger.info("Creating simple agent wrapper...")
-
-        class SimpleAgent:
-            def __init__(self, tools, model):
-                self.tools = tools
-                self.model = model
-                self.name = "SimpleAgent"
-
-            async def run(self, query):
-                query_lower = query.lower()
-
-                # Direct tool mapping
-                if "analyze" in query_lower and ":" in query:
-                    text = query.split(":", 1)[1].strip().strip("'\"")
-                    for tool in self.tools:
-                        if getattr(tool, 'name', '') == 'analyze_sentiment':
-                            try:
-                                result = await tool(text=text)
-                                return f"Sentiment Analysis Result: {result}"
-                            except Exception as e:
-                                return f"Error analyzing sentiment: {e}"
-
-                elif "health" in query_lower:
-                    for tool in self.tools:
-                        if 'health' in getattr(tool, 'name', '').lower():
-                            try:
-                                result = await tool()
-                                return f"Health Check: {result}"
-                            except Exception as e:
-                                return f"Error checking health: {e}"
-
-                elif "tools" in query_lower or "available" in query_lower:
-                    tool_list = []
-                    for i, tool in enumerate(self.tools, 1):
-                        tool_name = getattr(tool, 'name', f'tool_{i}')
-                        tool_desc = getattr(tool, 'description', 'No description')
-                        tool_list.append(f"{i}. {tool_name}: {tool_desc}")
-                    return "Available tools:\n" + "\n".join(tool_list)
-
-                elif "batch" in query_lower:
-                    for tool in self.tools:
-                        if 'batch' in getattr(tool, 'name', '').lower():
-                            try:
-                                texts = ["I love this!", "This is terrible", "It's okay"]
-                                result = await tool(texts=texts)
-                                return f"Batch Analysis: {result}"
-                            except Exception as e:
-                                return f"Error in batch analysis: {e}"
-
-                else:
-                    return """I can help you with:
-
-• Sentiment Analysis: "analyze: [your text]"
-• Health Check: "health check"
-• List Tools: "what tools are available?"
-• Batch Analysis: "run batch analysis"
-
-Example: "analyze: I love this product!" """
-
-        agent = SimpleAgent(tools, model)
-        logger.info("Simple agent wrapper created successfully")
-
-    logger.info("MCP client and agent initialized successfully!")
-    return True
-
-def initialize_mcp_client():
-    """Initialize MCP client and connect to the local server"""
-    try:
-        success = run_async_in_thread(_initialize_mcp_client())
-        return success
-    except Exception as e:
-        logger.error(f"Failed to initialize MCP client: {str(e)}")
-        return False
-
-# ============================================================================
-# GRADIO INTERFACE FUNCTIONS
-# ============================================================================
-
-async def _process_question_async(question: str):
-    """Async helper for processing questions"""
-    global agent
-
-    if not agent:
-        return "❌ Agent not initialized. Please check MCP server connection."
-
-    logger.info(f"Processing question: {question}")
-    response = await agent.run(question)
-    logger.info("Question processed successfully")
-    return str(response)
-
-def process_question(question: str, history: list) -> tuple:
-    """Process user question using the MCP-enabled agent"""
-
-    if not question.strip():
-        error_msg = "⚠️ Please enter a question."
-        history.append([question, error_msg])
-        return history, ""
-
-    try:
-        # Add user question to history with thinking indicator
-        history.append([question, "🤔 Thinking..."])
-
-        # Process the question in a separate thread
-        response = run_async_in_thread(_process_question_async(question))
-
-        # Update history with the response
-        history[-1][1] = f"🤖 {response}"
-
-    except Exception as e:
-        error_msg = f"❌ Error processing question: {str(e)}"
-        history[-1][1] = error_msg
-        logger.error(f"Error processing question: {str(e)}")
-
-    return history, ""
-
-def get_server_status() -> str:
-    """Get current server connection status"""
-    global agent, tools, mcp_client
-
-    status_parts = []
-
-    # MCP Client Status
-    if mcp_client:
-        status_parts.append("✅ MCP Client: Connected")
-    else:
-        status_parts.append("❌ MCP Client: Not connected")
-
-    # Tools Status
-    if tools:
-        status_parts.append(f"✅ Tools: {len(tools)} available")
-        for i, tool in enumerate(tools[:5], 1):
-            tool_name = getattr(tool, 'name', f'tool_{i}')
-            tool_desc = getattr(tool, 'description', 'No description')
-            status_parts.append(f" • {tool_name}: {tool_desc[:50]}...")
-        if len(tools) > 5:
-            status_parts.append(f" ... and {len(tools) - 5} more tools")
-    else:
-        status_parts.append("❌ Tools: None available")
-
-    # Agent Status
-    if agent:
-        status_parts.append("✅ Agent: Ready")
-    else:
-        status_parts.append("❌ Agent: Not initialized")
-
-    status_parts.append(f"🔗 Server URL: {MCP_SERVER_URL}")
-    return "\n".join(status_parts)
-
-def reconnect_to_server():
-    """Attempt to reconnect to the MCP server"""
-    try:
-        success = initialize_mcp_client()
-        if success:
-            return "✅ Successfully reconnected to MCP server!"
-        else:
-            return "❌ Failed to reconnect. Please check if the MCP server is running."
-    except Exception as e:
-        return f"❌ Reconnection error: {str(e)}"
-
-# ============================================================================
-# GRADIO INTERFACE CREATION
-# ============================================================================
-
-def create_gradio_interface():
-    """Create and configure the Gradio interface"""
-
-    css = """
-    .gradio-container {
-        max-width: 1200px !important;
-    }
-    .status-box {
-        background-color: #f8f9fa;
-        border: 1px solid #dee2e6;
-        border-radius: 8px;
-        padding: 15px;
-        font-family: monospace;
-        font-size: 12px;
-        white-space: pre-line;
-    }
-    """
-
-    with gr.Blocks(
-        title="MCP Sentiment Analysis Client v2",
-        theme=gr.themes.Soft(),
-        css=css
-    ) as interface:
-
-        # Header
-        gr.Markdown("""
-        # 🎭 MCP Sentiment Analysis Client v2
-
-        **Improved Version** - Connect to your local MCP server using SmolAgents for AI-powered sentiment analysis.
-        """)
-
-        with gr.Row():
-            with gr.Column(scale=2):
-                # Main chat interface
-                chatbot = gr.Chatbot(
-                    label="Chat with MCP Agent",
-                    height=500,
-                    show_label=True,
-                    container=True
-                )
-
-                with gr.Row():
-                    question_input = gr.Textbox(
-                        placeholder="Ask about sentiment analysis (e.g., 'Analyze: I love this product!')",
-                        label="Your Question",
-                        lines=2,
-                        scale=4
-                    )
-                    submit_btn = gr.Button("Submit", variant="primary", scale=1)
-
-                # Quick action buttons
-                with gr.Row():
-                    gr.Markdown("### 💡 Quick Actions:")
-
-                with gr.Row():
-                    examples = [
-                        "Analyze: 'I love this!'",
-                        "Analyze: 'This is terrible'",
-                        "What tools are available?",
-                        "Health check"
-                    ]
-
-                    for example in examples:
-                        btn = gr.Button(example, size="sm", scale=1)
-                        btn.click(
-                            lambda x=example: x,
-                            outputs=question_input
-                        )
-
-            with gr.Column(scale=1):
-                # Server status and controls
-                gr.Markdown("### 🔧 Server Status")
-
-                status_display = gr.Textbox(
-                    label="Connection Status",
-                    lines=12,
-                    interactive=False,
-                    elem_classes=["status-box"]
-                )
-
-                with gr.Row():
-                    refresh_btn = gr.Button("🔄 Refresh", size="sm")
-                    reconnect_btn = gr.Button("🔌 Reconnect", size="sm", variant="secondary")
-
-                # Information panel
-                gr.Markdown("""
-                ### ℹ️ Quick Guide
-
-                **Example Questions:**
-                - "Analyze the sentiment of: [your text]"
-                - "What's the sentiment of multiple texts?"
-                - "Check system health"
-                - "What tools do you have?"
-
-                **Tips:**
-                - Be specific with your requests
-                - Wait for responses (may take 10-30 seconds)
-                - Use the reconnect button if you see errors
-                """)
-
-        # Event handlers
-        submit_btn.click(
-            process_question,
-            inputs=[question_input, chatbot],
-            outputs=[chatbot, question_input]
-        )
-
-        question_input.submit(
-            process_question,
-            inputs=[question_input, chatbot],
-            outputs=[chatbot, question_input]
-        )
-
-        refresh_btn.click(
-            get_server_status,
-            outputs=status_display
-        )
-
-        reconnect_btn.click(
-            reconnect_to_server,
-            outputs=status_display
-        )
-
-        # Initialize status display on load
-        interface.load(
-            get_server_status,
-            outputs=status_display
-        )
-
-    return interface
+    tools = mcp_client.get_tools()
 
-
-
-# ============================================================================
+    model = InferenceClientModel(token=os.environ["HF_TOKEN"])
+    agent = CodeAgent(tools=[*tools], model=model, additional_authorized_imports=["json", "ast", "urllib", "base64"])
 
-
-
-
-
-
-
-# Initialize MCP client and agent
-print("⚙️ Initializing MCP client and agent...")
-success = initialize_mcp_client()
-
-if success:
-    print("✅ Initialization successful!")
-else:
-    print("⚠️ Initialization failed, but interface will still launch.")
-    print("   You can try reconnecting using the interface.")
-
-# Create and launch Gradio interface
-print("🌐 Creating Gradio interface...")
-interface = create_gradio_interface()
-
-print("🎉 Launching interface...")
-print("📱 Access the interface at: http://localhost:7862")
-print("🛑 Press Ctrl+C to stop the server")
-
-# Launch the interface
-interface.launch(
-    server_name="0.0.0.0",
-    server_port=7862,
-    share=False,
-    debug=False,
-    show_error=True
+    demo = gr.ChatInterface(
+        fn=lambda message, history: str(agent.run(message)),
+        type="messages",
+        examples=["Analyze the sentiment of the following text 'This is awesome'"],
+        title="Agent with MCP Tools",
+        description="This is a simple agent that uses MCP tools to answer questions.",
     )
 
-
-
+    demo.launch()
+finally:
+    mcp_client.disconnect()
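
One caveat about the new file's try/finally shape, with a sketch that is not part of the commit: if MCPClient(...) itself raises (for example, when MCP_SERVER_URL is unset), the name mcp_client is never bound and the finally clause fails with a NameError instead of the original error. A minimal guarded variant of the same code:

import os

import gradio as gr
from smolagents import InferenceClientModel, CodeAgent, MCPClient

mcp_client = None  # bind the name first so the finally clause is always safe
try:
    mcp_client = MCPClient({"url": os.environ["MCP_SERVER_URL"]})
    tools = mcp_client.get_tools()

    model = InferenceClientModel(token=os.environ["HF_TOKEN"])
    agent = CodeAgent(tools=[*tools], model=model)

    demo = gr.ChatInterface(
        fn=lambda message, history: str(agent.run(message)),
        type="messages",
        title="Agent with MCP Tools",
    )
    demo.launch()
finally:
    if mcp_client is not None:  # skip disconnect when the client never connected
        mcp_client.disconnect()

The additional_authorized_imports=["json", "ast", "urllib", "base64"] argument in the committed version whitelists modules that the CodeAgent's generated Python code is allowed to import; smolagents rejects other imports by default.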
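The trimmed app reads all of its configuration from two environment variables, MCP_SERVER_URL and HF_TOKEN; on Spaces these would come from the Space's secrets, and the old .env loader is gone. A hypothetical local-run sketch (the URL is the default the old file used; the token value is a placeholder, not a real token):

import os

# Hypothetical local defaults; on Hugging Face Spaces, set these as secrets instead.
os.environ.setdefault("MCP_SERVER_URL", "http://localhost:7860/gradio_api/mcp/sse")
os.environ.setdefault("HF_TOKEN", "hf_xxx")  # placeholder token for illustration only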