import datetime
import functools
from zoneinfo import ZoneInfo

import pytz
import requests
import torch
import yaml
from diffusers import StableDiffusionPipeline
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool

from Gradio_UI import GradioUI
from tools.final_answer import FinalAnswerTool
from tools.visit_webpage import VisitWebpageTool
from tools.web_search import DuckDuckGoSearchTool
| |
|
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').

    Returns:
        A human-readable sentence with the current local time, or an error
        message if the timezone name is not recognized.
    """
    try:
        # zoneinfo is in the standard library (Python 3.9+) and uses the
        # system tz database — same lookup as pytz, no third-party dependency.
        tz = ZoneInfo(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        # Unknown timezone names raise ZoneInfoNotFoundError / KeyError;
        # report the failure back to the agent as text rather than crashing.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
| |
|
@functools.lru_cache(maxsize=2)
def _load_pipeline(model_name: str, device: str):
    """Load and cache a StableDiffusionPipeline for *model_name* on *device*."""
    # fp16 weights are only supported on CUDA; CPU inference needs full precision,
    # otherwise the pipeline errors out (or silently produces garbage).
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(model_name, torch_dtype=dtype)
    return pipe.to(device)


@tool
def image_generator(prompt: str, model_name: str = "CompVis/stable-diffusion-v1-4"):
    """A tool that generates an image from a text prompt using a free Hugging Face model.

    Args:
        prompt: Text prompt to generate the image from.
        model_name: Model to use from the Hugging Face hub (default is Stable Diffusion v1.4).

    Returns:
        PIL.Image.Image: Generated image.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # The pipeline is cached per (model, device) so repeated tool calls do not
    # re-download and re-load several GB of weights on every invocation.
    pipe = _load_pipeline(model_name, device)
    return pipe(prompt).images[0]
| |
|
# Instantiate the tools that will be exposed to the agent below.
final_answer = FinalAnswerTool()
visit_webpage = VisitWebpageTool()
web_search = DuckDuckGoSearchTool()

# Hosted inference model used by the agent for code generation / reasoning.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)
| |
|
| |
|
| | |
| | |
| |
|
| | with open("prompts.yaml", 'r') as stream: |
| | prompt_templates = yaml.safe_load(stream) |
| | |
| | agent = CodeAgent( |
| | model=model, |
| | tools=[final_answer, visit_webpage, web_search, get_current_time_in_timezone, image_generator], |
| | max_steps=6, |
| | verbosity_level=1, |
| | grammar=None, |
| | planning_interval=None, |
| | name=None, |
| | description=None, |
| | prompt_templates=prompt_templates |
| | ) |
| |
|
| |
|
| | GradioUI(agent).launch() |