import os
from typing import Any, Dict, List, Tuple

import openai
import requests
import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI as l_OpenAI

OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]

openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)
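
# Both keys are read from the environment; illustrative shell setup (the
# values below are placeholders, not from the original app):
#   export OPENAI_API_KEY="sk-..."
#   export SERPAPI_API_KEY="..."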

def call_chatgpt(query: str, model: str = "gpt-3.5-turbo") -> str:
    """
    Generates a response to a query using the specified language model.

    Args:
        query (str): The user's query that needs to be processed.
        model (str, optional): The language model to use. Defaults to "gpt-3.5-turbo".

    Returns:
        str: The generated response to the query.
    """
    # Prepare the conversation context with system and user messages.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": f"Question: {query}."},
    ]
    # Use the OpenAI client to generate a response for the conversation.
    response = openai_client.chat.completions.create(
        model=model,
        messages=messages,
    )
    # Extract and return the content of the first choice.
    content: str = response.choices[0].message.content
    return content
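
# Illustrative usage of call_chatgpt (not part of the original app; requires a
# valid OPENAI_API_KEY and makes a billable API call):
#   answer = call_chatgpt("What is the capital of France?")
#   answer = call_chatgpt("Summarize ReAct prompting.", model="gpt-4")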

def call_langchain(prompt: str) -> str:
    """
    Initializes a language model, loads search and math tools, initializes an
    agent with those tools, and runs the agent on the given prompt.

    Args:
        prompt (str): The input text prompt that the agent will process.

    Returns:
        str: The text output produced by the agent after processing the prompt.
    """
    # Initialize the OpenAI language model deterministically (temperature 0)
    # with the OpenAI API key.
    llm = l_OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)  # type: ignore
    # Load the tools the agent can call: 'serpapi' for search engine results
    # and 'llm-math' for arithmetic, along with the required API key.
    tools = load_tools(  # type: ignore
        ["serpapi", "llm-math"], llm=llm, serpapi_api_key=SERPAPI_API_KEY
    )
    # Build a zero-shot ReAct agent over the tools, with verbose logging so
    # the intermediate reasoning steps are printed.
    agent = initialize_agent(  # type: ignore
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    # Run the agent on the prompt and return its final answer.
    output: str = agent.run(prompt)
    return output
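
# Illustrative usage of call_langchain (assumes valid OPENAI_API_KEY and
# SERPAPI_API_KEY). The zero-shot ReAct agent picks a tool per step, so a
# prompt mixing lookup and arithmetic exercises both tools:
#   result = call_langchain("What year was Python first released, and what is that year times 2?")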

def query(payload: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Sends a JSON payload to a predefined API URL and returns the parsed JSON response.

    Args:
        payload (Dict[str, Any]): The JSON payload to be sent to the API.

    Returns:
        List[Dict[str, Any]]: The parsed JSON response from the API; this
        text-generation endpoint returns a list of dictionaries.
    """
    # API endpoint URL.
    API_URL = "https://sks7h7h5qkhoxwxo.us-east-1.aws.endpoints.huggingface.cloud"
    # Headers indicating that both the request and response bodies are JSON.
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }
    # Send a POST request with the JSON payload and return the parsed response.
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
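
# Illustrative request/response shape for the endpoint above, inferred from
# how the callers below index the result (the output text is hypothetical):
#   query({"inputs": "Hello", "parameters": {}})
#   -> [{"generated_text": "Hello! How can I help you today?"}]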

def llama2_7b_ysa(prompt: str) -> str:
    """
    Queries a hosted LLaMA 2 7B model and retrieves the generated text for the
    given prompt.

    This function sends the prompt to the model via the `query` helper and
    extracts the generated text from the response, which is expected to be a
    list of dictionaries whose first element contains a 'generated_text' key.

    Args:
        prompt (str): The text prompt to send to the model.

    Returns:
        str: The generated text response from the model.

    Note:
        The 'parameters' dictionary is passed empty but can be customized with
        any generation parameters the model API supports.
    """
    # Define the query payload with the prompt and any additional parameters.
    query_payload: Dict[str, Any] = {
        "inputs": prompt,
        "parameters": {},
    }
    # Send the query to the model and capture the response.
    output = query(query_payload)
    # Extract the generated text from the first item in the response list.
    response: str = output[0]["generated_text"]
    return response
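
# Illustrative usage of llama2_7b_ysa; the empty "parameters" dictionary could
# instead carry generation options such as max_new_tokens or temperature (an
# assumption based on the standard Hugging Face text-generation payload, not
# confirmed by this file):
#   text = llama2_7b_ysa("What is machine learning?")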

def llama2_7b_brk_letters(prompt: str) -> str:
    """
    Queries a LLaMA 2 7B model hosted on a Hugging Face endpoint with a given
    prompt and returns the generated text.

    Args:
        prompt (str): The input text prompt to send to the model.

    Returns:
        str: The text generated by the model in response to the prompt.
    """
    # Endpoint URL of the LLaMA 2 7B model hosted on Hugging Face.
    API_URL = "https://fjnshtq0d24uia40.us-east-1.aws.endpoints.huggingface.cloud"
    # Headers indicating that both the request and response bodies are JSON.
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }

    def query_llama2_7b_brk_letters(payload: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Sends a POST request to the endpoint with the given payload.

        Args:
            payload (Dict[str, Any]): The data to send, including the input
                prompt and any parameters for the model.

        Returns:
            List[Dict[str, Any]]: The parsed JSON response from the API.
        """
        response = requests.post(API_URL, headers=headers, json=payload)
        return response.json()

    # Send the prompt and an (empty) parameters dictionary to the endpoint.
    output = query_llama2_7b_brk_letters({
        "inputs": prompt,
        "parameters": {},
    })
    # Extract the generated text from the first item in the response list.
    response = output[0]["generated_text"]
    return response
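
# Minimal smoke test (illustrative, not part of the original app): run this
# module directly to exercise call_chatgpt. Requires a valid OPENAI_API_KEY
# and makes a billable API call.
if __name__ == "__main__":
    print(call_chatgpt("Say hello in one short sentence."))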