# BTP_chatbot / app.py — Hugging Face Space file (commit 211feba, 7.27 kB)
import gradio as gr
from langchain.tools import Tool
from langchain.utilities import GoogleSearchAPIWrapper
from rank_bm25 import BM25Okapi
import sys
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from langchain.callbacks import get_openai_callback
import openai
import time
import pandas as pd
import random
import os
import csv
from langchain.tools import Tool
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.document_loaders import TextLoader
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
import os
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import tempfile
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import ElasticVectorSearch, Pinecone, Weaviate, FAISS
import os
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
import os
# Import Azure OpenAI
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
from langchain.callbacks import get_openai_callback
import os
import openai
# Import Azure OpenAI
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
from langchain.callbacks import get_openai_callback
import sys
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from langchain.callbacks import get_openai_callback
import openai
import time
import pandas as pd
import random
import os
import csv
# Azure OpenAI ("France" region) credentials.
# SECURITY NOTE(review): these values were hard-coded in the original file and
# are therefore published secrets — they should be rotated. Environment
# variables now take precedence; the literals remain only as a
# backward-compatible fallback so existing deployments keep working.
BASE_URL = os.environ.get("AZURE_OPENAI_BASE_URL", "https://cnerg-gpt-france.openai.azure.com/")
API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "1b68ed6e70d044558517db7721de6fac")
DEPLOYMENT_NAME = os.environ.get("AZURE_OPENAI_DEPLOYMENT", "GPT-4-France")

# Module-level chat client (the per-request functions below also build their
# own clients with the same settings).
model = AzureChatOpenAI(
    openai_api_base=BASE_URL,
    openai_api_version="2023-05-15",
    deployment_name=DEPLOYMENT_NAME,
    openai_api_key=API_KEY,
    openai_api_type="azure",
)

# Google Custom Search credentials (also hard-coded originally — rotate these
# too). setdefault lets an externally supplied value override the fallback.
os.environ.setdefault("GOOGLE_CSE_ID", "67517d07b1ea049f6")
os.environ.setdefault("GOOGLE_API_KEY", "AIzaSyAkU0I5NcrKIPMtB2Ry28Mu9umJA4Rw4UE")
search = GoogleSearchAPIWrapper()


def top10_results(query):
    """Return the top-10 Google Custom Search results for *query*."""
    return search.results(query, 10)


tool = Tool(
    name="Google Search",
    description="Search Google for recent results.",
    func=top10_results,
)
def search_results(input_text):
    """Fetch raw Google search results for *input_text* via the search tool."""
    return tool.run(input_text)
def Bm25(raw_text_list, input_text, n):
    """Rank search-result snippets with BM25 and build an LLM prompt.

    Parameters
    ----------
    raw_text_list : list[dict]
        Google search results; each dict must carry a 'snippet' key.
    input_text : str
        The user's query.
    n : int
        Number of top-ranked snippets to embed in the prompt.

    Returns
    -------
    str
        A prompt containing the query, the n best snippets, and answering
        instructions for the model.
    """
    corpus = [item['snippet'] for item in raw_text_list]
    tokenized_corpus = [doc.split(" ") for doc in corpus]
    bm25 = BM25Okapi(tokenized_corpus)
    tokenized_query = input_text.split(" ")
    top_n_results = bm25.get_top_n(tokenized_query, corpus, n=n)
    results = '\n'.join(top_n_results)
    # BUG FIX: the original called
    #   "...search engine: \n ".join(results)
    # on the *string* `results`, which interleaved the instruction text
    # between every single character of the snippets. Plain concatenation
    # is what was intended.
    combined_input = (
        "query = " + input_text
        + "\n\n For the above query these are some results from a search engine: \n "
        + results
        + "\n\n Give detailed and brief answer for the query write in 500-1000 words. "
        "Give detailed and well informative answer(include calculations if needed, "
        "using tables and other styles of structuring is optional for better answering ) "
    )
    return combined_input
# Define your functions here
def gpt4(input_text, one_shot_example):
    """Answer *input_text* with GPT-4 via Azure OpenAI.

    Parameters
    ----------
    input_text : str
        The user's question.
    one_shot_example : str
        Optional question/answer pair used as a one-shot prompt; pass an
        empty string for zero-shot answering.

    Returns
    -------
    str
        The model's generated answer text.
    """
    client = AzureChatOpenAI(
        openai_api_base=BASE_URL,
        openai_api_version="2023-05-15",
        deployment_name=DEPLOYMENT_NAME,
        openai_api_key=API_KEY,
        openai_api_type="azure",
    )
    # Build the prompt for the requested mode; the model call below is shared
    # by both branches (the original duplicated it verbatim).
    if not one_shot_example:
        combined_input = (
            "please provide comprehensive and well-researched responses to the "
            "following question. Ensure that the information is up-to-date and "
            f"includes relevant scientific insights and data , question : {input_text}"
        )
    else:
        combined_input = (
            "please provide comprehensive and well-researched responses to the "
            "following question. Ensure that the information is up-to-date and "
            "includes relevant scientific insights and data ,Below is a example "
            f"question-answer pair for reference\n\n {one_shot_example} \n\n "
            f"Now answer this question \n\n question :{input_text}"
        )
    generated_answer = client([HumanMessage(content=combined_input)])
    return generated_answer.content
def function2(input_text, one_shot_example):
    """Placeholder pipeline: report the received inputs as a formatted string."""
    # Not yet implemented — simply echo what was passed in.
    return (
        f"Output of Function 2 with input: {input_text}"
        f" and one shot example: {one_shot_example}"
    )
def function3(input_text, one_shot_example, n):
    """Answer *input_text* with GPT-4 grounded on BM25-ranked web results.

    Parameters
    ----------
    input_text : str
        The user's question.
    one_shot_example : str
        Optional question/answer pair appended to the prompt; pass an empty
        string to omit it.
    n : int or str
        Number of top search snippets to include (Gradio dropdowns pass
        strings, hence the int() coercion).

    Returns
    -------
    str
        The model's generated answer text.
    """
    n = int(n)
    client = AzureChatOpenAI(
        openai_api_base=BASE_URL,
        openai_api_version="2023-05-15",
        deployment_name=DEPLOYMENT_NAME,
        openai_api_key=API_KEY,
        openai_api_type="azure",
    )
    # Retrieve raw Google results, then distill them into a prompt via BM25.
    raw_results = search_results(input_text)
    combined_input = Bm25(raw_results, input_text, n)
    if one_shot_example:
        combined_input += (
            f"\n\n Here is a sample question answer pair for reference :\n\n {one_shot_example} "
        )
    # The model call is shared by both modes (the original duplicated it).
    generated_answer = client([HumanMessage(content=combined_input)])
    return generated_answer.content
def function4(input_text, one_shot_example, n):
    """Placeholder pipeline: report the received inputs as a formatted string.

    BUG FIX: the original f-string referenced an undefined name ``parameter``
    and raised NameError on every call; it now reports ``n`` (the dropdown
    value, coerced to int as before).
    """
    n = int(n)
    return (
        f"Output of Function 4 with input: {input_text}, "
        f"one shot example: {one_shot_example} and parameter: {n}"
    )
# Dropdown choices: how many top search snippets to feed the model.
dropdown_options = ["1", "2", "3"]

# Tab 1: plain GPT-4 (question + optional one-shot example).
iface2 = gr.Interface(gpt4, inputs=["text", "text"], outputs="text")

# Tab 2: GPT-4 grounded on Google-search snippets ranked by BM25.
iface3 = gr.Interface(
    function3,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Textbox(label="One Shot Example"),
        gr.Dropdown(choices=dropdown_options, label="Number of top search results"),
    ],
    outputs="text",
)

# Tab 3: placeholder pipeline with a K-selection dropdown.
iface4 = gr.Interface(
    function4,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Textbox(label="One Shot Example"),
        gr.Dropdown(choices=dropdown_options, label="Select K"),
    ],
    outputs="text",
)

# Combine the three interfaces into one tabbed app.
iface = gr.TabbedInterface(
    [iface2, iface3, iface4],
    tab_names=["GPT-4 ", "GPT 4 search", "GPT 4 BM25"],
)

# Launch only when executed as a script.
if __name__ == "__main__":
    iface.launch()