import os

import gradio as gr
from rank_bm25 import BM25Okapi
from langchain.tools import Tool
from langchain.utilities import GoogleSearchAPIWrapper
# Import Azure OpenAI
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
# Azure OpenAI credentials (France region deployment)
BASE_URL = "https://cnerg-gpt-france.openai.azure.com/"
API_KEY = "1b68ed6e70d044558517db7721de6fac"
DEPLOYMENT_NAME = "GPT-4-France"

model = AzureChatOpenAI(
    openai_api_base=BASE_URL,
    openai_api_version="2023-05-15",
    deployment_name=DEPLOYMENT_NAME,
    openai_api_key=API_KEY,
    openai_api_type="azure",
)
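# The credentials above are hardcoded; a minimal sketch of the usual
# alternative, assuming hypothetical environment variable names that are not
# part of the original app:
#   BASE_URL = os.environ["AZURE_OPENAI_BASE_URL"]
#   API_KEY = os.environ["AZURE_OPENAI_API_KEY"]
#   DEPLOYMENT_NAME = os.environ["AZURE_OPENAI_DEPLOYMENT"]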
# Google Programmable Search credentials
os.environ["GOOGLE_CSE_ID"] = "67517d07b1ea049f6"
os.environ["GOOGLE_API_KEY"] = "AIzaSyAkU0I5NcrKIPMtB2Ry28Mu9umJA4Rw4UE"
search = GoogleSearchAPIWrapper()

def top10_results(query):
    # Return the top 10 Google results for the query.
    return search.results(query, 10)

tool = Tool(
    name="Google Search",
    description="Search Google for recent results.",
    func=top10_results,
)
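# For reference, GoogleSearchAPIWrapper.results returns a list of dicts with
# "title", "link", and "snippet" keys, roughly of this shape (illustrative
# values, not real output):
#   [{"title": "...", "link": "https://...", "snippet": "..."}, ...]
# The BM25 re-ranking below relies on the "snippet" field.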
def search_results(input_text):
    # Fetch raw Google results for the input text.
    raw_text_list = tool.run(input_text)
    return raw_text_list
def Bm25(raw_text_list, input_text, n):
    # Re-rank the search snippets against the query with BM25, keep the top n,
    # and assemble them into a prompt for the model.
    corpus = [item['snippet'] for item in raw_text_list]
    tokenized_corpus = [doc.split(" ") for doc in corpus]
    bm25 = BM25Okapi(tokenized_corpus)
    tokenized_query = input_text.split(" ")
    top_n_results = bm25.get_top_n(tokenized_query, corpus, n=n)
    results = '\n'.join(top_n_results)
    combined_input = (
        "query = " + input_text
        + "\n\nFor the above query these are some results from a search engine:\n"
        + results
        + "\n\nGive a detailed and well-informative answer for the query in"
        " 500-1000 words (include calculations if needed; using tables and"
        " other styles of structuring is optional for better answering)."
    )
    return combined_input
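# Usage sketch (hypothetical query, shown for reference only; nothing here
# runs at import time):
#   hits = search_results("efficiency limits of silicon solar cells")
#   prompt = Bm25(hits, "efficiency limits of silicon solar cells", 3)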
# Tab 1: plain GPT-4 answering, with an optional one-shot example.
def gpt4(input_text, one_shot_example):
    model = AzureChatOpenAI(
        openai_api_base=BASE_URL,
        openai_api_version="2023-05-15",
        deployment_name=DEPLOYMENT_NAME,
        openai_api_key=API_KEY,
        openai_api_type="azure",
    )
    instruction = (
        "Please provide a comprehensive and well-researched response to the"
        " following question. Ensure that the information is up-to-date and"
        " includes relevant scientific insights and data."
    )
    if len(one_shot_example) == 0:
        combined_input = f"{instruction}\n\nQuestion: {input_text}"
    else:
        combined_input = (
            f"{instruction}\n\nBelow is an example question-answer pair for"
            f" reference:\n\n{one_shot_example}\n\nNow answer this question."
            f"\n\nQuestion: {input_text}"
        )
    generated_answer = model([HumanMessage(content=combined_input)])
    return generated_answer.content
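# Usage sketch (hypothetical inputs; in the app, the Gradio tab below
# supplies them):
#   answer = gpt4("How do solar cells convert light into electricity?", "")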
def function2(input_text, one_shot_example):
    # Placeholder; not wired into any Gradio tab below.
    return f"Output of Function 2 with input: {input_text} and one shot example: {one_shot_example}"
# Tab 2: search-augmented GPT-4. Fetch Google results, BM25 re-rank the top n
# snippets into the prompt, then query the model.
def function3(input_text, one_shot_example, n):
    n = int(n)
    model = AzureChatOpenAI(
        openai_api_base=BASE_URL,
        openai_api_version="2023-05-15",
        deployment_name=DEPLOYMENT_NAME,
        openai_api_key=API_KEY,
        openai_api_type="azure",
    )
    raw_results = search_results(input_text)
    combined_input = Bm25(raw_results, input_text, n)
    if len(one_shot_example) != 0:
        combined_input += f"\n\nHere is a sample question-answer pair for reference:\n\n{one_shot_example}"
    generated_answer = model([HumanMessage(content=combined_input)])
    return generated_answer.content
def function4(input_text, one_shot_example, n):
    # Placeholder for the "GPT 4 BM25" tab; echoes its inputs.
    n = int(n)
    return f"Output of Function 4 with input: {input_text}, one shot example: {one_shot_example} and parameter: {n}"
# Define the dropdown options
dropdown_options = ["1", "2", "3"]

# Create individual interfaces for each function
# iface1 = gr.Interface(gpt4, inputs="text", outputs="text")
iface2 = gr.Interface(gpt4, inputs=["text", "text"], outputs="text")
iface3 = gr.Interface(
    function3,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Textbox(label="One Shot Example"),
        gr.Dropdown(choices=dropdown_options, label="Number of top search results"),
    ],
    outputs="text",
)
iface4 = gr.Interface(
    function4,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Textbox(label="One Shot Example"),
        gr.Dropdown(choices=dropdown_options, label="Select K"),
    ],
    outputs="text",
)

# Combine the individual interfaces into one tabbed app
iface = gr.TabbedInterface(
    [iface2, iface3, iface4],
    tab_names=["GPT-4", "GPT 4 search", "GPT 4 BM25"],
)

# Launch the interface
if __name__ == "__main__":
    iface.launch()