# Hugging Face Space — Streamlit chat demo (scraped page header read "Spaces: Sleeping")
import os

import openai
import streamlit as st
from transformers import pipeline

# Wildcard import supplies call_chatgpt() used in the chat handler below.
# NOTE(review): prefer an explicit `from helpers.foundation_models import call_chatgpt`
# so the module's dependencies are visible — confirm what else the star import provides.
from helpers.foundation_models import *

# OpenAI client for the "ChatGPT" task. Reading os.environ[...] (not .get)
# fails fast at startup with a KeyError if OPENAI_API_KEY is unset, instead
# of erroring mid-conversation.
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
# Conversation history lives in Streamlit session state so it survives reruns.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay the stored conversation on every rerun so the chat persists on screen.
for past_msg in st.session_state["messages"]:
    with st.chat_message(past_msg["role"]):
        st.markdown(past_msg["content"])
# Collapsible usage instructions, rendered inside the sidebar.
# Fix: the original wrapped st.sidebar.markdown in a main-area st.expander,
# which left a permanently empty expander in the main pane while the text
# always appeared in the sidebar; st.sidebar.expander keeps the toggle and
# its content together in the sidebar.
with st.sidebar.expander("Instructions"):
    st.markdown(
        # NOTE(review): the source emoji were mojibake ("π", "π€");
        # restored as 🚀/🤗/📚 — confirm against the original README.
        r"""
# 🚀 Streamlit + Hugging Face Demo 🤗

## Introduction 📚

This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.
"""
    )

# Task selector: decides which model/pipeline handles the next prompt.
option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT"),
)
# Sidebar button that wipes the stored conversation for a fresh start.
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button:
    st.session_state["messages"] = []
@st.cache_resource
def _load_pipeline(task, model=None):
    """Build a Hugging Face pipeline, cached across Streamlit reruns.

    Fix: the original called pipeline(...) inside the chat handler, reloading
    the model weights on every single message; st.cache_resource builds each
    (task, model) pipeline once per server process.
    """
    if model is None:
        return pipeline(task)
    return pipeline(task, model=model)


# React to user input. st.chat_input returns None until the user submits
# non-empty text, so the walrus guard alone is sufficient — the original's
# nested `if prompt:` checks inside each branch were redundant and removed.
if prompt := st.chat_input("What is up?"):
    # Echo the user's message and record it in the history.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.spinner("Wait for it..."):
        if option == "Sentiment Analysis":
            out = _load_pipeline("sentiment-analysis")(prompt)
            # Summarize label + score alongside the prompt for display.
            doc = f"""
                Prompt: {prompt}
                Sentiment: {out[0]["label"]}
                Score: {out[0]["score"]}
            """
        elif option == "Medical Summarization":
            out = _load_pipeline("summarization", "Falconsai/medical_summarization")(prompt)
            doc = out[0]["summary_text"]
        elif option == "ChatGPT":
            # call_chatgpt comes from helpers.foundation_models (star import).
            doc = call_chatgpt(query=prompt)
        else:
            # Unknown task selection: respond with an empty message.
            doc = ""

    response = str(doc)

    # Display the assistant response and record it in the history.
    with st.chat_message("assistant"):
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})