# spooky_8ball_ai_cpu.py
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from datetime import datetime
import time
from urllib.parse import quote  # stdlib helper used below to URL-encode the question in the mailto link
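# Streamlit requires st.set_page_config to be the first st.* call in the script,
# which is why it comes before the title and every other widget.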
st.set_page_config(page_title="AI Spooky 8-Ball", layout="centered")
st.title("🔮 AI Spooky Magic 8-Ball (CPU)")
st.markdown(
    """
    Ask the **AI Spooky Magic 8-Ball** a yes/no question and receive a **dynamic, spooky, or silly** answer.
    Runs **Qwen2.5-0.5B-Instruct**, a model small enough to be CPU-friendly.
    """
)
# ----------------------------
# --- Load Model ---
# ----------------------------
@st.cache_resource(show_spinner=True)
def load_model():
    """Load the tokenizer and model once and cache them across Streamlit reruns."""
    model_name = "Qwen/Qwen2.5-0.5B-Instruct"
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    # AutoModelForCausalLM is required so the model carries a language-modelling head for .generate()
    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    model.to("cpu")  # CPU only
    return tokenizer, model
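# Note: from_pretrained also accepts low_cpu_mem_usage=True, which can trim peak RAM
# while the weights load on small machines; it is left out here to keep the loader minimal.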
tokenizer, model = load_model()
# ----------------------------
# --- Functions ---
# ----------------------------
def generate_spooky_answer(question, max_new_tokens=150, temperature=0.8):
    """Generate a short, spooky answer to a yes/no question and report the inference time."""
    prompt = f"""You are a spooky, slightly scary, sometimes funny Magic 8-Ball.
Answer the following yes/no question in 1-3 sentences, making it mysterious or eerie, sometimes silly:
Question: "{question}"
Answer:"""
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
    start_time = time.time()
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            do_sample=True,
            top_p=0.9,
        )
    inference_time = time.time() - start_time
    # The decoded text includes the prompt, so keep only what follows the final "Answer:".
    answer = tokenizer.decode(output[0], skip_special_tokens=True)
    answer = answer.split("Answer:")[-1].strip()
    return answer, inference_time
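# Hypothetical quick check outside the Streamlit UI:
#   answer, secs = generate_spooky_answer("Is the attic haunted?")
#   print(f"{answer} ({secs:.2f}s)")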
# ----------------------------
# --- Streamlit Form ---
# ----------------------------
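# st.form batches the widgets below, so the script only reruns the generation step
# when the submit button is pressed, not on every keystroke.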
with st.form("ai_8ball_form"):
    question = st.text_input("Ask a yes/no question", placeholder="Will I survive tonight?")
    submitted = st.form_submit_button("Ask AI Spooky 8-Ball")

if submitted:
    if not question.strip():
        st.warning("Please enter a question!")
    else:
        answer, inference_time = generate_spooky_answer(question)
        st.subheader("Your AI Spooky 8-Ball Answer")
        st.write(f"**Question:** {question}")
        st.write(f"**Answer:** {answer}")
        st.write(f"*Inference time:* {inference_time:.2f} seconds")
        st.write(f"*Generated at (UTC):* {datetime.utcnow().isoformat()}Z")
# Help button (question is URL-encoded so spaces and punctuation survive the mailto link)
st.markdown(
    f"""
    <a href="mailto:[email protected]?subject=SmilyAI%20AI%208-Ball%20Help&body=Hello%2C%0A%0AI%20need%20assistance%20with%20the%20AI%20Spooky%208-Ball.%0A%0AQuestion:%20{quote(question)}" target="_blank">
        <button style="width:100%;height:50px;font-size:20px;color:white;background-color:#FF4B4B;border:none;border-radius:8px;cursor:pointer;">
            Need Help? Contact SmilyAI
        </button>
    </a>
    """,
    unsafe_allow_html=True
)
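# To run locally (assuming streamlit, torch, and transformers are installed):
#   streamlit run spooky_8ball_ai_cpu.py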