GabrielSalem committed on
Commit
2bd3c6f
·
verified ·
1 Parent(s): ed61c38

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +143 -267
app.py CHANGED
@@ -1,37 +1,28 @@
1
- #!/usr/bin/env python3
2
  """
3
- AURA Chat — Gradio Space (final working version)
4
- - Single-file Gradio app (UI + pipeline).
5
- - Locked model, tokens, and scraper delay.
6
- - Users input prompts, press Analyze -> app scrapes -> LLM analyzes -> seeds chat.
7
- - Chat UI works across Gradio versions (we render tuples for display).
8
- - Robust client lifecycle and shutdown cleanup.
 
 
 
 
 
9
  """
10
 
11
  import os
12
- import sys
13
  import time
14
- import asyncio
15
  import requests
 
16
  import atexit
17
- import traceback
18
- import html
19
- import gc
20
- import socket
21
- from typing import List, Tuple, Dict, Any
22
 
23
  import gradio as gr
24
 
25
- # Defensive event loop setup early (reduces fd shutdown noise)
26
- if sys.platform != "win32":
27
- try:
28
- loop = asyncio.new_event_loop()
29
- asyncio.set_event_loop(loop)
30
- except Exception:
31
- traceback.print_exc()
32
-
33
  # -----------------------
34
- # Config (locked)
35
  # -----------------------
36
  SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
37
  SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
@@ -43,14 +34,8 @@ SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
43
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
44
  OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
45
 
46
- # Try import OpenAI SDK; if missing, we'll error cleanly on use
47
- try:
48
- from openai import OpenAI
49
- except Exception:
50
- OpenAI = None
51
-
52
  # -----------------------
53
- # System prompt (locked)
54
  # -----------------------
55
  PROMPT_TEMPLATE = f"""
56
  You are AURA, a concise, professional hedge-fund research assistant.
@@ -58,40 +43,35 @@ You are AURA, a concise, professional hedge-fund research assistant.
58
  Task:
59
  - Given scraped data below, produce a clear, readable analysis that:
60
  1) Lists the top 5 stock picks (or fewer if not enough data).
61
- 2) For each stock provide: Ticker / Company name, 2 short rationale bullets,
62
- and an explicit Investment Duration: one-line "When to Invest" and one-line "When to Sell".
63
- 3) Provide a 2–3 sentence summary conclusion at the top.
64
- 4) After the list, include "Assumptions & Risks" (23 bullets).
65
- 5) Keep it scannable and human-friendly; use numbered lists and bold headers. No JSON.
66
-
 
67
  Model: {LLM_MODEL}
68
- Max tokens: {MAX_TOKENS}
69
  """
70
 
71
  # -----------------------
72
- # Helpers: scraping & LLM
73
  # -----------------------
74
  def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
75
  payload = {"query": query}
76
  last_err = None
77
- for attempt in range(1, retries + 1):
78
  try:
79
  resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
80
  resp.raise_for_status()
81
  data = resp.json()
82
  if isinstance(data, dict):
83
- parts = []
84
- for k, v in data.items():
85
- parts.append(f"{k.upper()}:\n{v}\n")
86
- return "\n".join(parts)
87
- return str(data)
88
  except Exception as e:
89
  last_err = e
90
- if attempt < retries:
91
- time.sleep(1.0)
92
- else:
93
- return f"ERROR: Scraper failed: {e}"
94
- return f"ERROR: {last_err}"
95
 
96
  def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
97
  aggregated = []
@@ -100,269 +80,165 @@ def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
100
  if not q:
101
  continue
102
  aggregated.append(f"\n=== QUERY: {q} ===\n")
103
- scraped = deep_scrape(q)
104
- aggregated.append(scraped)
105
  time.sleep(delay)
106
  return "\n".join(aggregated)
107
 
108
- def call_llm(system_prompt: str, user_text: str, model: str = LLM_MODEL, max_tokens: int = MAX_TOKENS) -> str:
109
- # Validate dependencies & secrets
 
 
 
 
 
 
 
110
  if OpenAI is None:
111
- return "ERROR: openai package not installed. Add 'openai' to requirements."
112
  if not OPENAI_API_KEY:
113
  return "ERROR: OPENAI_API_KEY not set in environment."
114
 
115
- client = None
116
  try:
117
- client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
118
  completion = client.chat.completions.create(
119
- model=model,
120
- messages=[
121
- {"role": "system", "content": system_prompt},
122
- {"role": "user", "content": user_text},
123
- ],
124
- max_tokens=max_tokens,
125
  )
126
- if hasattr(completion, "choices") and len(completion.choices) > 0:
127
- try:
128
- return completion.choices[0].message.content
129
- except Exception:
130
- return str(completion.choices[0])
131
- return str(completion)
132
  except Exception as e:
133
  return f"ERROR: LLM call failed: {e}"
134
  finally:
135
- # Close client transport if possible
136
  try:
137
- if client is not None:
138
- try:
139
- client.close()
140
- except Exception:
141
- try:
142
- asyncio.get_event_loop().run_until_complete(client.aclose())
143
- except Exception:
144
- pass
145
  except Exception:
146
  pass
147
 
148
  # -----------------------
149
- # Pipeline: analyze & chat helpers
150
  # -----------------------
151
- def analyze_and_seed_chat(prompts_text: str) -> Tuple[str, List[Dict[str, str]]]:
152
- if not prompts_text or not prompts_text.strip():
153
- return "Please enter at least one data prompt (one per line).", []
154
 
155
  queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
156
- scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
157
  if scraped.startswith("ERROR"):
158
  return scraped, []
159
 
160
- user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease produce the analysis as instructed in the system prompt."
161
- analysis = call_llm(PROMPT_TEMPLATE, user_payload)
162
  if analysis.startswith("ERROR"):
163
  return analysis, []
164
 
 
165
  initial_chat = [
166
- {"role": "user", "content": f"Analyze the data provided (prompts: {', '.join(queries)})"},
167
- {"role": "assistant", "content": analysis},
168
  ]
169
  return analysis, initial_chat
170
 
171
- def continue_chat(messages: List[Dict[str, str]], user_msg: str, analysis_text: str) -> List[Dict[str, str]]:
172
- if messages is None:
173
- messages = []
174
- if not user_msg or not user_msg.strip():
175
- return messages
176
-
177
- messages.append({"role": "user", "content": user_msg})
178
-
179
  followup_system = (
180
- "You are AURA, an analyst. Use the provided analysis as authoritative context. "
181
- "Answer the user's question referencing the analysis where appropriate. Be concise and actionable."
182
  )
183
- user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_msg}\n\nAnswer concisely."
184
- assistant_reply = call_llm(followup_system, user_payload)
185
- messages.append({"role": "assistant", "content": assistant_reply})
186
- return messages
187
-
188
- # -----------------------
189
- # Cleanup helper (reduces fd shutdown noise)
190
- # -----------------------
191
- def aggressive_cleanup_on_exit():
192
- try:
193
- gc.collect()
194
- except Exception:
195
- pass
196
- try:
197
- loop = asyncio.get_event_loop()
198
- if loop.is_running():
199
- try:
200
- loop.stop()
201
- except Exception:
202
- pass
203
- if not loop.is_closed():
204
- try:
205
- loop.close()
206
- except Exception:
207
- pass
208
- except Exception:
209
- pass
210
- try:
211
- for obj in gc.get_objects():
212
- try:
213
- if isinstance(obj, socket.socket):
214
- try:
215
- obj.close()
216
- except Exception:
217
- pass
218
- except Exception:
219
- pass
220
- except Exception:
221
- pass
222
-
223
- atexit.register(aggressive_cleanup_on_exit)
224
 
225
  # -----------------------
226
- # UI: build a beautiful responsive Gradio layout
227
  # -----------------------
228
- def messages_to_tuples(messages: List[Dict[str, str]]) -> List[Tuple[str, str]]:
229
- """
230
- Convert our internal list of {"role","content"} dicts into a list of (user, assistant) tuples
231
- suitable for gr.Chatbot. We group sequential messages by role into paired tuples.
232
- This ensures compatibility across Gradio versions.
233
- """
234
- if not messages:
235
- return []
236
- pairs: List[Tuple[str, str]] = []
237
- pending_user = None
238
- for m in messages:
239
- role = m.get("role")
240
- content = m.get("content", "")
241
- if role == "user":
242
- pending_user = content
243
- elif role == "assistant":
244
- # pair with last user if available, else pair with empty user
245
- user_text = pending_user or ""
246
- pairs.append((user_text, content))
247
- pending_user = None
248
- # If there is an unmatched user at the end, append it with empty assistant
249
- if pending_user:
250
- pairs.append((pending_user, ""))
251
- return pairs
252
-
253
  def build_ui():
254
  with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
255
- # Inject fonts & CSS
256
  gr.HTML("""
257
- <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600;700;800&display=swap" rel="stylesheet">
258
  <style>
259
- body, .gradio-container { font-family: Inter, system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial; background: linear-gradient(180deg,#051226 0%, #07102a 100%); color: #e6eef6; }
260
- .wrap { max-width:1100px; margin:18px auto; padding:12px; }
261
- .top { display:flex; justify-content:space-between; align-items:center; gap:12px; margin-bottom:12px; }
262
- .brand { display:flex; gap:12px; align-items:center; }
263
- .logo { width:52px; height:52px; border-radius:12px; display:flex; align-items:center; justify-content:center; font-weight:800; color:#021028; background:linear-gradient(135deg,#06b6d4,#10b981); box-shadow:0 10px 30px rgba(2,6,23,0.6); font-size:20px; }
264
- .title { font-size:20px; font-weight:700; margin:0; }
265
- .desc { color:#9fb0c8; font-size:13px; }
266
- .layout { display:flex; gap:16px; align-items:flex-start; }
267
- .left { width:360px; min-width:260px; }
268
- .panel { background: linear-gradient(180deg, rgba(255,255,255,0.02), rgba(255,255,255,0.01)); border-radius:12px; padding:14px; box-shadow:0 8px 40px rgba(2,6,23,0.6); border:1px solid rgba(255,255,255,0.03); }
269
- .muted { color:#9fb0c8; font-size:13px; }
270
- .pill { display:inline-block; padding:6px 10px; border-radius:999px; background:rgba(255,255,255,0.02); color:#bcd6e6; font-size:13px; margin-right:8px; }
271
- .analysis { min-height:220px; max-height:420px; overflow:auto; white-space:pre-wrap; font-family:Inter, monospace; font-size:14px; color:#dfeefc; }
272
- .chatbox { height:420px; overflow:auto; border-radius:10px; padding:8px; background: linear-gradient(180deg, rgba(255,255,255,0.01), rgba(255,255,255,0.005)); border:1px solid rgba(255,255,255,0.03); }
273
- @media (max-width:900px){ .layout { flex-direction:column; } .left { width:100%; } }
274
  </style>
275
  """)
276
 
277
- with gr.Row(elem_id="header"):
278
- with gr.Column():
279
- gr.HTML("<div class='wrap'><div class='top'><div class='brand'><div class='logo'>A</div><div><div class='title'>AURA — Hedge Fund Picks</div><div class='desc'>Scrape • Synthesize • Short actionable Investment Duration</div></div></div><div class='muted'>Model locked • Tokens locked</div></div></div>")
280
-
281
- with gr.Row(elem_classes="layout"):
282
- # Left column (inputs)
283
- with gr.Column(elem_classes="left"):
284
- with gr.Column(elem_classes="panel"):
285
- gr.Markdown("**Data Prompts**")
286
- prompts = gr.Textbox(lines=6, placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys", label=None)
287
- with gr.Row():
288
- analyze_btn = gr.Button("Analyze", variant="primary")
289
- clear_btn = gr.Button("Clear", variant="secondary")
290
- status = gr.Markdown("Status: Idle", elem_id="status")
291
- gr.HTML(f"<div style='margin-top:8px'><span class='pill'>Model: {LLM_MODEL}</span><span class='pill'>Tokens: {MAX_TOKENS}</span><span class='pill'>Delay: {SCRAPE_DELAY}s</span></div>")
292
-
293
- # Right column (analysis + chat)
294
- with gr.Column():
295
- with gr.Column(elem_classes="panel"):
296
- gr.Markdown("### Generated Analysis")
297
- analysis_html = gr.HTML("<div class='analysis muted'>No analysis yet. Enter prompts and click Analyze.</div>")
298
- gr.Markdown("### Chat (ask follow-ups)")
299
- chatbot = gr.Chatbot(label=None, elem_classes="chatbox")
300
- with gr.Row():
301
- user_input = gr.Textbox(placeholder="Ask a follow-up about the analysis...", label=None)
302
- send_btn = gr.Button("Send", variant="primary")
303
-
304
- # Hidden states
305
- analysis_state = gr.State("") # raw analysis text
306
- chat_state = gr.State([]) # list of {"role","content"} dicts
307
 
308
  # Handlers
309
- def set_status(text: str):
310
- return gr.update(value=f"Status: {text}")
311
-
312
- def on_clear():
313
- return "", gr.update(value="<div class='analysis muted'>No analysis yet. Enter prompts and click Analyze.</div>"), [], [], set_status("Cleared")
314
-
315
- def on_analyze(prompts_text: str):
316
- try:
317
- if not prompts_text or not prompts_text.strip():
318
- return "", gr.update(value="<div class='analysis muted'>Please enter at least one prompt.</div>"), [], [], set_status("Idle")
319
- status_update = set_status("Scraping...")
320
- # immediate UI update if front-end supports streaming (not guaranteed), then do work
321
- queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
322
- scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
323
- if scraped.startswith("ERROR"):
324
- return "", gr.update(value=f"<div class='analysis muted'><strong>Error:</strong> {html.escape(scraped)}</div>"), [], [], set_status("Scrape error")
325
-
326
- status_update = set_status("Generating analysis...")
327
- user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease produce the analysis as instructed in the system prompt."
328
- analysis_text = call_llm(PROMPT_TEMPLATE, user_payload)
329
- if analysis_text.startswith("ERROR"):
330
- return "", gr.update(value=f"<div class='analysis muted'><strong>Error:</strong> {html.escape(analysis_text)}</div>"), [], [], set_status("LLM error")
331
-
332
- safe_html = "<div class='analysis'>" + html.escape(analysis_text) + "</div>"
333
- initial_chat = [
334
- {"role": "user", "content": f"Analyze the data provided (prompts: {', '.join(queries)})"},
335
- {"role": "assistant", "content": analysis_text},
336
- ]
337
- return analysis_text, gr.update(value=safe_html), initial_chat, initial_chat, set_status("Done")
338
- except Exception as e:
339
- tb = traceback.format_exc()
340
- return "", gr.update(value=f"<div class='analysis muted'><strong>Unexpected error:</strong> {html.escape(str(e))}</div>"), [], [], set_status("Error")
341
-
342
- def on_send(messages, user_msg, analysis_text):
343
- if not user_msg or not user_msg.strip():
344
- return messages or [], ""
345
- updated = continue_chat(messages or [], user_msg, analysis_text or "")
346
- return updated, ""
347
-
348
- def render_chat(messages):
349
- # Convert to tuple list for gr.Chatbot which expects [(user, assistant), ...]
350
- try:
351
- tuples = messages_to_tuples(messages or [])
352
- return tuples
353
- except Exception:
354
- # fallback: show empty
355
- return []
356
-
357
- # Wire UI
358
- analyze_btn.click(fn=on_analyze, inputs=[prompts], outputs=[analysis_state, analysis_html, chat_state, chatbot, status])
359
- clear_btn.click(fn=on_clear, inputs=[], outputs=[prompts, analysis_html, chat_state, chatbot, status])
360
-
361
- send_btn.click(fn=on_send, inputs=[chat_state, user_input, analysis_state], outputs=[chat_state, user_input])
362
- user_input.submit(fn=on_send, inputs=[chat_state, user_input, analysis_state], outputs=[chat_state, user_input])
363
-
364
- chat_state.change(fn=render_chat, inputs=[chat_state], outputs=[chatbot])
365
-
366
  return demo
367
 
368
  # -----------------------
 
 
1
  """
2
+ AURA Chat — Gradio Space
3
+ Single-file Gradio app that:
4
+ - Accepts newline-separated prompts (data queries) from the user.
5
+ - On "Analyze" scrapes those queries, sends the aggregated text to a locked LLM,
6
+ and returns a polished analysis with a ranked list of best stocks and an
7
+ "Investment Duration" for each stock.
8
+ - Seeds a chat component with the generated analysis; user can then chat about it.
9
+
10
+ Notes:
11
+ - Model, max tokens, and delay between scrapes are fixed.
12
+ - User only inputs prompts; everything else is predefined.
13
  """
14
 
15
  import os
 
16
  import time
 
17
  import requests
18
+ import asyncio
19
  import atexit
20
+ from typing import List
 
 
 
 
21
 
22
  import gradio as gr
23
 
 
 
 
 
 
 
 
 
24
  # -----------------------
25
+ # Configuration (fixed)
26
  # -----------------------
27
  SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
28
  SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
 
34
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
35
  OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
36
 
 
 
 
 
 
 
37
  # -----------------------
38
+ # Prompt engineering (fixed)
39
  # -----------------------
40
  PROMPT_TEMPLATE = f"""
41
  You are AURA, a concise, professional hedge-fund research assistant.
 
43
  Task:
44
  - Given scraped data below, produce a clear, readable analysis that:
45
  1) Lists the top 5 stock picks (or fewer if not enough data).
46
+ 2) For each stock provide: Ticker / Company name, short rationale (2-3 bullets),
47
+ and an explicit **Investment Duration** entry: one-line "When to Invest" and one-line "When to Sell".
48
+ 3) Keep each stock entry short and scannable. Use a bullet list or numbered list.
49
+ 4) At the top, provide a 2-3 sentence summary conclusion.
50
+ 5) Output in plain text, clean formatting.
51
+ 6) Include a concise "Assumptions & Risks" section (2-3 bullet points).
52
+ Max tokens for the LLM response: {MAX_TOKENS}
53
  Model: {LLM_MODEL}
 
54
  """
55
 
56
  # -----------------------
57
+ # Scraping
58
  # -----------------------
59
def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
    """Scrape data for *query* via the deep-scrape API.

    Retries up to *retries* times on any failure (network error, non-2xx
    status, invalid JSON body), pausing 1s between attempts. Returns the
    flattened response text on success, or an "ERROR: ..." string after
    the final failure — this function never raises.
    """
    payload = {"query": query}
    last_err = None
    for attempt in range(retries):
        try:
            resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
            resp.raise_for_status()
            data = resp.json()
            if isinstance(data, dict):
                # Flatten {"section": "text", ...} responses into readable blocks.
                return "\n".join(f"{k.upper()}:\n{v}" for k, v in data.items())
            return str(data)
        except Exception as e:
            last_err = e
            # BUG FIX: only back off between attempts — the original slept
            # one extra second after the final attempt before returning.
            if attempt < retries - 1:
                time.sleep(1.0)
    return f"ERROR: Scraper failed: {last_err}"
 
 
 
75
 
76
def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
    """Scrape every non-empty query and concatenate the results.

    Each result is preceded by a "=== QUERY: ... ===" header. *delay*
    seconds elapse between scrapes to stay polite to the upstream API.
    """
    pending = [q for q in queries if q]  # skip blank entries up front
    aggregated = []
    for idx, q in enumerate(pending):
        aggregated.append(f"\n=== QUERY: {q} ===\n")
        aggregated.append(deep_scrape(q))
        # BUG FIX: the original slept after the final query too, adding a
        # pointless `delay`-second stall to every analysis run.
        if idx < len(pending) - 1:
            time.sleep(delay)
    return "\n".join(aggregated)
86
 
87
# -----------------------
# LLM call
# -----------------------
try:
    from openai import OpenAI
except ImportError:
    # Deferred failure: the UI can still load; calls report a clean error.
    OpenAI = None

def run_llm_system_and_user(system_prompt: str, user_text: str) -> str:
    """Send one system+user message pair to the locked LLM and return its reply.

    Returns an "ERROR: ..." string (never raises) when the SDK is missing,
    the API key is unset, or the API call fails for any reason.
    """
    if OpenAI is None:
        return "ERROR: `openai` package not installed."
    if not OPENAI_API_KEY:
        return "ERROR: OPENAI_API_KEY not set in environment."

    client = None
    try:
        # BUG FIX: construct the client inside the try — a bad base URL or
        # key previously raised out of this function instead of returning
        # the "ERROR: ..." string the pipeline checks for.
        client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
        completion = client.chat.completions.create(
            model=LLM_MODEL,
            messages=[{"role": "system", "content": system_prompt},
                      {"role": "user", "content": user_text}],
            max_tokens=MAX_TOKENS,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"ERROR: LLM call failed: {e}"
    finally:
        # Close the HTTP transport; guard against a failed construction.
        try:
            if client is not None:
                client.close()
        except Exception:
            pass
117
 
118
  # -----------------------
119
+ # Analysis pipeline
120
  # -----------------------
121
def analyze_and_seed_chat(prompts_text: str):
    """Run the full pipeline: scrape the prompts, generate the analysis,
    and build the initial chat history.

    Returns ``(analysis_or_error_text, initial_chat)``. ``initial_chat`` is
    an empty list whenever the text is an error or usage message.
    """
    # BUG FIX: guard None before .strip() — the original raised
    # AttributeError when the textbox value was None.
    if not prompts_text or not prompts_text.strip():
        return "Please enter at least one prompt.", []

    queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
    scraped = multi_scrape(queries)
    if scraped.startswith("ERROR"):
        return scraped, []

    user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions."
    analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
    if analysis.startswith("ERROR"):
        return analysis, []

    # Seed chat as a list of dicts so the assistant's first turn is the analysis.
    initial_chat = [
        {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
        {"role": "assistant", "content": analysis}
    ]
    return analysis, initial_chat
141
 
142
def continue_chat(chat_messages, user_message, analysis_text):
    """Append the user's question and the LLM's answer to the chat history.

    Returns the (possibly unchanged) history list. A blank question is a
    no-op; a ``None`` history is treated as empty.
    """
    # BUG FIX: normalize None history and guard None message — the original
    # raised AttributeError on user_message=None and appended to None.
    chat_messages = chat_messages or []
    if not user_message or not user_message.strip():
        return chat_messages
    chat_messages.append({"role": "user", "content": user_message})

    followup_system = (
        "You are AURA, a helpful analyst. The conversation context includes a recently generated analysis."
    )
    user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely."
    assistant_reply = run_llm_system_and_user(followup_system, user_payload)
    chat_messages.append({"role": "assistant", "content": assistant_reply})
    return chat_messages
153
+
154
# Convert dict chat to Gradio format
def convert_to_gradio_chat_format(chat_messages):
    """Pair internal {"role", "content"} messages into the (user, assistant)
    tuples that gr.Chatbot renders.

    A user turn is paired with the assistant turn that immediately follows
    it (empty string otherwise); assistant messages with no preceding user
    turn are skipped.
    """
    pairs = []
    idx = 0
    total = len(chat_messages)
    while idx < total:
        msg = chat_messages[idx]
        if msg["role"] != "user":
            # Orphan non-user message: nothing to pair it with.
            idx += 1
            continue
        reply = ""
        nxt = idx + 1
        if nxt < total and chat_messages[nxt]["role"] == "assistant":
            reply = chat_messages[nxt]["content"]
            idx = nxt  # consume the assistant turn as well
        pairs.append((msg["content"], reply))
        idx += 1
    return pairs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
  # -----------------------
170
+ # Gradio UI
171
  # -----------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
def build_ui():
    """Build the Gradio Blocks UI: prompt input, analysis view, and chat.

    Returns the (un-launched) Blocks app. State flows: analyze -> seeds
    analysis_state/chat_state; chat_state.change re-renders the chatbot.
    """
    with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
        # Custom CSS
        gr.HTML("""
        <style>
        .gradio-container { max-width: 1200px; margin: 20px auto; font-family: 'Arial', sans-serif; }
        .analysis-box { background: #f9f9f9; border-radius: 10px; padding: 12px; box-shadow: 0 4px 12px rgba(0,0,0,0.08); }
        .chat-box { background: #ffffff; border-radius: 10px; padding: 8px; box-shadow: 0 2px 10px rgba(0,0,0,0.05); }
        </style>
        """)

        gr.Markdown("# AURA Chat — Hedge Fund Picks")
        gr.Markdown("Enter data prompts (one per line). Click **Analyze**. Then chat about the generated analysis.")

        with gr.Row():
            # Left container: input
            with gr.Column(scale=1):
                prompts = gr.Textbox(
                    lines=6,
                    label="Data Prompts (one per line)",
                    placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys"
                )
                analyze_btn = gr.Button("Analyze", variant="primary")
                error_box = gr.Markdown("", visible=False)
                gr.Markdown(f"**Fixed settings:** Model = `{LLM_MODEL}`, Max tokens = `{MAX_TOKENS}`, Scrape delay = `{SCRAPE_DELAY}s`")
                gr.Markdown("Add your `OPENAI_API_KEY` to Space Secrets.")

            # Right container: analysis + chat
            with gr.Column(scale=2):
                analysis_out = gr.Textbox(label="Generated Analysis", lines=18, interactive=False, elem_classes="analysis-box")
                gr.Markdown("**Chat with AURA about this analysis**")
                chatbot = gr.Chatbot(label="AURA Chat", height=420, elem_classes="chat-box")
                user_input = gr.Textbox(placeholder="Ask follow-up questions...", label="Your question")
                send_btn = gr.Button("Send")

        # States
        analysis_state = gr.State("")  # raw analysis text, context for follow-ups
        chat_state = gr.State([])      # list of {"role", "content"} dicts

        # Handlers
        def on_analyze(prompts_text):
            analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
            # An empty chat means the pipeline returned an error or usage
            # message (some don't start with "ERROR", e.g. the empty-input
            # hint), so key off the chat rather than the string prefix.
            if not initial_chat:
                # BUG FIX: error_box is created with visible=False; returning
                # a bare string updated its value but left it hidden, so
                # errors were silently swallowed. gr.update flips visibility.
                return "", gr.update(value=f"**Error:** {analysis_text}", visible=True), "", []
            return analysis_text, gr.update(value="", visible=False), analysis_text, initial_chat

        def on_send(chat_state_list, user_msg, analysis_text):
            updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
            return updated_history, ""  # clear the input box after sending

        analyze_btn.click(
            fn=on_analyze,
            inputs=[prompts],
            outputs=[analysis_out, error_box, analysis_state, chat_state]
        )
        send_btn.click(
            fn=on_send,
            inputs=[chat_state, user_input, analysis_state],
            outputs=[chat_state, user_input]
        )
        user_input.submit(
            fn=on_send,
            inputs=[chat_state, user_input, analysis_state],
            outputs=[chat_state, user_input]
        )
        # Any change to the dict history re-renders the tuple-based chatbot.
        chat_state.change(
            fn=convert_to_gradio_chat_format,
            inputs=[chat_state],
            outputs=[chatbot]
        )

    return demo
243
 
244
  # -----------------------