import os

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

from gradio_client import Client, handle_file
from typing import Any, Dict, List, Optional, Tuple, Union
import requests
import json

# Lazy initialization to avoid a crash if the Space is down at import time.
_schat_client = None


def _get_schat_client():
    """Get or create the schat client (lazy initialization)."""
    global _schat_client
    if _schat_client is None:
        _schat_client = Client("VeuReu/schat")
    return _schat_client


def get_from_prompt(prompt):
    """Send a prompt to the /generate_out_from_prompt endpoint."""
    client = _get_schat_client()
    result = client.predict(
        prompt=prompt,
        api_name="/generate_out_from_prompt",
    )
    return result


def summarize_sentences_salamandra(sentence, num_words):
    """Calls the /resume endpoint of the remote VeuReu/schat Space."""
    client = _get_schat_client()
    result = client.predict(
        sentence=sentence,
        num_words=num_words,
        api_name="/resume",
    )
    return result


def identify_characters(sentence, person):
    """Calls the /modificat endpoint of the remote VeuReu/schat Space."""
    client = _get_schat_client()
    result = client.predict(
        sentence=sentence,
        person=person,
        api_name="/modificat",
    )
    return result


def free_narration_schat(srt_text):
    """Calls the /narració endpoint of the remote VeuReu/schat Space."""
    client = _get_schat_client()
    result = client.predict(
        srt_text=srt_text,
        api_name="/narració",
    )
    return result
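

# Minimal usage sketch (assumption: the remote VeuReu/schat Space is reachable and
# exposes the endpoints above; the example sentence and word count are illustrative
# placeholders, not values taken from the project).
if __name__ == "__main__":
    summary = summarize_sentences_salamandra(
        sentence="El gat dorm al sofà mentre plou a fora.",
        num_words=5,
    )
    print(summary)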