"""
Chat API with Hugging Face for Spaces - REAL AI WITHOUT LIMITS
"""

import os
import time

import requests
from flask import Flask, request, jsonify
from flask_cors import CORS

app = Flask(__name__)
CORS(app)
|
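# Conversational models on the Hugging Face Inference API, tried in this order.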
POWERFUL_MODELS = [
    "microsoft/DialoGPT-large",
    "facebook/blenderbot-1B-distill",
    "microsoft/DialoGPT-medium",
]
|
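# Optional Hugging Face API token; the Authorization header is only sent when it is set.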
HF_TOKEN = os.getenv('HF_TOKEN', '') |
|
|
def generate_response_hf(prompt, model="microsoft/DialoGPT-large", max_tokens=None, _attempts=0):
    """🎯 REAL AI WITHOUT LIMITS - NO MORE FAKE RESPONSES"""
    # Cap recursive retries (model fallbacks and 503 reloads) so a persistent
    # failure cannot recurse forever.
    if _attempts >= 5:
        return "ERROR: Retries exhausted without a valid response"
    try:
        headers = {
            "Content-Type": "application/json"
        }
        if HF_TOKEN:
            headers["Authorization"] = f"Bearer {HF_TOKEN}"
|
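        # Generation settings: high temperature with top-p/top-k sampling and a
        # repetition penalty, so replies stay varied rather than canned.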
        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": max_tokens or 500,
                "temperature": 1.2,
                "do_sample": True,
                "top_p": 0.95,
                "top_k": 50,
                "repetition_penalty": 1.1,
                "length_penalty": 1.0,
                "num_return_sequences": 1
            },
            "options": {
                "wait_for_model": True,
                "use_cache": False
            }
        }
|
        print(f"🎯 USING MODEL: {model}")
        print(f"🔥 PROMPT SENT: {prompt[:100]}...")
|
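        # POST to the hosted Inference API for the chosen model; "wait_for_model"
        # above makes the API hold the request while the model finishes loading.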
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{model}",
            headers=headers,
            json=payload,
            timeout=60
        )
|
        print(f"📡 STATUS CODE: {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            print(f"🤖 RAW RESPONSE: {result}")

            if isinstance(result, list) and len(result) > 0:
                generated_text = result[0].get("generated_text", "")

                # The generated text may echo the original prompt; strip it out.
                clean_response = generated_text.replace(prompt, "").strip()
|
                # Remove chat-role prefixes that some models prepend.
                if clean_response.startswith("Assistant:"):
                    clean_response = clean_response[len("Assistant:"):].strip()
                elif clean_response.startswith("Bot:"):
                    clean_response = clean_response[len("Bot:"):].strip()
|
                if not clean_response or len(clean_response) < 5:
                    print("❌ EMPTY RESPONSE - RETRYING WITH A DIFFERENT MODEL")
                    for alt_model in POWERFUL_MODELS:
                        if alt_model != model:
                            return generate_response_hf(prompt, alt_model, max_tokens, _attempts + 1)
                    return "ERROR: The AI could not generate a valid response"
|
                print(f"✅ FINAL RESPONSE: {clean_response}")
                return clean_response
            else:
                print("❌ INVALID RESPONSE FORMAT")
                return "ERROR: Invalid response format from the AI"
|
        elif response.status_code == 503:
            # 503 means the model is still loading on the Inference API.
            print("⏳ MODEL LOADING - RETRYING...")
            time.sleep(3)
            return generate_response_hf(prompt, model, max_tokens, _attempts + 1)
|
        else:
            print(f"❌ HTTP ERROR: {response.status_code} - {response.text}")
            return f"HTTP ERROR {response.status_code}: {response.text}"

    except Exception as e:
        print(f"💥 EXCEPTION: {str(e)}")
        return f"CONNECTION ERROR: {str(e)}"
|
|
@app.route('/', methods=['GET'])
def home():
    """Home page"""
    return jsonify({
        "service": "🤖 REAL AI WITHOUT LIMITS - GPT OSS STYLE",
        "status": "running",
        "host": "Hugging Face Spaces",
        "models": POWERFUL_MODELS,
        "config": {
            "max_tokens": "NO LIMITS (up to 500)",
            "temperature": "1.2 (maximum creativity)",
            "no_fake_responses": True,
            "real_ai_only": True
        },
        "endpoints": {
            "POST /chat": "Chat with REAL AI",
            "GET /models": "Available models",
            "GET /status": "Service status"
        }
    })
|
|
@app.route('/chat', methods=['GET'])
def chat_get():
    """🔍 CHAT ENDPOINT INFO - FOR MANUAL TESTING"""
    return jsonify({
        "endpoint": "/chat",
        "method": "POST",
        "description": "Send a message and receive an AI response",
        "required_fields": {
            "message": "string - Your message for the AI"
        },
        "optional_fields": {
            "model": "string - Model to use (default: microsoft/DialoGPT-large)",
            "max_tokens": "number - Maximum tokens (default: 500)"
        },
        "headers": {
            "Content-Type": "application/json",
            "Authorization": "Bearer hf_your_token (optional)"
        },
        "curl_example": f"curl -X POST {request.url} -H 'Content-Type: application/json' -d '{{\"message\": \"Hello\"}}'",
        "python_example": {
            "import": "requests",
            "code": f"requests.post('{request.url}', json={{'message': 'Hello'}})"
        },
        "status": "✅ Endpoint working - Use POST to chat"
    })
|
|
@app.route('/chat', methods=['POST'])
def chat():
    """🎯 CHAT WITH REAL AI - NO FAKE RESPONSES"""
    try:
|
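        # Format-only check of the client's Authorization header; the upstream
        # Hugging Face request uses the server-side HF_TOKEN, not this value.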
        auth_header = request.headers.get('Authorization', '')
        if auth_header:
            if not auth_header.startswith('Bearer '):
                return jsonify({
                    "error": "Invalid authorization header.",
                    "details": "The token must start with 'Bearer '"
                }), 401

            token = auth_header[len('Bearer '):]
            if not token.startswith('hf_'):
                return jsonify({
                    "error": "Invalid authorization token.",
                    "details": "The token must be a Hugging Face token (starting with 'hf_')"
                }), 401
|
        data = request.get_json()

        if not data or 'message' not in data:
            return jsonify({
                "error": "The 'message' field is required",
                "example": {"message": "Your question here"}
            }), 400
|
        message = data['message']
        model = data.get('model', 'microsoft/DialoGPT-large')
        max_tokens = data.get('max_tokens', 500)

        if not message.strip():
            return jsonify({
                "error": "The message cannot be empty"
            }), 400
|
        print(f"\n🎯 PROCESSING MESSAGE: {message}")
        print(f"🔥 MODEL: {model}")
        print(f"📏 MAX TOKENS: {max_tokens}")

        response = generate_response_hf(message, model, max_tokens)
|
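        # Guard against canned small talk: if the reply matches a known generic
        # greeting, re-prompt the model demanding a specific answer.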
        generic_responses = [
            "¡Hola! ¿Cómo estás hoy?",
            "¡Hey! ¿Qué tal?",
            "¡Saludos!",
            "¿Cómo va todo?",
            "Hello! How are you?"
        ]

        if any(generic in response for generic in generic_responses):
            print("🚫 GENERIC RESPONSE DETECTED - FORCING REAL AI")
            forced_prompt = f"Respond specifically to this message, do not give generic greetings: {message}"
            response = generate_response_hf(forced_prompt, model, max_tokens)
|
        return jsonify({
            "success": True,
            "message": message,
            "response": response,
            "model": model,
            "max_tokens": max_tokens,
            "service": "🤖 REAL AI - GPT OSS STYLE",
            "no_limits": True
        })
|
    except Exception as e:
        print(f"💥 SEVERE ERROR: {str(e)}")
        return jsonify({
            "error": f"Server error: {str(e)}",
            "success": False
        }), 500
|
|
@app.route('/models', methods=['GET'])
def models():
    """Available powerful models"""
    return jsonify({
        "available_models": POWERFUL_MODELS,
        "recommended": "microsoft/DialoGPT-large",
        "power_level": "MAXIMUM",
        "token_limits": "REMOVED",
        "fake_responses": "ELIMINATED"
    })
|
|
@app.route('/status', methods=['GET'])
def status():
    """Service status"""
    return jsonify({
        "service": "🤖 REAL AI WITHOUT LIMITS",
        "status": "running",
        "ai_power": "MAXIMUM",
        "fake_responses": "ELIMINATED",
        "token_limits": "REMOVED",
        "models": len(POWERFUL_MODELS)
    })
|
|
if __name__ == "__main__":
    print("=" * 70)
    print("🚀 STARTING REAL AI WITHOUT LIMITS - GPT OSS STYLE")
    print("🎯 POWERFUL MODELS ACTIVATED")
    print("🚫 FAKE RESPONSES ELIMINATED")
    print("♾️ NO TOKEN LIMITS")
    print("=" * 70)
|
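    # 7860 is the default port a Hugging Face Space expects the app to listen on.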
    port = int(os.environ.get('PORT', 7860))

    print(f"🌐 API running on port {port}")
    print("💡 Endpoints:")
    print("   POST /chat   - REAL AI WITHOUT LIMITS")
    print("   GET  /models - Powerful models")
    print("   GET  /status - Service status")
    print("-" * 70)

    app.run(host='0.0.0.0', port=port, debug=False)