#!/usr/bin/env python3
"""
Enhanced Gemini Multi-API - Anthropic-Compatible API Service
Provides Google Gemini models through an Anthropic-compatible interface.
"""
import os
import time
import uuid
from datetime import datetime
from typing import List, Dict
from dataclasses import dataclass
from flask import Flask, request, jsonify
import google.generativeai as genai
# Initialize Flask app
app = Flask(__name__)

# API configuration
GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY', '')
DEFAULT_MODEL = "gemini-1.5-flash"
MAX_TOKENS = 4096
DEFAULT_TEMPERATURE = 0.7

# Initialize Gemini
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)
    gemini_model = genai.GenerativeModel(DEFAULT_MODEL)
else:
    gemini_model = None

API_VERSION = "v1"
BASE_PATH = f"/{API_VERSION}"
# Available models: Anthropic/Claude aliases map to the Gemini model that serves them
MODELS = {
    "claude-3-sonnet-20240229": "gemini-1.5-pro",
    "claude-3-haiku-20240307": "gemini-1.5-flash",
    "claude-3-5-sonnet-20241022": "gemini-1.5-pro",
    "claude-3-5-haiku-20241022": "gemini-1.5-flash",
    "gemini-1.5-pro": "gemini-1.5-pro",
    "gemini-1.5-flash": "gemini-1.5-flash",
    "gemini-1.5-pro-002": "gemini-1.5-pro-002",
    "gemini-1.5-flash-8b": "gemini-1.5-flash-8b",
    "gemini-2.0-flash-exp": "gemini-2.0-flash-exp"
}
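# Example (illustrative): a request naming "claude-3-haiku-20240307" is served by
# "gemini-1.5-flash"; names absent from MODELS fall back to DEFAULT_MODEL (see
# AnthropicCompatibleAPI._map_model below).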

@dataclass
class Message:
    role: str
    content: str

class AnthropicCompatibleAPI:
    """Anthropic API compatible wrapper for Gemini models"""

    def __init__(self, gemini_api_key: str):
        if not gemini_api_key:
            raise ValueError("GEMINI_API_KEY is required")
        genai.configure(api_key=gemini_api_key)
        self.api_key = gemini_api_key
    def chat_completion(self, messages: List[Dict], model: str = DEFAULT_MODEL, **kwargs) -> Dict:
        """OpenAI/Anthropic-compatible chat completion"""
        # Map Anthropic model names to Gemini models
        gemini_model_name = self._map_model(model)

        # Extract parameters (callers may pass explicit None values)
        max_tokens = kwargs.get('max_tokens', MAX_TOKENS)
        temperature = kwargs.get('temperature', DEFAULT_TEMPERATURE)
        stop_sequences = kwargs.get('stop_sequences') or []
        top_p = kwargs.get('top_p')
        top_k = kwargs.get('top_k')

        # Split the system prompt out from the conversation turns
        system_prompt = ""
        conversation_messages = []
        for msg in messages:
            role = msg.get('role', '')
            content = msg.get('content', '')
            if role == 'system':
                system_prompt += content + "\n"
            else:
                conversation_messages.append({"role": role, "parts": [content]})

        # Flatten the conversation into a single Gemini prompt, preserving turn order
        full_prompt = system_prompt.strip() if system_prompt else ""
        for msg in conversation_messages:
            speaker = "Assistant" if msg["role"] == "assistant" else "Human"
            turn = f"{speaker}: {msg['parts'][0]}"
            full_prompt = f"{full_prompt}\n\n{turn}" if full_prompt else turn
        full_prompt += "\n\nAssistant:"

        try:
            # Generate response using Gemini
            model_instance = genai.GenerativeModel(gemini_model_name)
            generation_config = {
                'temperature': temperature,
                'max_output_tokens': max_tokens,
                'top_p': top_p if top_p is not None else 0.9,
                'top_k': top_k if top_k is not None else 40
            }
            if stop_sequences:
                generation_config['stop_sequences'] = stop_sequences
            response = model_instance.generate_content(
                full_prompt,
                generation_config=genai.types.GenerationConfig(**generation_config)
            )
            # Format the response to match Anthropic's Messages format
            response_text = response.text

            # Calculate usage (rough approximation: ~1.3 tokens per word)
            input_tokens = len(full_prompt.split()) * 1.3
            output_tokens = len(response_text.split()) * 1.3
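            # Example (illustrative): a 100-word prompt is reported as ~130 input
            # tokens by this heuristic; real tokenizer counts will differ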
            return {
                "id": f"msg_{str(uuid.uuid4())[:8]}",
                "type": "message",
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": response_text
                    }
                ],
                "model": model,
                "stop_reason": "end_turn",
                "stop_sequence": None,
                "usage": {
                    "input_tokens": int(input_tokens),
                    "output_tokens": int(output_tokens),
                    "cache_creation_input_tokens": 0,
                    "cache_read_input_tokens": 0
                },
                "created_at": int(time.time())
            }
        except Exception as e:
            return {
                "error": {
                    "type": "api_error",
                    "message": str(e),
                    "code": "INTERNAL_ERROR"
                }
            }

    def _map_model(self, model: str) -> str:
        """Map model names to Gemini equivalents"""
        return MODELS.get(model, DEFAULT_MODEL)
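
    # Example (illustrative):
    #   _map_model("claude-3-5-sonnet-20241022") -> "gemini-1.5-pro"
    #   _map_model("some-unknown-model")         -> "gemini-1.5-flash" (DEFAULT_MODEL)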

    def list_models(self) -> Dict:
        """List available models in Anthropic format"""
        models = [
            {
                "id": "claude-3-sonnet-20240229",
                "object": "model",
                "created": 1710364800,
                "owned_by": "google-gemini",
                "name": "claude-3-sonnet-20240229",
                "display_name": "Gemini 1.5 Pro (Claude Compatible)",
                "description": "Anthropic-compatible access to Gemini 1.5 Pro",
                "input_token_limit": 2000000,
                "output_token_limit": 8192,
                "top_provider": {
                    "context_length": 2000000,
                    "max_completion_tokens": 8192
                }
            },
            {
                "id": "claude-3-haiku-20240307",
                "object": "model",
                "created": 1710364800,
                "owned_by": "google-gemini",
                "name": "claude-3-haiku-20240307",
                "display_name": "Gemini 1.5 Flash (Claude Compatible)",
                "description": "Anthropic-compatible access to Gemini 1.5 Flash",
                "input_token_limit": 2000000,
                "output_token_limit": 8192,
                "top_provider": {
                    "context_length": 2000000,
                    "max_completion_tokens": 8192
                }
            }
        ]
        return {"object": "list", "data": models}

# Global API instance
api = AnthropicCompatibleAPI(GEMINI_API_KEY) if GEMINI_API_KEY else None

# API Routes
@app.route(f"{BASE_PATH}/models", methods=["GET"])
def list_models():
    """List available models - Anthropic compatible"""
    if not GEMINI_API_KEY:
        return jsonify({
            "error": {
                "type": "configuration_error",
                "message": "GEMINI_API_KEY not configured",
                "code": "CONFIGURATION_ERROR"
            }
        }), 401
    try:
        models = api.list_models()
        return jsonify(models)
    except Exception as e:
        return jsonify({
            "error": {
                "type": "api_error",
                "message": str(e),
                "code": "INTERNAL_ERROR"
            }
        }), 500

@app.route(f"{BASE_PATH}/messages", methods=["POST"])
def create_message():
    """Create a message - Anthropic compatible"""
    if not GEMINI_API_KEY:
        return jsonify({
            "error": {
                "type": "configuration_error",
                "message": "GEMINI_API_KEY not configured",
                "code": "CONFIGURATION_ERROR"
            }
        }), 401

    # Get request data (silent=True so malformed JSON falls through to our 400 below)
    data = request.get_json(silent=True)
    if not data:
        return jsonify({
            "error": {
                "type": "invalid_request_error",
                "message": "Request body is required",
                "code": "INVALID_REQUEST"
            }
        }), 400

    # Validate required fields
    required_fields = ["model", "messages"]
    for field in required_fields:
        if field not in data:
            return jsonify({
                "error": {
                    "type": "invalid_request_error",
                    "message": f"Missing required field: {field}",
                    "code": "INVALID_REQUEST"
                }
            }), 400

    # Extract parameters
    model = data["model"]
    messages = data["messages"]
    max_tokens = data.get("max_tokens", MAX_TOKENS)
    temperature = data.get("temperature", DEFAULT_TEMPERATURE)

    try:
        # Call Gemini through our API wrapper
        response = api.chat_completion(
            messages=messages,
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            stop_sequences=data.get("stop_sequences"),
            top_p=data.get("top_p"),
            top_k=data.get("top_k")
        )
        if "error" in response:
            return jsonify(response), 500
        return jsonify(response)
    except Exception as e:
        return jsonify({
            "error": {
                "type": "api_error",
                "message": str(e),
                "code": "INTERNAL_ERROR"
            }
        }), 500

@app.route(f"{BASE_PATH}/completions", methods=["POST"])
def create_completion():
    """Create completion - OpenAI-compatible fallback"""
    return create_message()  # Same implementation

@app.route("/health", methods=["GET"])
def health_check():
    """Health check endpoint"""
    return jsonify({
        "status": "healthy",
        "service": "Enhanced Gemini Multi-API",
        "version": "1.0.0",
        "timestamp": datetime.now().isoformat(),
        "api_key_configured": bool(GEMINI_API_KEY),
        "available_models": len(MODELS)
    })
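
# Illustrative /health response (values vary by deployment):
#   {"status": "healthy", "service": "Enhanced Gemini Multi-API", "version": "1.0.0",
#    "timestamp": "...", "api_key_configured": true, "available_models": 9}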

@app.route("/info", methods=["GET"])
def api_info():
    """API information endpoint"""
    return jsonify({
        "service": "Enhanced Gemini Multi-API",
        "description": "Google Gemini models through an Anthropic-compatible interface",
        "version": "1.0.0",
        "endpoints": {
            "list_models": f"{BASE_PATH}/models",
            "create_message": f"{BASE_PATH}/messages",
            "create_completion": f"{BASE_PATH}/completions",
            "health": "/health",
            "info": "/info"
        },
        "supported_models": list(MODELS.keys()),
        "documentation": "https://docs.anthropic.com/claude/reference",
        "api_key_required": True
    })

@app.errorhandler(404)
def not_found(error):
    return jsonify({
        "error": {
            "type": "not_found_error",
            "message": "Endpoint not found",
            "code": "NOT_FOUND"
        }
    }), 404

@app.errorhandler(500)
def internal_error(error):
    return jsonify({
        "error": {
            "type": "api_error",
            "message": "Internal server error",
            "code": "INTERNAL_ERROR"
        }
    }), 500

if __name__ == "__main__":
    port = int(os.environ.get("PORT", 8080))
    if not GEMINI_API_KEY:
        print("⚠️ WARNING: GEMINI_API_KEY not configured")
        print("   Set the GEMINI_API_KEY environment variable for full functionality")
        print("   Without an API key, only the health and info endpoints will work")
    print(f"🚀 Enhanced Gemini Multi-API starting on port {port}")
    print(f"📍 API Info: http://localhost:{port}/info")
    print(f"❤️ Health Check: http://localhost:{port}/health")
    print(f"🤖 Messages: POST http://localhost:{port}{BASE_PATH}/messages")
    app.run(host="0.0.0.0", port=port, debug=False)
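
# --- Example usage (a sketch: assumes the service is running locally on port 8080
# --- with GEMINI_API_KEY set; the payload mirrors Anthropic's Messages API) ---
#
#   curl -s http://localhost:8080/v1/messages \
#     -H "Content-Type: application/json" \
#     -d '{
#           "model": "claude-3-haiku-20240307",
#           "max_tokens": 256,
#           "messages": [{"role": "user", "content": "Say hello in one sentence."}]
#         }'
#
# The response is an Anthropic-style message object with "content", "stop_reason",
# and approximate "usage" counts, generated by the mapped Gemini model.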