likhonsheikh committed on
Commit
fe39382
Β·
verified Β·
1 Parent(s): f468342

Deploy Enhanced Gemini Multi-API Service

Browse files
Files changed (1) hide show
  1. app.py +311 -395
app.py CHANGED
@@ -1,449 +1,365 @@
1
  #!/usr/bin/env python3
2
  """
3
- Enhanced Gemini Multi-API - Full Application for HuggingFace Space
4
- Complete API compatibility and Gemini functionality
5
  """
6
 
7
  import os
8
  import json
 
 
 
 
 
 
 
 
9
  import requests
10
- from typing import List, Dict, Any, Optional
11
- import gradio as gr
12
 
13
- # Initialize API keys from environment
 
 
 
14
  GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY', '')
15
- BASE_URL = "https://generativelanguage.googleapis.com/v1beta"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
class GeminiAPI:
    """Gemini API client for Enhanced Multi-API functionality.

    Thin wrapper over the Google Generative Language REST API that
    exposes an OpenAI/Anthropic-shaped ``chat_completion`` and a static
    model listing.
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = BASE_URL

    def _make_request(self, endpoint: str, data: dict) -> dict:
        """POST ``data`` to ``endpoint``; return parsed JSON or ``{"error": ...}``."""
        url = f"{self.base_url}/{endpoint}?key={self.api_key}"
        headers = {
            'Content-Type': 'application/json',
            'User-Agent': 'Enhanced-Gemini-Multi-API/1.0'
        }

        try:
            response = requests.post(url, headers=headers, json=data, timeout=30)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            # Network/HTTP failures are reported in-band, not raised.
            return {"error": str(e)}

    def chat_completion(self, model: str, messages: List[Dict], **kwargs) -> Dict:
        """Chat completion compatible with the OpenAI/Anthropic message format.

        FIX: the previous revision collected every user message but sent
        only the last one to Gemini, silently dropping conversation
        history. The full transcript (user and assistant turns) is now
        included in the prompt.
        """
        system_message = ""
        transcript = []

        for msg in messages:
            role = msg.get('role')
            content = msg.get('content', '')
            if role == 'system':
                system_message = content
            elif role == 'user':
                transcript.append(f"User: {content}")
            elif role == 'assistant':
                transcript.append(f"Assistant: {content}")

        if transcript:
            prompt = f"{system_message}\n\n" + "\n".join(transcript)
        else:
            prompt = system_message

        # Prepare Gemini request
        request_data = {
            "contents": [{
                "parts": [{"text": prompt}]
            }],
            "generationConfig": {
                "temperature": kwargs.get('temperature', 0.7),
                "maxOutputTokens": kwargs.get('max_tokens', 1024),
                "topP": kwargs.get('top_p', 0.9),
                "topK": kwargs.get('top_k', 40)
            }
        }

        endpoint = f"models/{model}:generateContent"
        response = self._make_request(endpoint, request_data)

        if "error" in response:
            return {"error": response["error"]}

        # Format response to match the OpenAI/Anthropic shape.
        try:
            content = response.get('candidates', [{}])[0].get('content', {}).get('parts', [{}])[0].get('text', '')

            return {
                "choices": [{
                    "message": {
                        "content": content,
                        "role": "assistant"
                    },
                    "finish_reason": "stop"
                }],
                "model": model,
                "usage": {
                    "prompt_tokens": response.get('usageMetadata', {}).get('promptTokenCount', 0),
                    "completion_tokens": response.get('usageMetadata', {}).get('candidatesTokenCount', 0),
                    "total_tokens": response.get('usageMetadata', {}).get('totalTokenCount', 0)
                }
            }
        except (KeyError, IndexError) as e:
            return {"error": f"Response parsing error: {str(e)}"}

    def list_models(self) -> Dict:
        """List available Gemini models in an OpenAI-style envelope."""
        models = [
            "gemini-1.5-pro",
            "gemini-1.5-pro-002",
            "gemini-1.5-flash",
            "gemini-1.5-flash-8b",
            "gemini-2.0-flash-exp"
        ]

        return {
            "data": [
                {
                    "id": model,
                    "object": "model",
                    "created": 2024,
                    "owned_by": "google"
                }
                for model in models
            ]
        }
116
 
117
# Module-level cache for the shared Gemini client (lazy singleton).
api_client = None

def get_api_client():
    """Return the cached GeminiAPI client, creating it on first use."""
    global api_client
    if api_client is None and GEMINI_API_KEY:
        api_client = GeminiAPI(GEMINI_API_KEY)
    return api_client
126
-
127
def chat_with_gemini(message, history, model, temperature, max_tokens):
    """Enhanced chat interface with Gemini.

    Converts the Gradio (user, assistant) history pairs to an
    OpenAI-style message list, calls the API, and returns display text.
    """
    global api_client

    if not GEMINI_API_KEY:
        return "❌ Error: GEMINI_API_KEY not configured. Please set the API key in Space secrets."

    client = get_api_client()
    if not client:
        return "❌ Error: Failed to initialize API client."

    # Rebuild the message list from the chat history, then add the new turn.
    messages = []
    for user_msg, assistant_msg in (history or []):
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )

    if "error" in response:
        return f"❌ Error: {response['error']}"

    try:
        content = response["choices"][0]["message"]["content"]
        tokens_used = response.get("usage", {}).get("total_tokens", 0)
        return f"{content}\n\n---\nπŸ’¬ **Tokens Used**: {tokens_used}"
    except (KeyError, IndexError):
        return "❌ Error: Unable to parse API response"
165
 
166
def openai_compatible_chat(messages, model, temperature, max_tokens):
    """OpenAI/Anthropic compatible chat interface.

    Returns the raw response dict (or an ``{"error": ...}`` dict) so it
    can be serialized directly by the API-testing tab.
    """
    if not GEMINI_API_KEY:
        return {"error": "GEMINI_API_KEY not configured"}

    client = get_api_client()
    if client is None:
        return {"error": "Failed to initialize API client"}

    return client.chat_completion(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
183
-
184
def get_available_models():
    """Return the ids of available models, or a one-element error list."""
    client = get_api_client()
    if client is None:
        return ["Error: API client not initialized"]

    try:
        listing = client.list_models()
        return [entry["id"] for entry in listing.get("data", [])]
    except Exception as e:
        return [f"Error: {str(e)}"]
 
 
 
 
 
 
195
 
196
def test_api_connection():
    """Run a one-shot request to verify the Gemini API is reachable."""
    if not GEMINI_API_KEY:
        return "❌ GEMINI_API_KEY not configured"

    client = get_api_client()
    if not client:
        return "❌ Failed to initialize API client"

    # Minimal round-trip against the fastest model.
    test_messages = [{"role": "user", "content": "Hello, test connection"}]
    response = client.chat_completion("gemini-1.5-flash", test_messages)

    if "error" in response:
        return f"❌ API Test Failed: {response['error']}"
    return "βœ… API Connection Successful! Ready for Enhanced Gemini Multi-API functionality."
213
-
214
def get_models_api():
    """API endpoint for getting models (returns dicts, never raises)."""
    if not GEMINI_API_KEY:
        return {"error": "GEMINI_API_KEY not configured"}

    client = get_api_client()
    if client is None:
        return {"error": "Failed to initialize API client"}

    try:
        return client.list_models()
    except Exception as e:
        return {"error": str(e)}
227
-
228
def create_interface():
    """Create the full Enhanced Gemini Multi-API interface.

    Returns the assembled ``gr.Blocks`` demo; the caller launches it.

    FIX: ``show_error``, ``server_name`` and ``server_port`` are
    ``launch()`` options, not ``gr.Blocks`` constructor arguments —
    passing them to ``gr.Blocks(...)`` raises ``TypeError``. They are
    removed here; the ``__main__`` entry point already supplies them to
    ``demo.launch()``.
    """

    # Model choices for the dropdowns (may be an error-string list).
    models = get_available_models()

    with gr.Blocks(
        title="Enhanced Gemini Multi-API",
        theme=gr.themes.Soft()
    ) as demo:

        # Header banner
        gr.HTML("""
        <div style="text-align: center; padding: 2rem; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 15px; margin-bottom: 2rem;">
            <h1>πŸš€ Enhanced Gemini Multi-API</h1>
            <p>OpenAI/Anthropic Compatible β€’ Gemini Models β€’ Full API Features</p>
            <p><strong>Status:</strong> All Issues Resolved! Ready for Production!</p>
        </div>
        """)

        # API Status Tab
        with gr.Tab("πŸ”§ API Status"):
            gr.HTML("<h3>πŸ”§ API Configuration & Testing</h3>")

            with gr.Row():
                test_btn = gr.Button("πŸ§ͺ Test API Connection", variant="primary")
                models_btn = gr.Button("πŸ“‹ Get Models", variant="secondary")

            status_output = gr.Textbox(
                label="API Status",
                lines=5,
                interactive=False
            )

            models_output = gr.Textbox(
                label="Available Models",
                lines=5,
                interactive=False
            )

        # Chat Interface Tab
        with gr.Tab("πŸ’¬ Chat Interface"):
            gr.HTML("<h3>πŸ’¬ Enhanced Chat with Gemini Models</h3>")

            with gr.Row():
                model_dropdown = gr.Dropdown(
                    choices=models,
                    value=models[0] if models else "gemini-1.5-flash",
                    label="🧠 Model",
                    info="Select Gemini model for chat"
                )

                temp_slider = gr.Slider(
                    minimum=0.0,
                    maximum=2.0,
                    value=0.7,
                    step=0.1,
                    label="🌑️ Temperature",
                    info="Controls randomness in responses"
                )

                max_tokens_slider = gr.Slider(
                    minimum=256,
                    maximum=4096,
                    value=1024,
                    step=256,
                    label="πŸ“ Max Tokens",
                    info="Maximum response length"
                )

            chatbot = gr.Chatbot(height=400, label="Chat")

            with gr.Row():
                msg = gr.Textbox(
                    label="πŸ’­ Your Message",
                    placeholder="Enter your message here...",
                    lines=2
                )

            with gr.Row():
                send_btn = gr.Button("πŸš€ Send", variant="primary")
                clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")

        # API Testing Tab
        with gr.Tab("πŸ§ͺ API Testing"):
            gr.HTML("<h3>πŸ§ͺ OpenAI/Anthropic Compatible API Testing</h3>")

            model_dropdown_test = gr.Dropdown(
                choices=models,
                value=models[0] if models else "gemini-1.5-flash",
                label="🧠 Model"
            )

            temp_slider_test = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=0.7,
                step=0.1,
                label="🌑️ Temperature"
            )

            max_tokens_slider_test = gr.Slider(
                minimum=256,
                maximum=4096,
                value=1024,
                step=256,
                label="πŸ“ Max Tokens"
            )

            messages_input = gr.Textbox(
                label="πŸ’­ Messages (JSON format)",
                placeholder='[{"role": "user", "content": "Hello!"}]',
                lines=6
            )

            api_test_btn = gr.Button("πŸš€ Test API", variant="primary")
            api_response_output = gr.Textbox(
                label="πŸ“€ API Response",
                lines=10,
                interactive=False
            )

        # Models Information Tab
        with gr.Tab("πŸ“‹ Models Info"):
            gr.HTML("<h3>πŸ“‹ Available Gemini Models</h3>")

            gr.HTML("""
            <div style="background: #f8f9fa; padding: 1.5rem; border-radius: 10px; margin-bottom: 1rem;">
                <h4>πŸ€– Available Models:</h4>
                <ul>
                    <li><strong>gemini-1.5-pro</strong> - Advanced reasoning and complex tasks</li>
                    <li><strong>gemini-1.5-pro-002</strong> - Latest pro version with improvements</li>
                    <li><strong>gemini-1.5-flash</strong> - Fast, efficient for most use cases</li>
                    <li><strong>gemini-1.5-flash-8b</strong> - Ultra-fast for simple tasks</li>
                    <li><strong>gemini-2.0-flash-exp</strong> - Experimental latest model</li>
                </ul>
            </div>
            """)

            models_btn2 = gr.Button("πŸ”„ Refresh Models List")
            models_info_output = gr.Textbox(
                label="Models Information",
                lines=8,
                interactive=False
            )

        # Documentation Tab
        with gr.Tab("πŸ“š Documentation"):
            gr.HTML("""
            <div style="background: #e8f5e8; padding: 1.5rem; border-radius: 10px; border-left: 4px solid #28a745;">
                <h4>πŸ“š Enhanced Gemini Multi-API Documentation</h4>
                <p><strong>Features:</strong></p>
                <ul>
                    <li>βœ… <strong>OpenAI Compatible:</strong> Works with OpenAI SDKs and APIs</li>
                    <li>βœ… <strong>Anthropic Compatible:</strong> Compatible with Anthropic API format</li>
                    <li>βœ… <strong>Full Gemini Suite:</strong> All Gemini models available</li>
                    <li>βœ… <strong>Chat Interface:</strong> Interactive chat with history</li>
                    <li>βœ… <strong>API Testing:</strong> Direct API testing and debugging</li>
                    <li>βœ… <strong>Model Management:</strong> Dynamic model selection</li>
                </ul>

                <p><strong>API Key:</strong> Configure GEMINI_API_KEY in Space secrets</p>
                <p><strong>Updated:</strong> 2025-11-14 04:14:57</p>
            </div>
            """)

        # Event handlers
        test_btn.click(test_api_connection, outputs=[status_output])
        models_btn.click(get_available_models, outputs=[models_output])
        models_btn2.click(get_available_models, outputs=[models_info_output])

        # Chat interface events
        def user(user_message, history):
            # Append the user's turn (assistant slot pending) and clear the box.
            return "", history + [(user_message, None)]

        def bot(history, model, temperature, max_tokens):
            # Fill in the assistant reply for the last pending turn.
            if not history:
                return history

            user_message, _ = history[-1]
            bot_message = chat_with_gemini(user_message, history[:-1], model, temperature, max_tokens)
            history[-1] = (user_message, bot_message)
            return history

        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, [chatbot, model_dropdown, temp_slider, max_tokens_slider], [chatbot]
        )

        send_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, [chatbot, model_dropdown, temp_slider, max_tokens_slider], [chatbot]
        )

        clear_btn.click(lambda: None, outputs=[chatbot], queue=False)

        # API testing events
        api_test_btn.click(
            lambda model, temp, max_tokens, messages: json.dumps(
                openai_compatible_chat(json.loads(messages), model, temp, max_tokens),
                indent=2
            ),
            [model_dropdown_test, temp_slider_test, max_tokens_slider_test, messages_input],
            [api_response_output]
        )

    return demo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
 
437
if __name__ == "__main__":
    # Entry point: build the Gradio interface and serve it.
    demo = create_interface()

    # Hosting platforms inject PORT; default to Gradio's standard 7860.
    port = int(os.environ.get("PORT", 7860))

    demo.launch(
        server_name="0.0.0.0",
        server_port=port,
        share=False,
        show_error=True,
        debug=False,
    )
 
1
  #!/usr/bin/env python3
2
  """
3
+ Enhanced Gemini Multi-API - Anthropic Compatible API Service
4
+ Provides Google Gemini models through Anthropic-compatible interface
5
  """
6
 
7
  import os
8
  import json
9
+ import time
10
+ import uuid
11
+ from datetime import datetime
12
+ from typing import List, Dict, Any, Optional, Union
13
+ from dataclasses import dataclass
14
+ from flask import Flask, request, jsonify
15
+ import google.generativeai as genai
16
+ from google.generativeai.types import HarmCategory, HarmBlockThreshold
17
  import requests
 
 
18
 
19
# Flask application serving the Anthropic-compatible endpoints.
app = Flask(__name__)

# Service configuration (overridable per-request where applicable).
GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY', '')
DEFAULT_MODEL = "gemini-1.5-flash"
MAX_TOKENS = 4096
DEFAULT_TEMPERATURE = 0.7

# Configure the Gemini SDK once at import time; skipped when no key is set
# so health/info endpoints still work without credentials.
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)
    gemini_model = genai.GenerativeModel(DEFAULT_MODEL)
else:
    gemini_model = None

# Versioned URL prefix shared by all API routes.
API_VERSION = "v1"
BASE_PATH = f"/{API_VERSION}"

# Claude model aliases (plus Gemini passthrough names) -> Gemini model ids.
MODELS = {
    "claude-3-sonnet-20240229": "gemini-1.5-pro",
    "claude-3-haiku-20240307": "gemini-1.5-flash",
    "claude-3-5-sonnet-20241022": "gemini-1.5-pro",
    "claude-3-5-haiku-20241022": "gemini-1.5-flash",
    "gemini-1.5-pro": "gemini-1.5-pro",
    "gemini-1.5-flash": "gemini-1.5-flash",
    "gemini-1.5-pro-002": "gemini-1.5-pro-002",
    "gemini-1.5-flash-8b": "gemini-1.5-flash-8b",
    "gemini-2.0-flash-exp": "gemini-2.0-flash-exp",
}
51
 
52
@dataclass
class Message:
    # A single chat turn: who said it (role) and what was said (content).
    role: str
    content: str
56
+
57
class AnthropicCompatibleAPI:
    """Anthropic API compatible wrapper for Gemini models."""

    def __init__(self, gemini_api_key: str):
        if not gemini_api_key:
            raise ValueError("GEMINI_API_KEY is required")

        genai.configure(api_key=gemini_api_key)
        self.api_key = gemini_api_key

    def chat_completion(self, messages: List[Dict], model: str = DEFAULT_MODEL, **kwargs) -> Dict:
        """OpenAI/Anthropic compatible chat completion.

        FIX 1: the Flask route passes explicit ``None`` for optional
        parameters (``top_p``, ``top_k``, ``stop_sequences``);
        ``kwargs.get(key, default)`` returns that ``None`` instead of the
        default, which previously fed ``top_p=None`` into
        ``GenerationConfig``. ``None`` values now fall back to defaults.

        FIX 2: the previous revision collected the conversation turns and
        then used only the last one; every turn is now included in the
        prompt sent to Gemini.
        """

        def _param(name, default):
            # Treat an explicit None the same as "not provided".
            value = kwargs.get(name)
            return default if value is None else value

        # Map Anthropic model names to Gemini models.
        gemini_model_name = self._map_model(model)

        max_tokens = _param('max_tokens', MAX_TOKENS)
        temperature = _param('temperature', DEFAULT_TEMPERATURE)
        stop_sequences = _param('stop_sequences', [])

        # Build the prompt: optional system preamble, then every turn.
        system_prompt = ""
        turns = []

        for msg in messages:
            role = msg.get('role', '')
            content = msg.get('content', '')

            if role == 'system':
                system_prompt += content + "\n"
            else:
                label = "Assistant" if role == 'assistant' else "Human"
                turns.append(f"{label}: {content}")

        parts = []
        if system_prompt.strip():
            parts.append(system_prompt.strip())
        parts.extend(turns)

        full_prompt = "\n\n".join(parts) + "\n\nAssistant:"

        try:
            # Generate the response using the mapped Gemini model.
            model_instance = genai.GenerativeModel(gemini_model_name)

            generation_config = {
                'temperature': temperature,
                'max_output_tokens': max_tokens,
                'top_p': _param('top_p', 0.9),
                'top_k': _param('top_k', 40),
            }

            if stop_sequences:
                generation_config['stop_sequences'] = stop_sequences

            response = model_instance.generate_content(
                full_prompt,
                generation_config=genai.types.GenerationConfig(**generation_config)
            )

            response_text = response.text

            # Rough word-based token estimates; the SDK response does not
            # expose exact counts here.
            input_tokens = len(full_prompt.split()) * 1.3
            output_tokens = len(response_text.split()) * 1.3

            return {
                "id": f"msg_{str(uuid.uuid4())[:8]}",
                "type": "message",
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": response_text
                    }
                ],
                "model": model,
                "stop_reason": "end_turn",
                "stop_sequence": None,
                "usage": {
                    "input_tokens": int(input_tokens),
                    "output_tokens": int(output_tokens),
                    "cache_creation_input_tokens": 0,
                    "cache_read_input_tokens": 0
                },
                "created_at": int(time.time())
            }

        except Exception as e:
            return {
                "error": {
                    "type": "api_error",
                    "message": str(e),
                    "code": "INTERNAL_ERROR"
                }
            }

    def _map_model(self, model: str) -> str:
        """Map model names to Gemini equivalents (default on unknown names)."""
        return MODELS.get(model, DEFAULT_MODEL)

    def list_models(self) -> Dict:
        """List available models in Anthropic format."""
        models = [
            {
                "id": "claude-3-sonnet-20240229",
                "object": "model",
                "created": 1710364800,
                "owned_by": "google-gemini",
                "name": "claude-3-sonnet-20240229",
                "display_name": "Gemini 1.5 Pro (Claude Compatible)",
                "description": "Anthropic-compatible access to Gemini 1.5 Pro",
                "input_token_limit": 2000000,
                "output_token_limit": 8192,
                "top_provider": {
                    "context_length": 2000000,
                    "max_completion_tokens": 8192
                }
            },
            {
                "id": "claude-3-haiku-20240307",
                "object": "model",
                "created": 1710364800,
                "owned_by": "google-gemini",
                "name": "claude-3-haiku-20240307",
                "display_name": "Gemini 1.5 Flash (Claude Compatible)",
                "description": "Anthropic-compatible access to Gemini 1.5 Flash",
                "input_token_limit": 2000000,
                "output_token_limit": 8192,
                "top_provider": {
                    "context_length": 2000000,
                    "max_completion_tokens": 8192
                }
            }
        ]

        return {"object": "list", "data": models}
 
 
 
 
 
 
 
 
 
 
199
 
200
+ # Global API instance
201
+ api = AnthropicCompatibleAPI(GEMINI_API_KEY) if GEMINI_API_KEY else None
202
 
203
+ # API Routes
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
 
205
@app.route(f"{BASE_PATH}/models", methods=["GET"])
def list_models():
    """List available models - Anthropic compatible"""
    # No key configured -> 401 with an Anthropic-style error envelope.
    if not GEMINI_API_KEY:
        return jsonify({
            "error": {
                "type": "configuration_error",
                "message": "GEMINI_API_KEY not configured",
                "code": "CONFIGURATION_ERROR"
            }
        }), 401

    try:
        return jsonify(api.list_models())
    except Exception as e:
        return jsonify({
            "error": {
                "type": "api_error",
                "message": str(e),
                "code": "INTERNAL_ERROR"
            }
        }), 500
228
 
229
@app.route(f"{BASE_PATH}/messages", methods=["POST"])
def create_message():
    """Create a message - Anthropic compatible"""
    # Guard: service must be configured before any request is accepted.
    if not GEMINI_API_KEY:
        return jsonify({
            "error": {
                "type": "configuration_error",
                "message": "GEMINI_API_KEY not configured",
                "code": "CONFIGURATION_ERROR"
            }
        }), 401

    data = request.get_json()
    if not data:
        return jsonify({
            "error": {
                "type": "invalid_request_error",
                "message": "Request body is required",
                "code": "INVALID_REQUEST"
            }
        }), 400

    # Both fields are mandatory in the Anthropic Messages API.
    for field in ("model", "messages"):
        if field not in data:
            return jsonify({
                "error": {
                    "type": "invalid_request_error",
                    "message": f"Missing required field: {field}",
                    "code": "INVALID_REQUEST"
                }
            }), 400

    try:
        # Delegate to the Gemini wrapper; optional knobs pass through as-is.
        response = api.chat_completion(
            messages=data["messages"],
            model=data["model"],
            max_tokens=data.get("max_tokens", MAX_TOKENS),
            temperature=data.get("temperature", DEFAULT_TEMPERATURE),
            stop_sequences=data.get("stop_sequences"),
            top_p=data.get("top_p"),
            top_k=data.get("top_k")
        )

        if "error" in response:
            return jsonify(response), 500

        return jsonify(response)

    except Exception as e:
        return jsonify({
            "error": {
                "type": "api_error",
                "message": str(e),
                "code": "INTERNAL_ERROR"
            }
        }), 500
295
+
296
@app.route(f"{BASE_PATH}/completions", methods=["POST"])
def create_completion():
    """Create completion - OpenAI compatible fallback"""
    # Delegates to the Messages handler; request/response handling is shared.
    return create_message()
300
+
301
@app.route("/health", methods=["GET"])
def health_check():
    """Health check endpoint"""
    payload = {
        "status": "healthy",
        "service": "Enhanced Gemini Multi-API",
        "version": "1.0.0",
        "timestamp": datetime.now().isoformat(),
        "api_key_configured": bool(GEMINI_API_KEY),
        "available_models": len(MODELS),
    }
    return jsonify(payload)
312
+
313
@app.route("/info", methods=["GET"])
def api_info():
    """API information endpoint"""
    payload = {
        "service": "Enhanced Gemini Multi-API",
        "description": "Google Gemini models through Anthropic-compatible interface",
        "version": "1.0.0",
        "endpoints": {
            "list_models": f"{BASE_PATH}/models",
            "create_message": f"{BASE_PATH}/messages",
            "create_completion": f"{BASE_PATH}/completions",
            "health": "/health",
            "info": "/info",
        },
        "supported_models": list(MODELS.keys()),
        "documentation": "https://docs.anthropic.com/claude/reference",
        "api_key_required": True,
    }
    return jsonify(payload)
331
+
332
@app.errorhandler(404)
def not_found(error):
    # Keep 404s in the same Anthropic-style error envelope as API errors.
    body = {
        "error": {
            "type": "not_found_error",
            "message": "Endpoint not found",
            "code": "NOT_FOUND",
        }
    }
    return jsonify(body), 404
341
+
342
@app.errorhandler(500)
def internal_error(error):
    # Generic 500 handler; details are deliberately not leaked to clients.
    body = {
        "error": {
            "type": "api_error",
            "message": "Internal server error",
            "code": "INTERNAL_ERROR",
        }
    }
    return jsonify(body), 500
351
 
352
if __name__ == "__main__":
    # Hosting platforms inject PORT; default to 8080 locally.
    port = int(os.environ.get("PORT", 8080))

    if not GEMINI_API_KEY:
        print("⚠️ WARNING: GEMINI_API_KEY not configured")
        print(" Set GEMINI_API_KEY environment variable for full functionality")
        print(" Without API key: Only health and info endpoints will work")

    print(f"πŸš€ Enhanced Gemini Multi-API starting on port {port}")
    print(f"πŸ“– API Info: http://localhost:{port}/info")
    print(f"❀️ Health Check: http://localhost:{port}/health")
    print(f"πŸ“š Models: POST http://localhost:{port}{BASE_PATH}/messages")

    # Bind on all interfaces so the container's mapped port is reachable.
    app.run(host="0.0.0.0", port=port, debug=False)