Spaces:
Running
on
Zero
Running
on
Zero
Commit
·
b843268
1
Parent(s):
becbb85
Upd genai lib
Browse files — gemini_mcp.py (+24 −54)
gemini_mcp.py
CHANGED
|
@@ -25,6 +25,7 @@ except ImportError:
|
|
| 25 |
# Gemini imports
|
| 26 |
try:
|
| 27 |
from google import genai
|
|
|
|
| 28 |
except ImportError:
|
| 29 |
print("Error: google-genai not installed. Install with: pip install google-genai", file=sys.stderr)
|
| 30 |
sys.exit(1)
|
|
@@ -39,7 +40,8 @@ if not GEMINI_API_KEY:
|
|
| 39 |
logger.error("GEMINI_API_KEY not set in environment variables")
|
| 40 |
sys.exit(1)
|
| 41 |
|
| 42 |
-
|
|
|
|
| 43 |
|
| 44 |
# Configuration from environment
|
| 45 |
GEMINI_MODEL = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash")
|
|
@@ -165,73 +167,41 @@ async def call_tool(name: str, arguments: dict) -> Sequence[TextContent | ImageC
|
|
| 165 |
model = arguments.get("model", GEMINI_MODEL)
|
| 166 |
temperature = float(arguments.get("temperature", GEMINI_TEMPERATURE))
|
| 167 |
|
| 168 |
-
# Prepare
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
logger.error(f"Error loading model {model}: {e}")
|
| 173 |
-
return [TextContent(type="text", text=f"Error: Failed to load model {model}")]
|
| 174 |
-
|
| 175 |
-
# Prepare content parts
|
| 176 |
-
parts = []
|
| 177 |
|
| 178 |
-
#
|
| 179 |
if system_prompt:
|
| 180 |
-
|
| 181 |
-
generation_config = genai.types.GenerationConfig(
|
| 182 |
-
temperature=temperature,
|
| 183 |
-
max_output_tokens=GEMINI_MAX_OUTPUT_TOKENS
|
| 184 |
-
)
|
| 185 |
-
else:
|
| 186 |
-
generation_config = genai.types.GenerationConfig(
|
| 187 |
-
temperature=temperature,
|
| 188 |
-
max_output_tokens=GEMINI_MAX_OUTPUT_TOKENS
|
| 189 |
-
)
|
| 190 |
-
|
| 191 |
-
# Prepare content parts for Gemini
|
| 192 |
-
# Gemini API accepts a list where each part can be:
|
| 193 |
-
# - A string (for text)
|
| 194 |
-
# - A dict with "mime_type" and "data" keys (for binary data)
|
| 195 |
-
content_parts = []
|
| 196 |
|
| 197 |
-
#
|
|
|
|
|
|
|
| 198 |
if files:
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
# Use genai.types.Part or dict format
|
| 202 |
-
content_parts.append({
|
| 203 |
-
"mime_type": file_part["mime_type"],
|
| 204 |
-
"data": file_part["data"]
|
| 205 |
-
})
|
| 206 |
-
|
| 207 |
-
# Add text prompt (as string)
|
| 208 |
-
content_parts.append(user_prompt)
|
| 209 |
|
| 210 |
-
# Generate content
|
| 211 |
try:
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
system_instruction=system_prompt
|
| 219 |
-
)
|
| 220 |
-
else:
|
| 221 |
-
response = await asyncio.to_thread(
|
| 222 |
-
gemini_model.generate_content,
|
| 223 |
-
content_parts,
|
| 224 |
-
generation_config=generation_config
|
| 225 |
-
)
|
| 226 |
|
| 227 |
# Extract text from response
|
| 228 |
-
if response and response.text:
|
| 229 |
return [TextContent(type="text", text=response.text)]
|
| 230 |
else:
|
| 231 |
return [TextContent(type="text", text="Error: No response from Gemini")]
|
| 232 |
|
| 233 |
except Exception as e:
|
| 234 |
logger.error(f"Error generating content: {e}")
|
|
|
|
|
|
|
| 235 |
return [TextContent(type="text", text=f"Error: {str(e)}")]
|
| 236 |
|
| 237 |
except Exception as e:
|
|
|
|
| 25 |
# Gemini imports
|
| 26 |
try:
|
| 27 |
from google import genai
|
| 28 |
+
GEMINI_AVAILABLE = True
|
| 29 |
except ImportError:
|
| 30 |
print("Error: google-genai not installed. Install with: pip install google-genai", file=sys.stderr)
|
| 31 |
sys.exit(1)
|
|
|
|
| 40 |
logger.error("GEMINI_API_KEY not set in environment variables")
|
| 41 |
sys.exit(1)
|
| 42 |
|
| 43 |
+
# Initialize Gemini client
|
| 44 |
+
gemini_client = genai.Client(api_key=GEMINI_API_KEY)
|
| 45 |
|
| 46 |
# Configuration from environment
|
| 47 |
GEMINI_MODEL = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash")
|
|
|
|
| 167 |
model = arguments.get("model", GEMINI_MODEL)
|
| 168 |
temperature = float(arguments.get("temperature", GEMINI_TEMPERATURE))
|
| 169 |
|
| 170 |
+
# Prepare content for Gemini API
|
| 171 |
+
# The API accepts contents as a string or list
|
| 172 |
+
# For files, we need to handle them differently
|
| 173 |
+
contents = user_prompt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
+
# If system prompt is provided, prepend it to the user prompt
|
| 176 |
if system_prompt:
|
| 177 |
+
contents = f"{system_prompt}\n\n{user_prompt}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
|
| 179 |
+
# Note: The simple API doesn't support files or temperature directly
|
| 180 |
+
# For files, we would need to use a different approach or encode them
|
| 181 |
+
# For now, we'll handle text-only requests
|
| 182 |
if files:
|
| 183 |
+
logger.warning("File support not available in simple API mode. Processing text only.")
|
| 184 |
+
# Could potentially encode files as base64 in the prompt, but keeping it simple for now
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
|
| 186 |
+
# Generate content using the simple API
|
| 187 |
try:
|
| 188 |
+
# Use asyncio.to_thread to make the blocking call async
|
| 189 |
+
response = await asyncio.to_thread(
|
| 190 |
+
gemini_client.models.generate_content,
|
| 191 |
+
model=model,
|
| 192 |
+
contents=contents
|
| 193 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 194 |
|
| 195 |
# Extract text from response
|
| 196 |
+
if response and hasattr(response, 'text') and response.text:
|
| 197 |
return [TextContent(type="text", text=response.text)]
|
| 198 |
else:
|
| 199 |
return [TextContent(type="text", text="Error: No response from Gemini")]
|
| 200 |
|
| 201 |
except Exception as e:
|
| 202 |
logger.error(f"Error generating content: {e}")
|
| 203 |
+
import traceback
|
| 204 |
+
logger.debug(traceback.format_exc())
|
| 205 |
return [TextContent(type="text", text=f"Error: {str(e)}")]
|
| 206 |
|
| 207 |
except Exception as e:
|