Created
March 26, 2026 20:26
-
-
Save clutchski/43cef91a699e8732d83db6bf89750aa4 to your computer and use it in GitHub Desktop.
ADK Python + Braintrust Gateway: multi-provider model calls
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Test multiple models through the Braintrust gateway using ADK's AnthropicLlm.

The gateway handles protocol translation, so we use AnthropicLlm for both
Anthropic and OpenAI models.
"""
import os

# Route Anthropic SDK traffic through the Braintrust gateway, authenticating
# with the Braintrust key. Set before the ADK imports below (order preserved
# from the original script).
os.environ["ANTHROPIC_BASE_URL"] = "https://gateway.braintrust.dev"
os.environ["ANTHROPIC_API_KEY"] = os.environ["BRAINTRUST_API_KEY"]

import asyncio

from google.adk.models.anthropic_llm import AnthropicLlm
from google.adk.models.llm_request import LlmRequest
from google.genai import types

# Gateway model slugs to exercise: one Anthropic model, one OpenAI model.
MODELS = [
    "claude-haiku-4-5",
    "gpt-4o-mini",
]
async def call_model(model: str, prompt: str) -> str:
    """Send `prompt` to `model` through the gateway and return the reply text.

    Joins the text of every text-bearing part of the first response that has
    content; returns "" when no such response arrives.
    """
    request = LlmRequest(
        model=model,
        contents=[types.Content(role="user", parts=[types.Part.from_text(text=prompt)])],
        config=types.GenerateContentConfig(system_instruction="Be concise."),
    )
    llm = AnthropicLlm(model=model)
    async for reply in llm.generate_content_async(request):
        if reply.content and reply.content.parts:
            texts = (part.text for part in reply.content.parts if part.text)
            return " ".join(texts)
    return ""
async def main():
    """Ask each configured model the same question and print its answer."""
    prompt = "What is the capital of France? One sentence."
    for name in MODELS:
        print(f"\n{name}:")
        answer = await call_model(name, prompt)
        print(f" {answer}")


if __name__ == "__main__":
    asyncio.run(main())
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Test tool use with multiple models through the Braintrust gateway.

Known bug: OpenAI models fail on parallel tool calls -- tool_call_id mismatch
on the second turn when the gateway translates back from Anthropic format.
"""
import os

# Route Anthropic SDK traffic through the Braintrust gateway, authenticating
# with the Braintrust key. Set before the ADK imports below (order preserved
# from the original script).
os.environ["ANTHROPIC_BASE_URL"] = "https://gateway.braintrust.dev"
os.environ["ANTHROPIC_API_KEY"] = os.environ["BRAINTRUST_API_KEY"]

import asyncio
import datetime
from zoneinfo import ZoneInfo

from google.adk.agents import Agent
from google.adk.models.anthropic_llm import AnthropicLlm
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types

# Gateway model slugs to exercise: one Anthropic model, one OpenAI model.
MODELS = [
    "claude-haiku-4-5",
    "gpt-4o-mini",
]
def get_weather(city: str) -> dict:
    """Retrieves the current weather for a given city."""
    # Canned reports keyed by lowercase city name.
    # NOTE: the docstring above is the tool description the agent framework
    # exposes to the model, so it is kept verbatim.
    reports = {
        "new york": "Sunny, 24C (75F), light breeze from the west.",
        "london": "Overcast, 14C (57F), chance of rain.",
        "tokyo": "Partly cloudy, 28C (82F), humid.",
        "san francisco": "Foggy, 16C (61F), clearing by afternoon.",
    }
    report = reports.get(city.lower())
    if report is None:
        return {"result": f"No weather data for '{city}'."}
    return {"result": f"Weather in {city}: {report}"}
def get_current_time(city: str) -> dict:
    """Returns the current time in a given city."""
    # Lowercase city name -> IANA timezone identifier.
    # NOTE: the docstring above is the tool description the agent framework
    # exposes to the model, so it is kept verbatim.
    zones = {
        "new york": "America/New_York",
        "london": "Europe/London",
        "tokyo": "Asia/Tokyo",
        "san francisco": "America/Los_Angeles",
    }
    zone = zones.get(city.lower())
    if zone is None:
        return {"result": f"Unknown timezone for '{city}'."}
    stamp = datetime.datetime.now(ZoneInfo(zone)).strftime("%Y-%m-%d %H:%M:%S %Z")
    return {"result": f"Current time in {city}: {stamp}"}
async def run_agent(model_name: str):
    """Run one tool-using turn against `model_name` and print the event trace.

    Builds an agent with both demo tools, sends a single query that needs two
    tool calls, and prints each text / tool-call / tool-result part as events
    stream back. Errors are caught and printed rather than raised so one
    failing model does not stop the run.
    """
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"Model: {model_name}")
    print(banner)

    agent = Agent(
        name="bt_gateway_agent",
        model=AnthropicLlm(model=model_name),
        instruction="You are a helpful assistant. Be concise.",
        tools=[get_weather, get_current_time],
    )
    sessions = InMemorySessionService()
    runner = Runner(agent=agent, app_name="test", session_service=sessions)
    session = await sessions.create_session(app_name="test", user_id="test-user")

    # A single question that requires calling both tools.
    query = "What's the weather and time in San Francisco?"
    print(f"\n--- User: {query}")
    message = types.Content(
        role="user", parts=[types.Part.from_text(text=query)]
    )

    try:
        async for event in runner.run_async(
            user_id="test-user", session_id=session.id, new_message=message
        ):
            parts = event.content.parts if event.content and event.content.parts else []
            for part in parts:
                if part.text:
                    print(f" [text] {part.text}")
                elif part.function_call:
                    print(f" [tool call] {part.function_call.name}({dict(part.function_call.args)})")
                elif part.function_response:
                    print(f" [tool result] {part.function_response.name} -> {dict(part.function_response.response)}")
            if event.is_final_response():
                final = " ".join(p.text for p in parts if p.text)
                print(f"--- Agent: {final}")
    except Exception as e:
        print(f"--- ERROR: {e}")
async def main():
    """Run the tool-use scenario once per configured model."""
    for name in MODELS:
        await run_agent(name)


if __name__ == "__main__":
    asyncio.run(main())
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment