Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.pandaprobe.com/llms.txt

Use this file to discover all available pages before exploring further.

Installation

pip install "pandaprobe[google-adk]"

Setup

from pandaprobe.integrations.google_adk import GoogleADKAdapter

# Create one adapter per process; ids group all spans under this conversation.
adapter = GoogleADKAdapter(
    session_id="conversation-123",  # UUIDs recommended for reliable grouping
    user_id="user-abc",
    tags=["production"],
)
adapter.instrument()  # call once at startup, before creating ADK agents/runners
We recommend using UUIDs for session_id and user_id so traces can be grouped reliably across runs.
Call instrument() once at application startup, before creating any ADK agents or runners.

Usage

from google.adk.agents import LlmAgent
from google.adk.runners import Runner

agent = LlmAgent(
    name="my-agent",
    model="gemini-2.5-flash",
    instruction="You are a helpful assistant.",
)

# NOTE: `session_service`, `session`, and `content` are assumed to be created
# elsewhere — this snippet is a fragment; see the full example below for a
# runnable version.
runner = Runner(agent=agent, app_name="my-app", session_service=session_service)

# Stream events and print each text part as it arrives.
async for event in runner.run_async(session_id=session.id, user_id="user-1", new_message=content):
    if event.content and event.content.parts:  # events may carry no content
        print(event.content.parts[0].text, end="")

What gets traced

| ADK Component | Span Kind | Description |
| --- | --- | --- |
| `Runner.run_async` | CHAIN | Root trace boundary |
| `BaseAgent.run_async` | AGENT | Agent execution with session messages as input |
| `BaseLlmFlow._call_llm_async` | LLM | LLM calls with model, params, thinking, token usage, TTFT |
| `BaseTool.run_async` | TOOL | Base tool execution |
| `FunctionTool.run_async` | TOOL | Function tool execution |
| `McpTool.run_async` | TOOL | MCP tool execution |

Token usage mapping

| ADK Field | PandaProbe Field |
| --- | --- |
| `prompt_token_count` | `prompt_tokens` |
| `candidates_token_count` | `completion_tokens` |
| `total_token_count` | `total_tokens` |
| `thoughts_token_count` | `reasoning_tokens` |
| `cached_content_token_count` | `cache_read_tokens` |

Thinking mode

When using Gemini models with thinking enabled, thought parts are automatically separated from answer parts. Thinking content is stored in metadata as reasoning_summary.

Example with tools

This example defines two function tools and runs an LlmAgent asynchronously, tracing the whole run with GoogleADKAdapter:
import asyncio
import uuid

from google.adk.agents import LlmAgent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai.types import Content, Part

import pandaprobe
from pandaprobe.integrations.google_adk import GoogleADKAdapter


def get_weather(city: str) -> dict:
    """Get the current weather for a city."""
    known = {
        "london": {"condition": "Cloudy", "temp": "15°C", "humidity": "70%"},
        "tokyo": {"condition": "Sunny", "temp": "28°C", "humidity": "45%"},
        "new york": {"condition": "Partly cloudy", "temp": "22°C", "humidity": "55%"},
        "paris": {"condition": "Rainy", "temp": "12°C", "humidity": "85%"},
    }
    # Case-insensitive lookup; unknown cities get an error payload instead.
    key = city.lower()
    if key in known:
        return known[key]
    return {"error": f"No data for {city}"}


def get_population(city: str) -> dict:
    """Get the approximate population of a city."""
    known = {
        "london": {"population": "8.8 million"},
        "tokyo": {"population": "13.9 million"},
        "new york": {"population": "8.3 million"},
        "paris": {"population": "2.2 million"},
    }
    # Case-insensitive lookup; unknown cities get an error payload instead.
    key = city.lower()
    if key in known:
        return known[key]
    return {"error": f"No data for {city}"}


# Agent wired with both function tools; the model decides which tool(s) to call.
agent = LlmAgent(
    name="city_info_agent",
    model="gemini-3.1-flash-lite-preview",
    instruction=(
        "You are a helpful assistant with access to weather and population tools. "
        "Use the tools to answer questions about cities."
    ),
    tools=[get_weather, get_population],
)

# Identifiers used both to create the ADK session and to tag the trace;
# a fresh UUID per run keeps traces grouped per conversation.
APP_NAME = "tool_agent"
USER_ID = "user_1"
SESSION_ID = str(uuid.uuid4())


async def main():
    """Run the city-info agent once and ship the resulting trace to PandaProbe.

    Instruments ADK, creates an in-memory session, streams the runner's events,
    prints the final response, and flushes/shuts down the exporter.
    """
    # Instrument before any agents/runners execute so every span is captured.
    adapter = GoogleADKAdapter(
        session_id=SESSION_ID,
        user_id=USER_ID,
        tags=["tool-agent", "example"],
    )
    adapter.instrument()

    session_service = InMemorySessionService()
    await session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID)

    runner = Runner(agent=agent, app_name=APP_NAME, session_service=session_service)

    user_message = Content(
        role="user",
        parts=[Part(text="What's the weather like in London and what's its population?")],
    )

    async for event in runner.run_async(user_id=USER_ID, session_id=SESSION_ID, new_message=user_message):
        # Guard against events with no content/parts before reading .text —
        # the original indexed event.content.parts without this check.
        if event.is_final_response() and event.content and event.content.parts:
            text = " ".join(p.text for p in event.content.parts if p.text)
            print(f"Agent: {text}")

    # Flush pending spans and close the exporter before the process exits.
    pandaprobe.flush()
    pandaprobe.shutdown()
    print("\nTrace sent to PandaProbe backend.")


# Run the example only when executed as a script (not on import).
if __name__ == "__main__":
    asyncio.run(main())
This produces a trace with: CHAIN (runner) → AGENT (city_info_agent) → LLM (model call) → TOOL (get_weather) → TOOL (get_population) → LLM (final response).