Skip to content

Enhance Gemini usage tracking to collect comprehensive token data #1752

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 16 commits into from
May 28, 2025
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 48 additions & 3 deletions pydantic_ai_slim/pydantic_ai/models/gemini.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,13 +438,12 @@ async def _get_gemini_responses(self) -> AsyncIterator[_GeminiResponse]:
responses_to_yield = gemini_responses[:-1]
for r in responses_to_yield[current_gemini_response_index:]:
current_gemini_response_index += 1
self._usage += _metadata_as_usage(r)
yield r

# Now yield the final response, which should be complete
if gemini_responses: # pragma: no branch
r = gemini_responses[-1]
self._usage += _metadata_as_usage(r)
self._usage = _metadata_as_usage(r)
yield r

@property
Expand Down Expand Up @@ -737,8 +736,17 @@ class _GeminiCandidates(TypedDict):
safety_ratings: NotRequired[Annotated[list[_GeminiSafetyRating], pydantic.Field(alias='safetyRatings')]]


class _GeminiModalityTokenCount(TypedDict):
    """Token count for a single content modality.

    Mirrors the Gemini API's ``ModalityTokenCount`` object; see
    <https://ai.google.dev/api/generate-content#modalitytokencount>.
    """

    # Which kind of content these tokens were counted for.
    modality: Annotated[
        Literal['MODALITY_UNSPECIFIED', 'TEXT', 'IMAGE', 'VIDEO', 'AUDIO', 'DOCUMENT'],
        pydantic.Field(alias='modality'),
    ]
    # Number of tokens attributed to this modality; the API may omit it, so default to 0.
    token_count: Annotated[int, pydantic.Field(alias='tokenCount', default=0)]


class _GeminiUsageMetaData(TypedDict, total=False):
"""See <https://ai.google.dev/api/generate-content#FinishReason>.
"""See <https://ai.google.dev/api/generate-content#UsageMetadata>.

The docs suggest all fields are required, but some are actually not required, so we assume they are all optional.
"""
Expand All @@ -747,6 +755,20 @@ class _GeminiUsageMetaData(TypedDict, total=False):
candidates_token_count: NotRequired[Annotated[int, pydantic.Field(alias='candidatesTokenCount')]]
total_token_count: Annotated[int, pydantic.Field(alias='totalTokenCount')]
cached_content_token_count: NotRequired[Annotated[int, pydantic.Field(alias='cachedContentTokenCount')]]
thoughts_token_count: NotRequired[Annotated[int, pydantic.Field(alias='thoughtsTokenCount')]]
tool_use_prompt_token_count: NotRequired[Annotated[int, pydantic.Field(alias='toolUsePromptTokenCount')]]
prompt_tokens_details: NotRequired[
Annotated[list[_GeminiModalityTokenCount], pydantic.Field(alias='promptTokensDetails')]
]
cache_tokens_details: NotRequired[
Annotated[list[_GeminiModalityTokenCount], pydantic.Field(alias='cacheTokensDetails')]
]
candidates_tokens_details: NotRequired[
Annotated[list[_GeminiModalityTokenCount], pydantic.Field(alias='candidatesTokensDetails')]
]
tool_use_prompt_tokens_details: NotRequired[
Annotated[list[_GeminiModalityTokenCount], pydantic.Field(alias='toolUsePromptTokensDetails')]
]


def _metadata_as_usage(response: _GeminiResponse) -> usage.Usage:
Expand All @@ -756,6 +778,29 @@ def _metadata_as_usage(response: _GeminiResponse) -> usage.Usage:
details: dict[str, int] = {}
if cached_content_token_count := metadata.get('cached_content_token_count'):
details['cached_content_token_count'] = cached_content_token_count # pragma: no cover

if thoughts_token_count := metadata.get('thoughts_token_count'):
details['thoughts_token_count'] = thoughts_token_count

if tool_use_prompt_token_count := metadata.get('tool_use_prompt_token_count'):
details['tool_use_prompt_token_count'] = tool_use_prompt_token_count # pragma: no cover

detailed_keys_map: dict[str, str] = {
'prompt_tokens_details': 'prompt_tokens',
'cache_tokens_details': 'cache_tokens',
'candidates_tokens_details': 'candidates_tokens',
'tool_use_prompt_tokens_details': 'tool_use_prompt_tokens',
}

details.update(
{
f'{detail["modality"].lower()}_{suffix}': detail['token_count']
for key, suffix in detailed_keys_map.items()
if (metadata_details := metadata.get(key))
for detail in metadata_details
}
)

return usage.Usage(
request_tokens=metadata.get('prompt_token_count', 0),
response_tokens=metadata.get('candidates_token_count', 0),
Expand Down
36 changes: 27 additions & 9 deletions tests/models/test_gemini.py
Original file line number Diff line number Diff line change
Expand Up @@ -732,12 +732,12 @@ async def test_stream_text(get_gemini_client: GetGeminiClient):
'Hello world',
]
)
assert result.usage() == snapshot(Usage(requests=1, request_tokens=2, response_tokens=4, total_tokens=6))
assert result.usage() == snapshot(Usage(requests=1, request_tokens=1, response_tokens=2, total_tokens=3))

async with agent.run_stream('Hello') as result:
chunks = [chunk async for chunk in result.stream_text(delta=True, debounce_by=None)]
assert chunks == snapshot(['Hello ', 'world'])
assert result.usage() == snapshot(Usage(requests=1, request_tokens=2, response_tokens=4, total_tokens=6))
assert result.usage() == snapshot(Usage(requests=1, request_tokens=1, response_tokens=2, total_tokens=3))


async def test_stream_invalid_unicode_text(get_gemini_client: GetGeminiClient):
Expand Down Expand Up @@ -769,7 +769,7 @@ async def test_stream_invalid_unicode_text(get_gemini_client: GetGeminiClient):
async with agent.run_stream('Hello') as result:
chunks = [chunk async for chunk in result.stream(debounce_by=None)]
assert chunks == snapshot(['abc', 'abc€def', 'abc€def'])
assert result.usage() == snapshot(Usage(requests=1, request_tokens=2, response_tokens=4, total_tokens=6))
assert result.usage() == snapshot(Usage(requests=1, request_tokens=1, response_tokens=2, total_tokens=3))


async def test_stream_text_no_data(get_gemini_client: GetGeminiClient):
Expand Down Expand Up @@ -840,7 +840,7 @@ async def bar(y: str) -> str:
async with agent.run_stream('Hello') as result:
response = await result.get_output()
assert response == snapshot((1, 2))
assert result.usage() == snapshot(Usage(requests=2, request_tokens=3, response_tokens=6, total_tokens=9))
assert result.usage() == snapshot(Usage(requests=2, request_tokens=2, response_tokens=4, total_tokens=6))
assert result.all_messages() == snapshot(
[
ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
Expand All @@ -849,7 +849,7 @@ async def bar(y: str) -> str:
ToolCallPart(tool_name='foo', args={'x': 'a'}, tool_call_id=IsStr()),
ToolCallPart(tool_name='bar', args={'y': 'b'}, tool_call_id=IsStr()),
],
usage=Usage(request_tokens=2, response_tokens=4, total_tokens=6),
usage=Usage(request_tokens=1, response_tokens=2, total_tokens=3, details={}),
model_name='gemini-1.5-flash',
timestamp=IsNow(tz=timezone.utc),
),
Expand All @@ -865,7 +865,7 @@ async def bar(y: str) -> str:
),
ModelResponse(
parts=[ToolCallPart(tool_name='final_result', args={'response': [1, 2]}, tool_call_id=IsStr())],
usage=Usage(request_tokens=1, response_tokens=2, total_tokens=3),
usage=Usage(request_tokens=1, response_tokens=2, total_tokens=3, details={}),
model_name='gemini-1.5-flash',
timestamp=IsNow(tz=timezone.utc),
),
Expand Down Expand Up @@ -1096,7 +1096,13 @@ async def get_image() -> BinaryContent:
),
ToolCallPart(tool_name='get_image', args={}, tool_call_id=IsStr()),
],
usage=Usage(requests=1, request_tokens=38, response_tokens=28, total_tokens=427, details={}),
usage=Usage(
requests=1,
request_tokens=38,
response_tokens=28,
total_tokens=427,
details={'thoughts_token_count': 361, 'text_prompt_tokens': 38},
),
model_name='gemini-2.5-pro-preview-03-25',
timestamp=IsDatetime(),
),
Expand All @@ -1119,7 +1125,13 @@ async def get_image() -> BinaryContent:
),
ModelResponse(
parts=[TextPart(content='The image shows a kiwi fruit, sliced in half.')],
usage=Usage(requests=1, request_tokens=360, response_tokens=11, total_tokens=572, details={}),
usage=Usage(
requests=1,
request_tokens=360,
response_tokens=11,
total_tokens=572,
details={'thoughts_token_count': 201, 'text_prompt_tokens': 102, 'image_prompt_tokens': 258},
),
model_name='gemini-2.5-pro-preview-03-25',
timestamp=IsDatetime(),
),
Expand Down Expand Up @@ -1229,7 +1241,13 @@ async def test_gemini_model_instructions(allow_model_requests: None, gemini_api_
),
ModelResponse(
parts=[TextPart(content='The capital of France is Paris.\n')],
usage=Usage(requests=1, request_tokens=13, response_tokens=8, total_tokens=21, details={}),
usage=Usage(
requests=1,
request_tokens=13,
response_tokens=8,
total_tokens=21,
details={'text_prompt_tokens': 13, 'text_candidates_tokens': 8},
),
model_name='gemini-1.5-flash',
timestamp=IsDatetime(),
),
Expand Down