From e3afc396f2de5c1d42034d8b0b17d83605c52d75 Mon Sep 17 00:00:00 2001
From: William Horton
Date: Mon, 19 May 2025 13:38:53 -0400
Subject: [PATCH] Handle the case where OpenAI ChatCompletion created is None

The OpenAI spec defines this as required, but other OpenAI-compatible
providers (like OpenRouter) may not populate it on all responses. This
adds code to handle the case where the field is None.

Fixes #1746
---
 pydantic_ai_slim/pydantic_ai/models/openai.py | 14 ++++++--
 tests/models/test_openai.py                   | 36 +++++++++++++++++++
 2 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py
index 6e999fdef..1b25bb6ca 100644
--- a/pydantic_ai_slim/pydantic_ai/models/openai.py
+++ b/pydantic_ai_slim/pydantic_ai/models/openai.py
@@ -14,7 +14,7 @@
 from pydantic_ai.providers import Provider, infer_provider
 
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
-from .._utils import guard_tool_call_id as _guard_tool_call_id
+from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc
 from ..messages import (
     AudioUrl,
     BinaryContent,
@@ -298,7 +298,11 @@ async def _completions_create
 
     def _process_response(self, response: chat.ChatCompletion) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
-        timestamp = datetime.fromtimestamp(response.created, tz=timezone.utc)
+        if response.created:
+            timestamp = datetime.fromtimestamp(response.created, tz=timezone.utc)
+        else:
+            timestamp = _now_utc()
+
         choice = response.choices[0]
         items: list[ModelResponsePart] = []
         if choice.message.content is not None:
@@ -554,7 +558,11 @@ def customize_request_parameters(self, model_request_parameters: ModelRequestPar
 
     def _process_response(self, response: responses.Response) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
-        timestamp = datetime.fromtimestamp(response.created_at, tz=timezone.utc)
+        if response.created_at:
+            timestamp = datetime.fromtimestamp(response.created_at, tz=timezone.utc)
+        else:
+            timestamp = _now_utc()
+
         items: list[ModelResponsePart] = []
         items.append(TextPart(response.output_text))
         for item in response.output:
diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py
index 75a0160b3..5b9684c0e 100644
--- a/tests/models/test_openai.py
+++ b/tests/models/test_openai.py
@@ -140,6 +140,19 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
     )
 
 
+def completion_message_created_none(
+    message: ChatCompletionMessage, *, usage: CompletionUsage | None = None
+) -> chat.ChatCompletion:
+    return chat.ChatCompletion.model_construct(
+        created=None,
+        id='123',
+        choices=[Choice(finish_reason='stop', index=0, message=message)],
+        model='gpt-4o-123',
+        object='chat.completion',
+        usage=usage,
+    )
+
+
 async def test_request_simple_success(allow_model_requests: None):
     c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
     mock_client = MockOpenAI.create_mock(c)
@@ -389,6 +402,23 @@ async def get_location(loc_name: str) -> str:
     )
 
 
+async def test_request_created_at_none(allow_model_requests: None):
+    c = completion_message_created_none(
+        ChatCompletionMessage(content='world', role='assistant'),
+    )
+    mock_client = MockOpenAI.create_mock(c)
+    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client))
+    agent = Agent(m)
+
+    result = await agent.run('Hello')
+    assert result.output == 'world'
+    assert result.all_messages() == snapshot(
+        [
+            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelResponse(
+                parts=[TextPart(content='world')],
+                usage=Usage(requests=1),
+                model_name='gpt-4o-123',
+                timestamp=IsNow(tz=timezone.utc),
+            ),
+        ]
+    )
+
+
 FinishReason = Literal['stop', 'length', 'tool_calls', 'content_filter', 'function_call']