From 6b1db9624b96436dc7b3b7bd1b9f9e04397d7e4e Mon Sep 17 00:00:00 2001
From: KatHaruto
Date: Tue, 10 Jun 2025 19:30:45 +0900
Subject: [PATCH] fix: add ensure_ascii=False to json.dumps for correct
 Unicode output

---
 src/agents/extensions/models/litellm_model.py | 10 +++++++---
 src/agents/models/openai_chatcompletions.py   |  6 +++---
 src/agents/models/openai_responses.py         | 12 +++++++++---
 src/agents/tool_context.py                    |  1 +
 4 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index 7cf7a2de5..163e94b27 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -96,7 +96,11 @@ async def get_response(
             logger.debug("Received model response")
         else:
             logger.debug(
-                f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
+                f"""LLM resp:\n{
+                    json.dumps(
+                        response.choices[0].message.model_dump(), indent=2, ensure_ascii=False
+                    )
+                }\n"""
             )
 
         if hasattr(response, "usage"):
@@ -263,8 +267,8 @@ async def _fetch_response(
         else:
             logger.debug(
                 f"Calling Litellm model: {self.model}\n"
-                f"{json.dumps(converted_messages, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 6b4045d21..8ce1690b0 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -80,12 +80,12 @@ async def get_response(
         if message is not None:
             logger.debug(
                 "LLM resp:\n%s\n",
-                json.dumps(message.model_dump(), indent=2),
+                json.dumps(message.model_dump(), indent=2, ensure_ascii=False),
             )
         else:
             logger.debug(
                 "LLM resp had no message. finish_reason: %s",
                 first_choice.finish_reason,
             )
 
         usage = (
@@ -247,8 +247,8 @@ async def _fetch_response(
             logger.debug("Calling LLM")
         else:
             logger.debug(
-                f"{json.dumps(converted_messages, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 86c8e69cb..a0e72d604 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -93,7 +93,13 @@ async def get_response(
         else:
             logger.debug(
                 "LLM resp:\n"
-                f"{json.dumps([x.model_dump() for x in response.output], indent=2)}\n"
+                f"""{
+                    json.dumps(
+                        [x.model_dump() for x in response.output],
+                        indent=2,
+                        ensure_ascii=False,
+                    )
+                }\n"""
             )
 
         usage = (
@@ -237,8 +243,8 @@ async def _fetch_response(
         else:
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
-                f"{json.dumps(list_input, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n"
+                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
diff --git a/src/agents/tool_context.py b/src/agents/tool_context.py
index 17b595f06..c4329b8af 100644
--- a/src/agents/tool_context.py
+++ b/src/agents/tool_context.py
@@ -7,6 +7,7 @@
 def _assert_must_pass_tool_call_id() -> str:
     raise ValueError("tool_call_id must be passed to ToolContext")
 
+
 @dataclass
 class ToolContext(RunContextWrapper[TContext]):
     """The context of a tool call."""