diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index dc672acd..432c1b2c 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -73,6 +73,7 @@ async def get_response(
         previous_response_id: str | None,
     ) -> ModelResponse:
         with generation_span(
+            input=input if tracing.include_data() else None,
             model=str(self.model),
             model_config=model_settings.to_json_dict()
             | {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
@@ -145,6 +146,7 @@ async def stream_response(
         previous_response_id: str | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         with generation_span(
+            input=input if tracing.include_data() else None,
             model=str(self.model),
             model_config=model_settings.to_json_dict()
             | {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 89619f83..e4c3f72e 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -54,6 +54,7 @@ async def get_response(
         previous_response_id: str | None,
     ) -> ModelResponse:
         with generation_span(
+            input=input if tracing.include_data() else None,
             model=str(self.model),
             model_config=model_settings.to_json_dict() | {"base_url": str(self._client.base_url)},
             disabled=tracing.is_disabled(),
@@ -118,6 +119,7 @@ async def stream_response(
         Yields a partial message as it is generated, as well as the usage information.
         """
         with generation_span(
+            input=input if tracing.include_data() else None,
             model=str(self.model),
             model_config=model_settings.to_json_dict() | {"base_url": str(self._client.base_url)},
             disabled=tracing.is_disabled(),
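
For reference, a minimal sketch of the gate each hunk adds: the raw model input is attached to the generation span only when the tracing mode permits sensitive data, matching the existing disabled=tracing.is_disabled() pattern. The ModelTracing stand-in below is an assumption reconstructed from the two calls visible in the diff, not the SDK's actual definition.

from enum import Enum


class ModelTracing(Enum):
    # Hypothetical stand-in for the SDK's tracing mode, inferred from the
    # two calls the diff makes; the member names are assumptions.
    DISABLED = 0
    ENABLED = 1
    ENABLED_WITHOUT_DATA = 2

    def is_disabled(self) -> bool:
        # Mirrors the existing disabled=tracing.is_disabled() argument.
        return self == ModelTracing.DISABLED

    def include_data(self) -> bool:
        # Only a fully enabled trace may carry raw prompts/inputs.
        return self == ModelTracing.ENABLED


def span_input(input: str, tracing: ModelTracing) -> str | None:
    # The expression each hunk adds: record the input on the generation
    # span only when the tracing mode allows sensitive data.
    return input if tracing.include_data() else None


assert span_input("hello", ModelTracing.ENABLED) == "hello"
assert span_input("hello", ModelTracing.ENABLED_WITHOUT_DATA) is None
assert span_input("hello", ModelTracing.DISABLED) is None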