Skip to content

Upgrade OpenAI SDK version #730

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ requires-python = ">=3.9"
license = "MIT"
authors = [{ name = "OpenAI", email = "support@openai.com" }]
dependencies = [
"openai>=1.76.0",
"openai>=1.81.0",
"pydantic>=2.10, <3",
"griffe>=1.5.6, <2",
"typing-extensions>=4.12.2, <5",
Expand Down
26 changes: 25 additions & 1 deletion src/agents/models/chatcmpl_stream_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,16 @@ class StreamingState:
function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)


class SequenceNumber:
    """Monotonically increasing counter used to stamp streamed response events.

    Each call to ``get_and_increment`` hands out the current value (starting
    at 0) and advances the counter by one.
    """

    def __init__(self) -> None:
        # The next value to hand out.
        self._next = 0

    def get_and_increment(self) -> int:
        """Return the current sequence number, then advance the counter."""
        current, self._next = self._next, self._next + 1
        return current


class ChatCmplStreamHandler:
@classmethod
async def handle_stream(
Expand All @@ -47,13 +57,14 @@ async def handle_stream(
) -> AsyncIterator[TResponseStreamEvent]:
usage: CompletionUsage | None = None
state = StreamingState()

sequence_number = SequenceNumber()
async for chunk in stream:
if not state.started:
state.started = True
yield ResponseCreatedEvent(
response=response,
type="response.created",
sequence_number=sequence_number.get_and_increment(),
)

# This is always set by the OpenAI API, but not by others e.g. LiteLLM
Expand Down Expand Up @@ -89,6 +100,7 @@ async def handle_stream(
item=assistant_item,
output_index=0,
type="response.output_item.added",
sequence_number=sequence_number.get_and_increment(),
)
yield ResponseContentPartAddedEvent(
content_index=state.text_content_index_and_output[0],
Expand All @@ -100,6 +112,7 @@ async def handle_stream(
annotations=[],
),
type="response.content_part.added",
sequence_number=sequence_number.get_and_increment(),
)
# Emit the delta for this segment of content
yield ResponseTextDeltaEvent(
Expand All @@ -108,6 +121,7 @@ async def handle_stream(
item_id=FAKE_RESPONSES_ID,
output_index=0,
type="response.output_text.delta",
sequence_number=sequence_number.get_and_increment(),
)
# Accumulate the text into the response part
state.text_content_index_and_output[1].text += delta.content
Expand All @@ -134,6 +148,7 @@ async def handle_stream(
item=assistant_item,
output_index=0,
type="response.output_item.added",
sequence_number=sequence_number.get_and_increment(),
)
yield ResponseContentPartAddedEvent(
content_index=state.refusal_content_index_and_output[0],
Expand All @@ -145,6 +160,7 @@ async def handle_stream(
annotations=[],
),
type="response.content_part.added",
sequence_number=sequence_number.get_and_increment(),
)
# Emit the delta for this segment of refusal
yield ResponseRefusalDeltaEvent(
Expand All @@ -153,6 +169,7 @@ async def handle_stream(
item_id=FAKE_RESPONSES_ID,
output_index=0,
type="response.refusal.delta",
sequence_number=sequence_number.get_and_increment(),
)
# Accumulate the refusal string in the output part
state.refusal_content_index_and_output[1].refusal += delta.refusal
Expand Down Expand Up @@ -190,6 +207,7 @@ async def handle_stream(
output_index=0,
part=state.text_content_index_and_output[1],
type="response.content_part.done",
sequence_number=sequence_number.get_and_increment(),
)

if state.refusal_content_index_and_output:
Expand All @@ -201,6 +219,7 @@ async def handle_stream(
output_index=0,
part=state.refusal_content_index_and_output[1],
type="response.content_part.done",
sequence_number=sequence_number.get_and_increment(),
)

# Actually send events for the function calls
Expand All @@ -216,13 +235,15 @@ async def handle_stream(
),
output_index=function_call_starting_index,
type="response.output_item.added",
sequence_number=sequence_number.get_and_increment(),
)
# Then, yield the args
yield ResponseFunctionCallArgumentsDeltaEvent(
delta=function_call.arguments,
item_id=FAKE_RESPONSES_ID,
output_index=function_call_starting_index,
type="response.function_call_arguments.delta",
sequence_number=sequence_number.get_and_increment(),
)
# Finally, the ResponseOutputItemDone
yield ResponseOutputItemDoneEvent(
Expand All @@ -235,6 +256,7 @@ async def handle_stream(
),
output_index=function_call_starting_index,
type="response.output_item.done",
sequence_number=sequence_number.get_and_increment(),
)

# Finally, send the Response completed event
Expand All @@ -258,6 +280,7 @@ async def handle_stream(
item=assistant_msg,
output_index=0,
type="response.output_item.done",
sequence_number=sequence_number.get_and_increment(),
)

for function_call in state.function_calls.values():
Expand Down Expand Up @@ -289,4 +312,5 @@ async def handle_stream(
yield ResponseCompletedEvent(
response=final_response,
type="response.completed",
sequence_number=sequence_number.get_and_increment(),
)
16 changes: 5 additions & 11 deletions src/agents/models/openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from openai.types.responses import (
Response,
ResponseCompletedEvent,
ResponseIncludable,
ResponseStreamEvent,
ResponseTextConfigParam,
ToolParam,
Expand All @@ -36,13 +37,6 @@
_USER_AGENT = f"Agents/Python {__version__}"
_HEADERS = {"User-Agent": _USER_AGENT}

# From the Responses API
IncludeLiteral = Literal[
"file_search_call.results",
"message.input_image.image_url",
"computer_call_output.output.image_url",
]


class OpenAIResponsesModel(Model):
"""
Expand Down Expand Up @@ -273,7 +267,7 @@ def _get_client(self) -> AsyncOpenAI:
@dataclass
class ConvertedTools:
tools: list[ToolParam]
includes: list[IncludeLiteral]
includes: list[ResponseIncludable]


class Converter:
Expand Down Expand Up @@ -330,7 +324,7 @@ def convert_tools(
handoffs: list[Handoff[Any]],
) -> ConvertedTools:
converted_tools: list[ToolParam] = []
includes: list[IncludeLiteral] = []
includes: list[ResponseIncludable] = []

computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)]
if len(computer_tools) > 1:
Expand All @@ -348,7 +342,7 @@ def convert_tools(
return ConvertedTools(tools=converted_tools, includes=includes)

@classmethod
def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]:
def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, ResponseIncludable | None]:
"""Returns converted tool and includes"""

if isinstance(tool, FunctionTool):
Expand All @@ -359,7 +353,7 @@ def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]:
"type": "function",
"description": tool.description,
}
includes: IncludeLiteral | None = None
includes: ResponseIncludable | None = None
elif isinstance(tool, WebSearchTool):
ws: WebSearchToolParam = {
"type": "web_search_preview",
Expand Down
1 change: 1 addition & 0 deletions tests/fake_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,7 @@ async def stream_response(
yield ResponseCompletedEvent(
type="response.completed",
response=get_response_obj(output, usage=self.hardcoded_usage),
sequence_number=0,
)


Expand Down
4 changes: 4 additions & 0 deletions tests/test_responses_tracing.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ def __aiter__(self):
yield ResponseCompletedEvent(
type="response.completed",
response=fake_model.get_response_obj(self.output),
sequence_number=0,
)


Expand Down Expand Up @@ -201,6 +202,7 @@ async def __aiter__(self):
yield ResponseCompletedEvent(
type="response.completed",
response=fake_model.get_response_obj([], "dummy-id-123"),
sequence_number=0,
)

return DummyStream()
Expand Down Expand Up @@ -253,6 +255,7 @@ async def __aiter__(self):
yield ResponseCompletedEvent(
type="response.completed",
response=fake_model.get_response_obj([], "dummy-id-123"),
sequence_number=0,
)

return DummyStream()
Expand Down Expand Up @@ -304,6 +307,7 @@ async def __aiter__(self):
yield ResponseCompletedEvent(
type="response.completed",
response=fake_model.get_response_obj([], "dummy-id-123"),
sequence_number=0,
)

return DummyStream()
Expand Down
2 changes: 2 additions & 0 deletions tests/voice/test_workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,11 +81,13 @@ async def stream_response(
type="response.output_text.delta",
output_index=0,
item_id=item.id,
sequence_number=0,
)

yield ResponseCompletedEvent(
type="response.completed",
response=get_response_obj(output),
sequence_number=1,
)


Expand Down
10 changes: 5 additions & 5 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.