Skip to content

Commit ce2e2a4

Browse files
authored
Upgrade openAI sdk version (#730)
--- [//]: # (BEGIN SAPLING FOOTER) * #732 * #731 * __->__ #730
1 parent 466b44d commit ce2e2a4

File tree

7 files changed

+43
-18
lines changed

7 files changed

+43
-18
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ requires-python = ">=3.9"
77
license = "MIT"
88
authors = [{ name = "OpenAI", email = "support@openai.com" }]
99
dependencies = [
10-
"openai>=1.76.0",
10+
"openai>=1.81.0",
1111
"pydantic>=2.10, <3",
1212
"griffe>=1.5.6, <2",
1313
"typing-extensions>=4.12.2, <5",

src/agents/models/chatcmpl_stream_handler.py

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,16 @@ class StreamingState:
3838
function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)
3939

4040

41+
class SequenceNumber:
42+
    def __init__(self):
43+
        self._sequence_number = 0
44+
45+
    def get_and_increment(self) -> int:
46+
        num = self._sequence_number
47+
        self._sequence_number += 1
48+
        return num
49+
50+
4151
class ChatCmplStreamHandler:
4252
@classmethod
4353
async def handle_stream(
@@ -47,13 +57,14 @@ async def handle_stream(
4757
) -> AsyncIterator[TResponseStreamEvent]:
4858
usage: CompletionUsage | None = None
4959
state = StreamingState()
50-
60+
sequence_number = SequenceNumber()
5161
async for chunk in stream:
5262
if not state.started:
5363
state.started = True
5464
yield ResponseCreatedEvent(
5565
response=response,
5666
type="response.created",
67+
sequence_number=sequence_number.get_and_increment(),
5768
)
5869

5970
# This is always set by the OpenAI API, but not by others e.g. LiteLLM
@@ -89,6 +100,7 @@ async def handle_stream(
89100
item=assistant_item,
90101
output_index=0,
91102
type="response.output_item.added",
103+
sequence_number=sequence_number.get_and_increment(),
92104
)
93105
yield ResponseContentPartAddedEvent(
94106
content_index=state.text_content_index_and_output[0],
@@ -100,6 +112,7 @@ async def handle_stream(
100112
annotations=[],
101113
),
102114
type="response.content_part.added",
115+
sequence_number=sequence_number.get_and_increment(),
103116
)
104117
# Emit the delta for this segment of content
105118
yield ResponseTextDeltaEvent(
@@ -108,6 +121,7 @@ async def handle_stream(
108121
item_id=FAKE_RESPONSES_ID,
109122
output_index=0,
110123
type="response.output_text.delta",
124+
sequence_number=sequence_number.get_and_increment(),
111125
)
112126
# Accumulate the text into the response part
113127
state.text_content_index_and_output[1].text += delta.content
@@ -134,6 +148,7 @@ async def handle_stream(
134148
item=assistant_item,
135149
output_index=0,
136150
type="response.output_item.added",
151+
sequence_number=sequence_number.get_and_increment(),
137152
)
138153
yield ResponseContentPartAddedEvent(
139154
content_index=state.refusal_content_index_and_output[0],
@@ -145,6 +160,7 @@ async def handle_stream(
145160
annotations=[],
146161
),
147162
type="response.content_part.added",
163+
sequence_number=sequence_number.get_and_increment(),
148164
)
149165
# Emit the delta for this segment of refusal
150166
yield ResponseRefusalDeltaEvent(
@@ -153,6 +169,7 @@ async def handle_stream(
153169
item_id=FAKE_RESPONSES_ID,
154170
output_index=0,
155171
type="response.refusal.delta",
172+
sequence_number=sequence_number.get_and_increment(),
156173
)
157174
# Accumulate the refusal string in the output part
158175
state.refusal_content_index_and_output[1].refusal += delta.refusal
@@ -190,6 +207,7 @@ async def handle_stream(
190207
output_index=0,
191208
part=state.text_content_index_and_output[1],
192209
type="response.content_part.done",
210+
sequence_number=sequence_number.get_and_increment(),
193211
)
194212

195213
if state.refusal_content_index_and_output:
@@ -201,6 +219,7 @@ async def handle_stream(
201219
output_index=0,
202220
part=state.refusal_content_index_and_output[1],
203221
type="response.content_part.done",
222+
sequence_number=sequence_number.get_and_increment(),
204223
)
205224

206225
# Actually send events for the function calls
@@ -216,13 +235,15 @@ async def handle_stream(
216235
),
217236
output_index=function_call_starting_index,
218237
type="response.output_item.added",
238+
sequence_number=sequence_number.get_and_increment(),
219239
)
220240
# Then, yield the args
221241
yield ResponseFunctionCallArgumentsDeltaEvent(
222242
delta=function_call.arguments,
223243
item_id=FAKE_RESPONSES_ID,
224244
output_index=function_call_starting_index,
225245
type="response.function_call_arguments.delta",
246+
sequence_number=sequence_number.get_and_increment(),
226247
)
227248
# Finally, the ResponseOutputItemDone
228249
yield ResponseOutputItemDoneEvent(
@@ -235,6 +256,7 @@ async def handle_stream(
235256
),
236257
output_index=function_call_starting_index,
237258
type="response.output_item.done",
259+
sequence_number=sequence_number.get_and_increment(),
238260
)
239261

240262
# Finally, send the Response completed event
@@ -258,6 +280,7 @@ async def handle_stream(
258280
item=assistant_msg,
259281
output_index=0,
260282
type="response.output_item.done",
283+
sequence_number=sequence_number.get_and_increment(),
261284
)
262285

263286
for function_call in state.function_calls.values():
@@ -289,4 +312,5 @@ async def handle_stream(
289312
yield ResponseCompletedEvent(
290313
response=final_response,
291314
type="response.completed",
315+
sequence_number=sequence_number.get_and_increment(),
292316
)

src/agents/models/openai_responses.py

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
from openai.types.responses import (
1111
Response,
1212
ResponseCompletedEvent,
13+
ResponseIncludable,
1314
ResponseStreamEvent,
1415
ResponseTextConfigParam,
1516
ToolParam,
@@ -36,13 +37,6 @@
3637
_USER_AGENT = f"Agents/Python {__version__}"
3738
_HEADERS = {"User-Agent": _USER_AGENT}
3839

39-
# From the Responses API
40-
IncludeLiteral = Literal[
41-
"file_search_call.results",
42-
"message.input_image.image_url",
43-
"computer_call_output.output.image_url",
44-
]
45-
4640

4741
class OpenAIResponsesModel(Model):
4842
"""
@@ -273,7 +267,7 @@ def _get_client(self) -> AsyncOpenAI:
273267
@dataclass
274268
class ConvertedTools:
275269
tools: list[ToolParam]
276-
includes: list[IncludeLiteral]
270+
includes: list[ResponseIncludable]
277271

278272

279273
class Converter:
@@ -330,7 +324,7 @@ def convert_tools(
330324
handoffs: list[Handoff[Any]],
331325
) -> ConvertedTools:
332326
converted_tools: list[ToolParam] = []
333-
includes: list[IncludeLiteral] = []
327+
includes: list[ResponseIncludable] = []
334328

335329
computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)]
336330
if len(computer_tools) > 1:
@@ -348,7 +342,7 @@ def convert_tools(
348342
return ConvertedTools(tools=converted_tools, includes=includes)
349343

350344
@classmethod
351-
def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]:
345+
def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, ResponseIncludable | None]:
352346
"""Returns converted tool and includes"""
353347

354348
if isinstance(tool, FunctionTool):
@@ -359,7 +353,7 @@ def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]:
359353
"type": "function",
360354
"description": tool.description,
361355
}
362-
includes: IncludeLiteral | None = None
356+
includes: ResponseIncludable | None = None
363357
elif isinstance(tool, WebSearchTool):
364358
ws: WebSearchToolParam = {
365359
"type": "web_search_preview",

tests/fake_model.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,7 @@ async def stream_response(
129129
yield ResponseCompletedEvent(
130130
type="response.completed",
131131
response=get_response_obj(output, usage=self.hardcoded_usage),
132+
sequence_number=0,
132133
)
133134

134135

tests/test_responses_tracing.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ def __aiter__(self):
5050
yield ResponseCompletedEvent(
5151
type="response.completed",
5252
response=fake_model.get_response_obj(self.output),
53+
sequence_number=0,
5354
)
5455

5556

@@ -201,6 +202,7 @@ async def __aiter__(self):
201202
yield ResponseCompletedEvent(
202203
type="response.completed",
203204
response=fake_model.get_response_obj([], "dummy-id-123"),
205+
sequence_number=0,
204206
)
205207

206208
return DummyStream()
@@ -253,6 +255,7 @@ async def __aiter__(self):
253255
yield ResponseCompletedEvent(
254256
type="response.completed",
255257
response=fake_model.get_response_obj([], "dummy-id-123"),
258+
sequence_number=0,
256259
)
257260

258261
return DummyStream()
@@ -304,6 +307,7 @@ async def __aiter__(self):
304307
yield ResponseCompletedEvent(
305308
type="response.completed",
306309
response=fake_model.get_response_obj([], "dummy-id-123"),
310+
sequence_number=0,
307311
)
308312

309313
return DummyStream()

tests/voice/test_workflow.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,11 +81,13 @@ async def stream_response(
8181
type="response.output_text.delta",
8282
output_index=0,
8383
item_id=item.id,
84+
sequence_number=0,
8485
)
8586

8687
yield ResponseCompletedEvent(
8788
type="response.completed",
8889
response=get_response_obj(output),
90+
sequence_number=1,
8991
)
9092

9193

uv.lock

Lines changed: 5 additions & 5 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)