diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index a4c14007b3..0453d70e4a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.82.1"
+ ".": "1.83.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index d761f22d73..6f5097c531 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fc64d7c2c8f51f750813375356c3f3fdfc7fc1b1b34f19c20a5410279d445d37.yml
-openapi_spec_hash: 618285fc70199ee32b9ebe4bf72f7e4c
-config_hash: 535b6e5f26a295d609b259c8cb8f656c
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2bcc845d8635bf93ddcf9ee723af4d7928248412a417bee5fc10d863a1e13867.yml
+openapi_spec_hash: 865230cb3abeb01bd85de05891af23c4
+config_hash: ed1e6b3c5f93d12b80d31167f55c557c
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 31b7792a53..645599e6df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,27 @@
# Changelog
+## 1.83.0 (2025-06-02)
+
+Full Changelog: [v1.82.1...v1.83.0](https://github.com/openai/openai-python/compare/v1.82.1...v1.83.0)
+
+### Features
+
+* **api:** Config update for pakrym-stream-param ([88bcf3a](https://github.com/openai/openai-python/commit/88bcf3af9ce8ffa8347547d4d30aacac1ceba939))
+* **client:** add follow_redirects request option ([26d715f](https://github.com/openai/openai-python/commit/26d715f4e9b0f2b19e2ac16acc796a949338e1e1))
+
+
+### Bug Fixes
+
+* **api:** Fix evals and code interpreter interfaces ([2650159](https://github.com/openai/openai-python/commit/2650159f6d01f6eb481cf8c7942142e4fd21ce44))
+* **client:** return binary content from `get /containers/{container_id}/files/{file_id}/content` ([f7c80c4](https://github.com/openai/openai-python/commit/f7c80c4368434bd0be7436375076ba33a62f63b5))
+
+
+### Chores
+
+* **api:** mark some methods as deprecated ([3e2ca57](https://github.com/openai/openai-python/commit/3e2ca571cb6cdd9e15596590605b2f98a4c5a42e))
+* deprecate Assistants API ([9d166d7](https://github.com/openai/openai-python/commit/9d166d795e03dea49af680ec9597e9497522187c))
+* **docs:** remove reference to rye shell ([c7978e9](https://github.com/openai/openai-python/commit/c7978e9f1640c311022988fcd716cbb5c865daa8))
+
## 1.82.1 (2025-05-29)
Full Changelog: [v1.82.0...v1.82.1](https://github.com/openai/openai-python/compare/v1.82.0...v1.82.1)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 52c2eb213a..c14e652328 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -17,8 +17,7 @@ $ rye sync --all-features
You can then run scripts using `rye run python script.py` or by activating the virtual environment:
```sh
-$ rye shell
-# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work
+# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work
$ source .venv/bin/activate
# now you can omit the `rye run` prefix
diff --git a/api.md b/api.md
index 73d50fa328..732436aacd 100644
--- a/api.md
+++ b/api.md
@@ -784,7 +784,7 @@ Methods:
- client.responses.create(\*\*params) -> Response
- client.responses.retrieve(response_id, \*\*params) -> Response
- client.responses.delete(response_id) -> None
-- client.responses.cancel(response_id) -> None
+- client.responses.cancel(response_id) -> Response
## InputItems
@@ -894,4 +894,4 @@ Methods:
Methods:
-- client.containers.files.content.retrieve(file_id, \*, container_id) -> None
+- client.containers.files.content.retrieve(file_id, \*, container_id) -> HttpxBinaryResponseContent
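With this change, container file content comes back as binary instead of `None`. A minimal consumption sketch (the IDs are placeholders; `HttpxBinaryResponseContent` exposes helpers such as `write_to_file` in this SDK):

```python
# Sketch: downloading container file bytes under the updated return type.
# The container and file IDs below are placeholders.
from openai import OpenAI

client = OpenAI()

content = client.containers.files.content.retrieve(
    "file_123",               # placeholder file ID
    container_id="cntr_123",  # placeholder container ID
)
content.write_to_file("output.bin")  # persist the raw bytes locally
```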
diff --git a/examples/assistant.py b/examples/assistant.py
deleted file mode 100644
index f6924a0c7d..0000000000
--- a/examples/assistant.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import openai
-
-# gets API Key from environment variable OPENAI_API_KEY
-client = openai.OpenAI()
-
-assistant = client.beta.assistants.create(
- name="Math Tutor",
- instructions="You are a personal math tutor. Write and run code to answer math questions.",
- tools=[{"type": "code_interpreter"}],
- model="gpt-4-1106-preview",
-)
-
-thread = client.beta.threads.create()
-
-message = client.beta.threads.messages.create(
- thread_id=thread.id,
- role="user",
- content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
-)
-
-run = client.beta.threads.runs.create_and_poll(
- thread_id=thread.id,
- assistant_id=assistant.id,
- instructions="Please address the user as Jane Doe. The user has a premium account.",
-)
-
-print("Run completed with status: " + run.status)
-
-if run.status == "completed":
- messages = client.beta.threads.messages.list(thread_id=thread.id)
-
- print("messages: ")
- for message in messages:
- assert message.content[0].type == "text"
- print({"role": message.role, "message": message.content[0].text.value})
-
- client.beta.assistants.delete(assistant.id)
diff --git a/examples/assistant_stream.py b/examples/assistant_stream.py
deleted file mode 100644
index 0465d3930f..0000000000
--- a/examples/assistant_stream.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import openai
-
-# gets API Key from environment variable OPENAI_API_KEY
-client = openai.OpenAI()
-
-assistant = client.beta.assistants.create(
- name="Math Tutor",
- instructions="You are a personal math tutor. Write and run code to answer math questions.",
- tools=[{"type": "code_interpreter"}],
- model="gpt-4-1106-preview",
-)
-
-thread = client.beta.threads.create()
-
-message = client.beta.threads.messages.create(
- thread_id=thread.id,
- role="user",
- content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
-)
-
-print("starting run stream")
-
-stream = client.beta.threads.runs.create(
- thread_id=thread.id,
- assistant_id=assistant.id,
- instructions="Please address the user as Jane Doe. The user has a premium account.",
- stream=True,
-)
-
-for event in stream:
- print(event.model_dump_json(indent=2, exclude_unset=True))
-
-client.beta.assistants.delete(assistant.id)
diff --git a/examples/assistant_stream_helpers.py b/examples/assistant_stream_helpers.py
deleted file mode 100644
index 7baec77c72..0000000000
--- a/examples/assistant_stream_helpers.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from __future__ import annotations
-
-from typing_extensions import override
-
-import openai
-from openai import AssistantEventHandler
-from openai.types.beta import AssistantStreamEvent
-from openai.types.beta.threads import Text, TextDelta
-from openai.types.beta.threads.runs import RunStep, RunStepDelta
-
-
-class EventHandler(AssistantEventHandler):
- @override
- def on_event(self, event: AssistantStreamEvent) -> None:
- if event.event == "thread.run.step.created":
- details = event.data.step_details
- if details.type == "tool_calls":
- print("Generating code to interpret:\n\n```py")
- elif event.event == "thread.message.created":
- print("\nResponse:\n")
-
- @override
- def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
- print(delta.value, end="", flush=True)
-
- @override
- def on_run_step_done(self, run_step: RunStep) -> None:
- details = run_step.step_details
- if details.type == "tool_calls":
- for tool in details.tool_calls:
- if tool.type == "code_interpreter":
- print("\n```\nExecuting code...")
-
- @override
- def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:
- details = delta.step_details
- if details is not None and details.type == "tool_calls":
- for tool in details.tool_calls or []:
- if tool.type == "code_interpreter" and tool.code_interpreter and tool.code_interpreter.input:
- print(tool.code_interpreter.input, end="", flush=True)
-
-
-def main() -> None:
- client = openai.OpenAI()
-
- assistant = client.beta.assistants.create(
- name="Math Tutor",
- instructions="You are a personal math tutor. Write and run code to answer math questions.",
- tools=[{"type": "code_interpreter"}],
- model="gpt-4-1106-preview",
- )
-
- try:
- question = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
-
- thread = client.beta.threads.create(
- messages=[
- {
- "role": "user",
- "content": question,
- },
- ]
- )
- print(f"Question: {question}\n")
-
- with client.beta.threads.runs.stream(
- thread_id=thread.id,
- assistant_id=assistant.id,
- instructions="Please address the user as Jane Doe. The user has a premium account.",
- event_handler=EventHandler(),
- ) as stream:
- stream.until_done()
- print()
- finally:
- client.beta.assistants.delete(assistant.id)
-
-
-main()
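These Assistants API examples were removed in step with the deprecation noted in the changelog. For readers migrating, a rough Responses API equivalent of the math-tutor flow might look like the following; the model name and code interpreter container settings are illustrative assumptions, not taken from the removed files:

```python
# Hedged migration sketch: the "Math Tutor" flow on the Responses API.
# Model name and tool configuration are assumptions for illustration.
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4.1",  # assumed model; any Responses-capable model works
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
    input="I need to solve the equation `3x + 11 = 14`. Can you help me?",
)
print(response.output_text)
```

Unlike the thread/run lifecycle above, the Responses API returns the completed result in a single call, so no polling helper is needed.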
diff --git a/pyproject.toml b/pyproject.toml
index 190e9bbbfa..7d3cd30413 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.82.1"
+version = "1.83.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index a0f9cce7d8..44b3603008 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -962,6 +962,9 @@ def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
+
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
@@ -1477,6 +1480,9 @@ async def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
+
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
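Both the sync and async clients now forward `options.follow_redirects` into the kwargs handed to httpx's `send`. For reference, this relies on httpx's standard redirect handling, roughly:

```python
# Standalone httpx illustration of the forwarded kwarg (not SDK internals):
# without follow_redirects a 3xx response is returned as-is; with it,
# httpx transparently follows the Location header.
import httpx

with httpx.Client() as http:
    request = http.build_request("GET", "https://example.com/old-path")
    response = http.send(request, follow_redirects=True)
    print(response.status_code, response.url)
```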
diff --git a/src/openai/_models.py b/src/openai/_models.py
index e2fce49250..065e8da760 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -777,6 +777,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
idempotency_key: str
json_data: Body
extra_json: AnyMapping
+ follow_redirects: bool
@final
@@ -790,6 +791,7 @@ class FinalRequestOptions(pydantic.BaseModel):
files: Union[HttpxRequestFiles, None] = None
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
+ follow_redirects: Union[bool, None] = None
# It should be noted that we cannot use `json` here as that would override
# a BaseModel method in an incompatible fashion.
diff --git a/src/openai/_types.py b/src/openai/_types.py
index a5cf207aa3..5dae55f4a9 100644
--- a/src/openai/_types.py
+++ b/src/openai/_types.py
@@ -101,6 +101,7 @@ class RequestOptions(TypedDict, total=False):
params: Query
extra_json: AnyMapping
idempotency_key: str
+ follow_redirects: bool
# Sentinel class used until PEP 0661 is accepted
@@ -217,3 +218,4 @@ class _GenericAlias(Protocol):
class HttpxSendArgs(TypedDict, total=False):
auth: httpx.Auth
+ follow_redirects: bool
diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py
index 60f9dfcbcb..4fd49a1908 100644
--- a/src/openai/_utils/_transform.py
+++ b/src/openai/_utils/_transform.py
@@ -212,7 +212,7 @@ def _transform_recursive(
return data
if isinstance(data, pydantic.BaseModel):
- return model_dump(data, exclude_unset=True, mode="json", exclude=getattr(data, '__api_exclude__', None))
+ return model_dump(data, exclude_unset=True, mode="json", exclude=getattr(data, "__api_exclude__", None))
annotated_type = _get_annotated_type(annotation)
if annotated_type is None:
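The reformatted line reads an optional `__api_exclude__` attribute off the model so those fields are dropped during serialization. A hedged sketch of the underlying pydantic mechanism (the `Example` model and its fields are invented for illustration):

```python
# Sketch of the exclusion hook: model_dump(exclude=...) omits named fields.
# "Example" and its fields are invented; only the getattr pattern mirrors
# the SDK line above.
import pydantic

class Example(pydantic.BaseModel):
    __api_exclude__ = {"internal_note"}  # fields to omit on the wire

    name: str
    internal_note: str

data = Example(name="demo", internal_note="do not serialize")
payload = data.model_dump(mode="json", exclude=getattr(data, "__api_exclude__", None))
print(payload)  # {'name': 'demo'}
```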
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 9bf34c1f6b..d947f7a74a 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.82.1" # x-release-please-version
+__version__ = "1.83.0" # x-release-please-version
diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py
index 235f912405..41be1d37b0 100644
--- a/src/openai/lib/_parsing/_responses.py
+++ b/src/openai/lib/_parsing/_responses.py
@@ -109,7 +109,7 @@ def parse_response(
or output.type == "code_interpreter_call"
or output.type == "local_shell_call"
or output.type == "mcp_list_tools"
- or output.type == 'exec'
+ or output.type == "exec"
):
output_list.append(output)
elif TYPE_CHECKING: # type: ignore
diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py
index 3c0d4d47c1..90d8b8fdc4 100644
--- a/src/openai/resources/beta/realtime/sessions.py
+++ b/src/openai/resources/beta/realtime/sessions.py
@@ -43,6 +43,7 @@ def with_streaming_response(self) -> SessionsWithStreamingResponse:
def create(
self,
*,
+ client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
@@ -83,6 +84,8 @@ def create(
the Realtime API.
Args:
+ client_secret: Configuration options for the generated client secret.
+
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
(mono), and little-endian byte order.
@@ -163,6 +166,7 @@ def create(
"/realtime/sessions",
body=maybe_transform(
{
+ "client_secret": client_secret,
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
"input_audio_transcription": input_audio_transcription,
@@ -209,6 +213,7 @@ def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
async def create(
self,
*,
+ client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
@@ -249,6 +254,8 @@ async def create(
the Realtime API.
Args:
+ client_secret: Configuration options for the generated client secret.
+
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
(mono), and little-endian byte order.
@@ -329,6 +336,7 @@ async def create(
"/realtime/sessions",
body=await async_maybe_transform(
{
+ "client_secret": client_secret,
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
"input_audio_transcription": input_audio_transcription,
diff --git a/src/openai/resources/beta/realtime/transcription_sessions.py b/src/openai/resources/beta/realtime/transcription_sessions.py
index dbcb1bb33b..5f97b3c8e3 100644
--- a/src/openai/resources/beta/realtime/transcription_sessions.py
+++ b/src/openai/resources/beta/realtime/transcription_sessions.py
@@ -43,6 +43,7 @@ def with_streaming_response(self) -> TranscriptionSessionsWithStreamingResponse:
def create(
self,
*,
+ client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
include: List[str] | NotGiven = NOT_GIVEN,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
@@ -67,6 +68,8 @@ def create(
the Realtime API.
Args:
+ client_secret: Configuration options for the generated client secret.
+
include:
The set of items to include in the transcription. Current available items are:
@@ -113,6 +116,7 @@ def create(
"/realtime/transcription_sessions",
body=maybe_transform(
{
+ "client_secret": client_secret,
"include": include,
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
@@ -152,6 +156,7 @@ def with_streaming_response(self) -> AsyncTranscriptionSessionsWithStreamingResp
async def create(
self,
*,
+ client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
include: List[str] | NotGiven = NOT_GIVEN,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
@@ -176,6 +181,8 @@ async def create(
the Realtime API.
Args:
+ client_secret: Configuration options for the generated client secret.
+
include:
The set of items to include in the transcription. Current available items are:
@@ -222,6 +229,7 @@ async def create(
"/realtime/transcription_sessions",
body=await async_maybe_transform(
{
+ "client_secret": client_secret,
"include": include,
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py
index 3a8913ef16..943d2e7f05 100644
--- a/src/openai/resources/beta/threads/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import typing_extensions
from typing import Union, Iterable, Optional
from typing_extensions import Literal
@@ -47,6 +48,7 @@ def with_streaming_response(self) -> MessagesWithStreamingResponse:
"""
return MessagesWithStreamingResponse(self)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
self,
thread_id: str,
@@ -113,6 +115,7 @@ def create(
cast_to=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def retrieve(
self,
message_id: str,
@@ -150,6 +153,7 @@ def retrieve(
cast_to=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def update(
self,
message_id: str,
@@ -196,6 +200,7 @@ def update(
cast_to=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
self,
thread_id: str,
@@ -267,6 +272,7 @@ def list(
model=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def delete(
self,
message_id: str,
@@ -325,6 +331,7 @@ def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
"""
return AsyncMessagesWithStreamingResponse(self)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create(
self,
thread_id: str,
@@ -391,6 +398,7 @@ async def create(
cast_to=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def retrieve(
self,
message_id: str,
@@ -428,6 +436,7 @@ async def retrieve(
cast_to=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def update(
self,
message_id: str,
@@ -474,6 +483,7 @@ async def update(
cast_to=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
self,
thread_id: str,
@@ -545,6 +555,7 @@ def list(
model=Message,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def delete(
self,
message_id: str,
@@ -587,20 +598,30 @@ class MessagesWithRawResponse:
def __init__(self, messages: Messages) -> None:
self._messages = messages
- self.create = _legacy_response.to_raw_response_wrapper(
- messages.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ messages.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = _legacy_response.to_raw_response_wrapper(
- messages.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ messages.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = _legacy_response.to_raw_response_wrapper(
- messages.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ messages.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = _legacy_response.to_raw_response_wrapper(
- messages.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ messages.list # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = _legacy_response.to_raw_response_wrapper(
- messages.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ messages.delete # pyright: ignore[reportDeprecated],
+ )
)
@@ -608,20 +629,30 @@ class AsyncMessagesWithRawResponse:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
- self.create = _legacy_response.async_to_raw_response_wrapper(
- messages.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ messages.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = _legacy_response.async_to_raw_response_wrapper(
- messages.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ messages.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = _legacy_response.async_to_raw_response_wrapper(
- messages.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ messages.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = _legacy_response.async_to_raw_response_wrapper(
- messages.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ messages.list # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = _legacy_response.async_to_raw_response_wrapper(
- messages.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ messages.delete # pyright: ignore[reportDeprecated],
+ )
)
@@ -629,20 +660,30 @@ class MessagesWithStreamingResponse:
def __init__(self, messages: Messages) -> None:
self._messages = messages
- self.create = to_streamed_response_wrapper(
- messages.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ messages.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = to_streamed_response_wrapper(
- messages.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ messages.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = to_streamed_response_wrapper(
- messages.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ messages.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = to_streamed_response_wrapper(
- messages.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ messages.list # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = to_streamed_response_wrapper(
- messages.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ messages.delete # pyright: ignore[reportDeprecated],
+ )
)
@@ -650,18 +691,28 @@ class AsyncMessagesWithStreamingResponse:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
- self.create = async_to_streamed_response_wrapper(
- messages.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ messages.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = async_to_streamed_response_wrapper(
- messages.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ messages.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = async_to_streamed_response_wrapper(
- messages.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ messages.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = async_to_streamed_response_wrapper(
- messages.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ messages.list # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = async_to_streamed_response_wrapper(
- messages.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ messages.delete # pyright: ignore[reportDeprecated],
+ )
)
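`typing_extensions.deprecated` (PEP 702) both flags call sites for type checkers and emits a `DeprecationWarning` at runtime, which is why the wrapper assignments above now carry `pyright: ignore[reportDeprecated]` comments. A minimal self-contained demonstration of the decorator:

```python
# Minimal demo of typing_extensions.deprecated (PEP 702): type checkers
# flag call sites, and calling the function emits a DeprecationWarning.
import warnings

import typing_extensions

@typing_extensions.deprecated("use new_api() instead")
def old_api() -> str:
    return "result"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_api()

print(caught[0].category.__name__)  # DeprecationWarning
```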
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 4d19010fea..3d9ae9759e 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -83,6 +83,7 @@ def with_streaming_response(self) -> RunsWithStreamingResponse:
return RunsWithStreamingResponse(self)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
self,
thread_id: str,
@@ -233,6 +234,7 @@ def create(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
self,
thread_id: str,
@@ -383,6 +385,7 @@ def create(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
self,
thread_id: str,
@@ -532,6 +535,7 @@ def create(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["assistant_id"], ["assistant_id", "stream"])
def create(
self,
@@ -601,6 +605,7 @@ def create(
stream_cls=Stream[AssistantStreamEvent],
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def retrieve(
self,
run_id: str,
@@ -638,6 +643,7 @@ def retrieve(
cast_to=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def update(
self,
run_id: str,
@@ -684,6 +690,7 @@ def update(
cast_to=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
self,
thread_id: str,
@@ -751,6 +758,7 @@ def list(
model=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def cancel(
self,
run_id: str,
@@ -788,6 +796,7 @@ def cancel(
cast_to=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create_and_poll(
self,
*,
@@ -822,7 +831,7 @@ def create_and_poll(
lifecycles can be found here:
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
- run = self.create(
+ run = self.create( # pyright: ignore[reportDeprecated]
thread_id=thread_id,
assistant_id=assistant_id,
include=include,
@@ -848,7 +857,7 @@ def create_and_poll(
extra_body=extra_body,
timeout=timeout,
)
- return self.poll(
+ return self.poll( # pyright: ignore[reportDeprecated]
run.id,
thread_id=thread_id,
extra_headers=extra_headers,
@@ -996,6 +1005,7 @@ def create_and_stream(
)
return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def poll(
self,
run_id: str,
@@ -1018,7 +1028,7 @@ def poll(
terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
while True:
- response = self.with_raw_response.retrieve(
+ response = self.with_raw_response.retrieve( # pyright: ignore[reportDeprecated]
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
@@ -1042,6 +1052,7 @@ def poll(
self._sleep(poll_interval_ms / 1000)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
self,
*,
@@ -1074,6 +1085,7 @@ def stream(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
self,
*,
@@ -1106,6 +1118,7 @@ def stream(
"""Create a Run stream"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
self,
*,
@@ -1184,6 +1197,7 @@ def stream(
return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs(
self,
run_id: str,
@@ -1222,6 +1236,7 @@ def submit_tool_outputs(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs(
self,
run_id: str,
@@ -1260,6 +1275,7 @@ def submit_tool_outputs(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs(
self,
run_id: str,
@@ -1297,7 +1313,8 @@ def submit_tool_outputs(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
def submit_tool_outputs(
self,
run_id: str,
@@ -1336,6 +1354,7 @@ def submit_tool_outputs(
stream_cls=Stream[AssistantStreamEvent],
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_and_poll(
self,
*,
@@ -1355,7 +1374,7 @@ def submit_tool_outputs_and_poll(
More information on Run lifecycles can be found here:
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
- run = self.submit_tool_outputs(
+ run = self.submit_tool_outputs( # pyright: ignore[reportDeprecated]
run_id=run_id,
thread_id=thread_id,
tool_outputs=tool_outputs,
@@ -1365,7 +1384,7 @@ def submit_tool_outputs_and_poll(
extra_body=extra_body,
timeout=timeout,
)
- return self.poll(
+ return self.poll( # pyright: ignore[reportDeprecated]
run_id=run.id,
thread_id=thread_id,
extra_headers=extra_headers,
@@ -1376,6 +1395,7 @@ def submit_tool_outputs_and_poll(
)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
self,
*,
@@ -1397,6 +1417,7 @@ def submit_tool_outputs_stream(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
self,
*,
@@ -1418,6 +1439,7 @@ def submit_tool_outputs_stream(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
self,
*,
@@ -1494,6 +1516,7 @@ def with_streaming_response(self) -> AsyncRunsWithStreamingResponse:
return AsyncRunsWithStreamingResponse(self)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create(
self,
thread_id: str,
@@ -1644,6 +1667,7 @@ async def create(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create(
self,
thread_id: str,
@@ -1794,6 +1818,7 @@ async def create(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create(
self,
thread_id: str,
@@ -1943,7 +1968,8 @@ async def create(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["assistant_id"], ["assistant_id", "stream"])
async def create(
self,
thread_id: str,
@@ -2012,6 +2039,7 @@ async def create(
stream_cls=AsyncStream[AssistantStreamEvent],
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def retrieve(
self,
run_id: str,
@@ -2049,6 +2077,7 @@ async def retrieve(
cast_to=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def update(
self,
run_id: str,
@@ -2095,6 +2124,7 @@ async def update(
cast_to=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
self,
thread_id: str,
@@ -2162,6 +2192,7 @@ def list(
model=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def cancel(
self,
run_id: str,
@@ -2199,6 +2230,7 @@ async def cancel(
cast_to=Run,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create_and_poll(
self,
*,
@@ -2233,7 +2265,7 @@ async def create_and_poll(
lifecycles can be found here:
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
- run = await self.create(
+ run = await self.create( # pyright: ignore[reportDeprecated]
thread_id=thread_id,
assistant_id=assistant_id,
include=include,
@@ -2259,7 +2291,7 @@ async def create_and_poll(
extra_body=extra_body,
timeout=timeout,
)
- return await self.poll(
+ return await self.poll( # pyright: ignore[reportDeprecated]
run.id,
thread_id=thread_id,
extra_headers=extra_headers,
@@ -2405,6 +2437,7 @@ def create_and_stream(
)
return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def poll(
self,
run_id: str,
@@ -2427,7 +2460,7 @@ async def poll(
terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
while True:
- response = await self.with_raw_response.retrieve(
+ response = await self.with_raw_response.retrieve( # pyright: ignore[reportDeprecated]
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
@@ -2451,6 +2484,7 @@ async def poll(
await self._sleep(poll_interval_ms / 1000)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
self,
*,
@@ -2482,6 +2516,7 @@ def stream(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
self,
*,
@@ -2514,6 +2549,7 @@ def stream(
"""Create a Run stream"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
self,
*,
@@ -2594,6 +2630,7 @@ def stream(
return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def submit_tool_outputs(
self,
run_id: str,
@@ -2632,6 +2669,7 @@ async def submit_tool_outputs(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def submit_tool_outputs(
self,
run_id: str,
@@ -2670,6 +2708,7 @@ async def submit_tool_outputs(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def submit_tool_outputs(
self,
run_id: str,
@@ -2707,7 +2746,8 @@ async def submit_tool_outputs(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
async def submit_tool_outputs(
self,
run_id: str,
@@ -2746,6 +2787,7 @@ async def submit_tool_outputs(
stream_cls=AsyncStream[AssistantStreamEvent],
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def submit_tool_outputs_and_poll(
self,
*,
@@ -2765,7 +2807,7 @@ async def submit_tool_outputs_and_poll(
More information on Run lifecycles can be found here:
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
- run = await self.submit_tool_outputs(
+ run = await self.submit_tool_outputs( # pyright: ignore[reportDeprecated]
run_id=run_id,
thread_id=thread_id,
tool_outputs=tool_outputs,
@@ -2775,7 +2817,7 @@ async def submit_tool_outputs_and_poll(
extra_body=extra_body,
timeout=timeout,
)
- return await self.poll(
+ return await self.poll( # pyright: ignore[reportDeprecated]
run_id=run.id,
thread_id=thread_id,
extra_headers=extra_headers,
@@ -2786,6 +2828,7 @@ async def submit_tool_outputs_and_poll(
)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
self,
*,
@@ -2807,6 +2850,7 @@ def submit_tool_outputs_stream(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
self,
*,
@@ -2828,6 +2872,7 @@ def submit_tool_outputs_stream(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
self,
*,
@@ -2885,23 +2930,35 @@ class RunsWithRawResponse:
def __init__(self, runs: Runs) -> None:
self._runs = runs
- self.create = _legacy_response.to_raw_response_wrapper(
- runs.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ runs.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = _legacy_response.to_raw_response_wrapper(
- runs.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ runs.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = _legacy_response.to_raw_response_wrapper(
- runs.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ runs.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = _legacy_response.to_raw_response_wrapper(
- runs.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ runs.list # pyright: ignore[reportDeprecated],
+ )
)
- self.cancel = _legacy_response.to_raw_response_wrapper(
- runs.cancel,
+ self.cancel = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ runs.cancel # pyright: ignore[reportDeprecated],
+ )
)
- self.submit_tool_outputs = _legacy_response.to_raw_response_wrapper(
- runs.submit_tool_outputs,
+ self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ runs.submit_tool_outputs # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
@@ -2913,23 +2970,35 @@ class AsyncRunsWithRawResponse:
def __init__(self, runs: AsyncRuns) -> None:
self._runs = runs
- self.create = _legacy_response.async_to_raw_response_wrapper(
- runs.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ runs.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = _legacy_response.async_to_raw_response_wrapper(
- runs.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ runs.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = _legacy_response.async_to_raw_response_wrapper(
- runs.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ runs.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = _legacy_response.async_to_raw_response_wrapper(
- runs.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ runs.list # pyright: ignore[reportDeprecated],
+ )
)
- self.cancel = _legacy_response.async_to_raw_response_wrapper(
- runs.cancel,
+ self.cancel = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ runs.cancel # pyright: ignore[reportDeprecated],
+ )
)
- self.submit_tool_outputs = _legacy_response.async_to_raw_response_wrapper(
- runs.submit_tool_outputs,
+ self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ runs.submit_tool_outputs # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
@@ -2941,23 +3010,35 @@ class RunsWithStreamingResponse:
def __init__(self, runs: Runs) -> None:
self._runs = runs
- self.create = to_streamed_response_wrapper(
- runs.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ runs.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = to_streamed_response_wrapper(
- runs.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ runs.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = to_streamed_response_wrapper(
- runs.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ runs.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = to_streamed_response_wrapper(
- runs.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ runs.list # pyright: ignore[reportDeprecated],
+ )
)
- self.cancel = to_streamed_response_wrapper(
- runs.cancel,
+ self.cancel = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ runs.cancel # pyright: ignore[reportDeprecated],
+ )
)
- self.submit_tool_outputs = to_streamed_response_wrapper(
- runs.submit_tool_outputs,
+ self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ runs.submit_tool_outputs # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
@@ -2969,23 +3050,35 @@ class AsyncRunsWithStreamingResponse:
def __init__(self, runs: AsyncRuns) -> None:
self._runs = runs
- self.create = async_to_streamed_response_wrapper(
- runs.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ runs.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = async_to_streamed_response_wrapper(
- runs.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ runs.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = async_to_streamed_response_wrapper(
- runs.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ runs.update # pyright: ignore[reportDeprecated],
+ )
)
- self.list = async_to_streamed_response_wrapper(
- runs.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ runs.list # pyright: ignore[reportDeprecated],
+ )
)
- self.cancel = async_to_streamed_response_wrapper(
- runs.cancel,
+ self.cancel = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ runs.cancel # pyright: ignore[reportDeprecated],
+ )
)
- self.submit_tool_outputs = async_to_streamed_response_wrapper(
- runs.submit_tool_outputs,
+ self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ runs.submit_tool_outputs # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
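Runtime behavior of the wrapped accessors is unchanged by the deprecation markers; the `pyright: ignore[reportDeprecated]` comments only keep the generated wrappers type-clean. For example, raw-response access still works as before (IDs are placeholders):

```python
# Sketch: raw-response access is unaffected by the deprecation markers.
# The thread and run IDs below are placeholders.
from openai import OpenAI

client = OpenAI()

raw = client.beta.threads.runs.with_raw_response.retrieve(
    "run_123",               # placeholder run ID
    thread_id="thread_123",  # placeholder thread ID
)
print(raw.headers.get("x-request-id"))
run = raw.parse()  # parse into the typed Run model
print(run.status)
```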
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 3d2148687b..eebb2003b2 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import typing_extensions
from typing import List
from typing_extensions import Literal
@@ -42,6 +43,7 @@ def with_streaming_response(self) -> StepsWithStreamingResponse:
"""
return StepsWithStreamingResponse(self)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def retrieve(
self,
step_id: str,
@@ -95,6 +97,7 @@ def retrieve(
cast_to=RunStep,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
self,
run_id: str,
@@ -196,6 +199,7 @@ def with_streaming_response(self) -> AsyncStepsWithStreamingResponse:
"""
return AsyncStepsWithStreamingResponse(self)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def retrieve(
self,
step_id: str,
@@ -249,6 +253,7 @@ async def retrieve(
cast_to=RunStep,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
self,
run_id: str,
@@ -334,11 +339,15 @@ class StepsWithRawResponse:
def __init__(self, steps: Steps) -> None:
self._steps = steps
- self.retrieve = _legacy_response.to_raw_response_wrapper(
- steps.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ steps.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.list = _legacy_response.to_raw_response_wrapper(
- steps.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ steps.list # pyright: ignore[reportDeprecated],
+ )
)
@@ -346,11 +355,15 @@ class AsyncStepsWithRawResponse:
def __init__(self, steps: AsyncSteps) -> None:
self._steps = steps
- self.retrieve = _legacy_response.async_to_raw_response_wrapper(
- steps.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ steps.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.list = _legacy_response.async_to_raw_response_wrapper(
- steps.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ steps.list # pyright: ignore[reportDeprecated],
+ )
)
@@ -358,11 +371,15 @@ class StepsWithStreamingResponse:
def __init__(self, steps: Steps) -> None:
self._steps = steps
- self.retrieve = to_streamed_response_wrapper(
- steps.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ steps.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.list = to_streamed_response_wrapper(
- steps.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ steps.list # pyright: ignore[reportDeprecated],
+ )
)
@@ -370,9 +387,13 @@ class AsyncStepsWithStreamingResponse:
def __init__(self, steps: AsyncSteps) -> None:
self._steps = steps
- self.retrieve = async_to_streamed_response_wrapper(
- steps.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ steps.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.list = async_to_streamed_response_wrapper(
- steps.list,
+ self.list = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ steps.list # pyright: ignore[reportDeprecated],
+ )
)
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index 13d8cb6411..ff2a41155d 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import typing_extensions
from typing import Union, Iterable, Optional
from functools import partial
from typing_extensions import Literal, overload
@@ -86,6 +87,7 @@ def with_streaming_response(self) -> ThreadsWithStreamingResponse:
"""
return ThreadsWithStreamingResponse(self)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
self,
*,
@@ -143,6 +145,7 @@ def create(
cast_to=Thread,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def retrieve(
self,
thread_id: str,
@@ -177,6 +180,7 @@ def retrieve(
cast_to=Thread,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def update(
self,
thread_id: str,
@@ -232,6 +236,7 @@ def update(
cast_to=Thread,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def delete(
self,
thread_id: str,
@@ -267,6 +272,7 @@ def delete(
)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create_and_run(
self,
*,
@@ -400,6 +406,7 @@ def create_and_run(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create_and_run(
self,
*,
@@ -533,6 +540,7 @@ def create_and_run(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create_and_run(
self,
*,
@@ -665,7 +673,8 @@ def create_and_run(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["assistant_id"], ["assistant_id", "stream"])
def create_and_run(
self,
*,
@@ -757,7 +767,7 @@ def create_and_run_poll(
More information on Run lifecycles can be found here:
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
- run = self.create_and_run(
+ run = self.create_and_run( # pyright: ignore[reportDeprecated]
assistant_id=assistant_id,
instructions=instructions,
max_completion_tokens=max_completion_tokens,
@@ -779,7 +789,7 @@ def create_and_run_poll(
extra_body=extra_body,
timeout=timeout,
)
- return self.runs.poll(run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms)
+ return self.runs.poll(run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms) # pyright: ignore[reportDeprecated]
@overload
def create_and_run_stream(
@@ -935,6 +945,7 @@ def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse:
"""
return AsyncThreadsWithStreamingResponse(self)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create(
self,
*,
@@ -992,6 +1003,7 @@ async def create(
cast_to=Thread,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def retrieve(
self,
thread_id: str,
@@ -1026,6 +1038,7 @@ async def retrieve(
cast_to=Thread,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def update(
self,
thread_id: str,
@@ -1081,6 +1094,7 @@ async def update(
cast_to=Thread,
)
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def delete(
self,
thread_id: str,
@@ -1116,6 +1130,7 @@ async def delete(
)
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create_and_run(
self,
*,
@@ -1249,6 +1264,7 @@ async def create_and_run(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create_and_run(
self,
*,
@@ -1382,6 +1398,7 @@ async def create_and_run(
...
@overload
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
async def create_and_run(
self,
*,
@@ -1514,7 +1531,8 @@ async def create_and_run(
"""
...
+ @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["assistant_id"], ["assistant_id", "stream"])
async def create_and_run(
self,
*,
@@ -1606,7 +1625,7 @@ async def create_and_run_poll(
More information on Run lifecycles can be found here:
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
- run = await self.create_and_run(
+ run = await self.create_and_run( # pyright: ignore[reportDeprecated]
assistant_id=assistant_id,
instructions=instructions,
max_completion_tokens=max_completion_tokens,
@@ -1628,7 +1647,7 @@ async def create_and_run_poll(
extra_body=extra_body,
timeout=timeout,
)
- return await self.runs.poll(
+ return await self.runs.poll( # pyright: ignore[reportDeprecated]
run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms
)
@@ -1764,20 +1783,30 @@ class ThreadsWithRawResponse:
def __init__(self, threads: Threads) -> None:
self._threads = threads
- self.create = _legacy_response.to_raw_response_wrapper(
- threads.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ threads.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = _legacy_response.to_raw_response_wrapper(
- threads.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ threads.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = _legacy_response.to_raw_response_wrapper(
- threads.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ threads.update # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = _legacy_response.to_raw_response_wrapper(
- threads.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ threads.delete # pyright: ignore[reportDeprecated],
+ )
)
- self.create_and_run = _legacy_response.to_raw_response_wrapper(
- threads.create_and_run,
+ self.create_and_run = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ threads.create_and_run # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
@@ -1793,20 +1822,30 @@ class AsyncThreadsWithRawResponse:
def __init__(self, threads: AsyncThreads) -> None:
self._threads = threads
- self.create = _legacy_response.async_to_raw_response_wrapper(
- threads.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ threads.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = _legacy_response.async_to_raw_response_wrapper(
- threads.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ threads.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = _legacy_response.async_to_raw_response_wrapper(
- threads.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ threads.update # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = _legacy_response.async_to_raw_response_wrapper(
- threads.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ threads.delete # pyright: ignore[reportDeprecated],
+ )
)
- self.create_and_run = _legacy_response.async_to_raw_response_wrapper(
- threads.create_and_run,
+ self.create_and_run = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ threads.create_and_run # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
@@ -1822,20 +1861,30 @@ class ThreadsWithStreamingResponse:
def __init__(self, threads: Threads) -> None:
self._threads = threads
- self.create = to_streamed_response_wrapper(
- threads.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ threads.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = to_streamed_response_wrapper(
- threads.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ threads.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = to_streamed_response_wrapper(
- threads.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ threads.update # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = to_streamed_response_wrapper(
- threads.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ threads.delete # pyright: ignore[reportDeprecated],
+ )
)
- self.create_and_run = to_streamed_response_wrapper(
- threads.create_and_run,
+ self.create_and_run = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ threads.create_and_run # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
@@ -1851,20 +1900,30 @@ class AsyncThreadsWithStreamingResponse:
def __init__(self, threads: AsyncThreads) -> None:
self._threads = threads
- self.create = async_to_streamed_response_wrapper(
- threads.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ threads.create # pyright: ignore[reportDeprecated],
+ )
)
- self.retrieve = async_to_streamed_response_wrapper(
- threads.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ threads.retrieve # pyright: ignore[reportDeprecated],
+ )
)
- self.update = async_to_streamed_response_wrapper(
- threads.update,
+ self.update = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ threads.update # pyright: ignore[reportDeprecated],
+ )
)
- self.delete = async_to_streamed_response_wrapper(
- threads.delete,
+ self.delete = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ threads.delete # pyright: ignore[reportDeprecated],
+ )
)
- self.create_and_run = async_to_streamed_response_wrapper(
- threads.create_and_run,
+ self.create_and_run = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ threads.create_and_run # pyright: ignore[reportDeprecated],
+ )
)
@cached_property
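The wrapper rewrites above exist because the Assistants API surfaces are now marked deprecated, and the generated code suppresses its own type-checker noise with `# pyright: ignore[reportDeprecated]`. Callers still on these endpoints will see a runtime `DeprecationWarning`; a minimal sketch of silencing it while migrating (the client setup is assumed, not part of this diff):

```python
import warnings

from openai import OpenAI

client = OpenAI()

# Suppress the DeprecationWarning emitted by the deprecated Threads API
# while code is being migrated; the call itself behaves as before.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    thread = client.beta.threads.create()

print(thread.id)
```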
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py
index 4dbd1e6c62..a2a664ac59 100644
--- a/src/openai/resources/chat/completions/completions.py
+++ b/src/openai/resources/chat/completions/completions.py
@@ -263,9 +263,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -541,9 +541,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -810,9 +810,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1366,9 +1366,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1644,9 +1644,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1913,9 +1913,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
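The hunks above only correct the spelling of "guarantee" in the `service_tier` docstrings; the parameter's behavior is unchanged. For reference, a minimal sketch of opting into the Flex tier (model choice is an assumption — any Flex-eligible model works):

```python
from openai import OpenAI

client = OpenAI()

# Request Flex Processing; "auto" and "default" fall back to the
# standard tier as the docstring above describes.
completion = client.chat.completions.create(
    model="o3",  # assumption: a Flex-eligible model
    messages=[{"role": "user", "content": "Say hello."}],
    service_tier="flex",
)
print(completion.service_tier)
```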
diff --git a/src/openai/resources/containers/files/content.py b/src/openai/resources/containers/files/content.py
index 1aa2d1729d..a200383407 100644
--- a/src/openai/resources/containers/files/content.py
+++ b/src/openai/resources/containers/files/content.py
@@ -5,10 +5,15 @@
import httpx
from .... import _legacy_response
-from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._response import (
+ StreamedBinaryAPIResponse,
+ AsyncStreamedBinaryAPIResponse,
+ to_custom_streamed_response_wrapper,
+ async_to_custom_streamed_response_wrapper,
+)
from ...._base_client import make_request_options
__all__ = ["Content", "AsyncContent"]
@@ -45,7 +50,7 @@ def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
+ ) -> _legacy_response.HttpxBinaryResponseContent:
"""
Retrieve Container File Content
@@ -62,13 +67,13 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
f"/containers/{container_id}/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=NoneType,
+ cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -103,7 +108,7 @@ async def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
+ ) -> _legacy_response.HttpxBinaryResponseContent:
"""
Retrieve Container File Content
@@ -120,13 +125,13 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
f"/containers/{container_id}/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=NoneType,
+ cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -152,8 +157,9 @@ class ContentWithStreamingResponse:
def __init__(self, content: Content) -> None:
self._content = content
- self.retrieve = to_streamed_response_wrapper(
+ self.retrieve = to_custom_streamed_response_wrapper(
content.retrieve,
+ StreamedBinaryAPIResponse,
)
@@ -161,6 +167,7 @@ class AsyncContentWithStreamingResponse:
def __init__(self, content: AsyncContent) -> None:
self._content = content
- self.retrieve = async_to_streamed_response_wrapper(
+ self.retrieve = async_to_custom_streamed_response_wrapper(
content.retrieve,
+ AsyncStreamedBinaryAPIResponse,
)
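With `retrieve` now cast to `HttpxBinaryResponseContent` instead of `NoneType`, the container file bytes are actually usable. A minimal sketch, with hypothetical IDs:

```python
from openai import OpenAI

client = OpenAI()

# The endpoint now returns binary content rather than None.
content = client.containers.files.content.retrieve(
    "file_123",  # hypothetical file ID
    container_id="cont_123",  # hypothetical container ID
)
content.write_to_file("output.bin")  # or access raw bytes via content.read()
```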
diff --git a/src/openai/resources/fine_tuning/alpha/graders.py b/src/openai/resources/fine_tuning/alpha/graders.py
index f27acdfd9c..387e6c72ff 100644
--- a/src/openai/resources/fine_tuning/alpha/graders.py
+++ b/src/openai/resources/fine_tuning/alpha/graders.py
@@ -2,8 +2,6 @@
from __future__ import annotations
-from typing import Union, Iterable
-
import httpx
from .... import _legacy_response
@@ -45,7 +43,7 @@ def run(
*,
grader: grader_run_params.Grader,
model_sample: str,
- reference_answer: Union[str, Iterable[object], float, object],
+ item: object | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -59,9 +57,15 @@ def run(
Args:
grader: The grader used for the fine-tuning job.
- model_sample: The model sample to be evaluated.
+ model_sample: The model sample to be evaluated. This value will be used to populate the
+ `sample` namespace. See
+ [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+ The `output_json` variable will be populated if the model sample is a valid JSON
+ string.
- reference_answer: The reference answer for the evaluation.
+ item: The dataset item provided to the grader. This will be used to populate the
+ `item` namespace. See
+ [the guide](https://platform.openai.com/docs/guides/graders) for more details.
extra_headers: Send extra headers
@@ -77,7 +81,7 @@ def run(
{
"grader": grader,
"model_sample": model_sample,
- "reference_answer": reference_answer,
+ "item": item,
},
grader_run_params.GraderRunParams,
),
@@ -147,7 +151,7 @@ async def run(
*,
grader: grader_run_params.Grader,
model_sample: str,
- reference_answer: Union[str, Iterable[object], float, object],
+ item: object | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -161,9 +165,15 @@ async def run(
Args:
grader: The grader used for the fine-tuning job.
- model_sample: The model sample to be evaluated.
+ model_sample: The model sample to be evaluated. This value will be used to populate the
+ `sample` namespace. See
+ [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+ The `output_json` variable will be populated if the model sample is a valid JSON
+ string.
- reference_answer: The reference answer for the evaluation.
+ item: The dataset item provided to the grader. This will be used to populate the
+ `item` namespace. See
+ [the guide](https://platform.openai.com/docs/guides/graders) for more details.
extra_headers: Send extra headers
@@ -179,7 +189,7 @@ async def run(
{
"grader": grader,
"model_sample": model_sample,
- "reference_answer": reference_answer,
+ "item": item,
},
grader_run_params.GraderRunParams,
),
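The `reference_answer` argument is replaced by an optional `item` object: `model_sample` populates the `sample` template namespace and `item` populates `item`. A sketch under the assumption of a simple `string_check` grader wired to those namespaces:

```python
from openai import OpenAI

client = OpenAI()

# Assumption: an exact-match string_check grader; template variables
# reference the `sample` and `item` namespaces described above.
result = client.fine_tuning.alpha.graders.run(
    grader={
        "type": "string_check",
        "name": "exact_match",
        "input": "{{sample.output_text}}",
        "reference": "{{item.answer}}",
        "operation": "eq",
    },
    model_sample="Paris",
    item={"answer": "Paris"},
)
print(result.reward)
```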
diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py
index 524bebacae..0f1c9fcb9e 100644
--- a/src/openai/resources/images.py
+++ b/src/openai/resources/images.py
@@ -144,7 +144,7 @@ def edit(
image: The image(s) to edit. Must be a supported image file or an array of images.
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- 25MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -468,7 +468,7 @@ async def edit(
image: The image(s) to edit. Must be a supported image file or an array of images.
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- 25MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
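Only the documented per-image size limit for `gpt-image-1` changes here (25MB → 50MB); the call shape is the same. A minimal sketch, with a hypothetical local file:

```python
from openai import OpenAI

client = OpenAI()

# Each gpt-image-1 input image may now be up to 50MB (up to 16 images).
with open("photo.png", "rb") as image_file:  # hypothetical file
    result = client.images.edit(
        model="gpt-image-1",
        image=image_file,
        prompt="Add a red hat to the subject.",
    )
```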
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index 570e7c94d5..c3bec87153 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -149,6 +149,8 @@ def create(
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
instructions: Inserts a system (or developer) message as the first item in the model's
context.
@@ -186,9 +188,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -349,6 +351,8 @@ def create(
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
instructions: Inserts a system (or developer) message as the first item in the model's
context.
@@ -386,9 +390,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -542,6 +546,8 @@ def create(
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
instructions: Inserts a system (or developer) message as the first item in the model's
context.
@@ -579,9 +585,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -976,6 +982,8 @@ def retrieve(
response_id: str,
*,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ stream: Literal[False] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1016,6 +1024,7 @@ def retrieve(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Response | Stream[ResponseStreamEvent]: ...
+ @overload
def retrieve(
self,
response_id: str,
@@ -1037,15 +1046,55 @@ def retrieve(
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
- stream: If set to true, the model response data will be streamed to the client using
+ starting_after: The sequence number of the event after which to start streaming.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
for more information.
- starting_after: When retrieving a background response, this parameter can be used to start
- replaying after an event with the given sequence number. Must be used in conjunction with
- the `stream` parameter set to `true`.
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: Literal[True],
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Stream[ResponseStreamEvent]:
+ """
+ Retrieves a model response with the given ID.
+
+ Args:
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+
+ include: Additional fields to include in the response. See the `include` parameter for
+ Response creation above for more information.
+
+ starting_after: The sequence number of the event after which to start streaming.
extra_headers: Send extra headers
@@ -1055,6 +1104,63 @@ def retrieve(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ ...
+
+ @overload
+ def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: bool,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Response | Stream[ResponseStreamEvent]:
+ """
+ Retrieves a model response with the given ID.
+
+ Args:
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+
+ include: Additional fields to include in the response. See the `include` parameter for
+ Response creation above for more information.
+
+ starting_after: The sequence number of the event after which to start streaming.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ def retrieve(
+ self,
+ response_id: str,
+ *,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Response | Stream[ResponseStreamEvent]:
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get(
@@ -1067,8 +1173,8 @@ def retrieve(
query=maybe_transform(
{
"include": include,
- "stream": stream,
"starting_after": starting_after,
+ "stream": stream,
},
response_retrieve_params.ResponseRetrieveParams,
),
@@ -1122,7 +1228,7 @@ def cancel(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
+ ) -> Response:
"""Cancels a model response with the given ID.
Only responses created with the
@@ -1140,13 +1246,12 @@ def cancel(
"""
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
f"/responses/{response_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=NoneType,
+ cast_to=Response,
)
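The new `retrieve` overloads make the streaming replay path explicit: passing `stream=True` returns a `Stream[ResponseStreamEvent]`, and `starting_after` resumes after a given event sequence number. A sketch with hypothetical values:

```python
from openai import OpenAI

client = OpenAI()

# Replay a background response's event stream, resuming after event 10;
# "resp_123" and the cursor value are hypothetical.
stream = client.responses.retrieve(
    "resp_123",
    stream=True,
    starting_after=10,
)
for event in stream:
    print(event.type)
```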
@@ -1252,6 +1357,8 @@ async def create(
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
instructions: Inserts a system (or developer) message as the first item in the model's
context.
@@ -1289,9 +1396,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1452,6 +1559,8 @@ async def create(
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
instructions: Inserts a system (or developer) message as the first item in the model's
context.
@@ -1489,9 +1598,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1645,6 +1754,8 @@ async def create(
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
instructions: Inserts a system (or developer) message as the first item in the model's
context.
@@ -1682,9 +1793,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -2083,6 +2194,8 @@ async def retrieve(
response_id: str,
*,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ stream: Literal[False] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -2123,6 +2236,7 @@ async def retrieve(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Response | AsyncStream[ResponseStreamEvent]: ...
+ @overload
async def retrieve(
self,
response_id: str,
@@ -2144,9 +2258,96 @@ async def retrieve(
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
- stream:
- starting_after: When retrieving a background response, this parameter can be used to start
- replaying after an event with the given sequence number. Must be used in
+ starting_after: The sequence number of the event after which to start streaming.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: Literal[True],
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncStream[ResponseStreamEvent]:
+ """
+ Retrieves a model response with the given ID.
+
+ Args:
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+
+ include: Additional fields to include in the response. See the `include` parameter for
+ Response creation above for more information.
+
+ starting_after: The sequence number of the event after which to start streaming.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: bool,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Response | AsyncStream[ResponseStreamEvent]:
+ """
+ Retrieves a model response with the given ID.
+
+ Args:
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+
+ include: Additional fields to include in the response. See the `include` parameter for
+ Response creation above for more information.
+
+ starting_after: The sequence number of the event after which to start streaming.
extra_headers: Send extra headers
@@ -2156,6 +2357,22 @@ async def retrieve(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ ...
+
+ async def retrieve(
+ self,
+ response_id: str,
+ *,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ starting_after: int | NotGiven = NOT_GIVEN,
+ stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Response | AsyncStream[ResponseStreamEvent]:
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return await self._get(
@@ -2168,8 +2385,8 @@ async def retrieve(
query=await async_maybe_transform(
{
"include": include,
- "stream": stream,
"starting_after": starting_after,
+ "stream": stream,
},
response_retrieve_params.ResponseRetrieveParams,
),
@@ -2223,7 +2440,7 @@ async def cancel(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
+ ) -> Response:
"""Cancels a model response with the given ID.
Only responses created with the
@@ -2241,13 +2458,12 @@ async def cancel(
"""
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
f"/responses/{response_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=NoneType,
+ cast_to=Response,
)
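As in the sync resource, `cancel` now returns the `Response` object rather than `None`, so the cancelled state is directly inspectable. A minimal sketch with a hypothetical ID:

```python
from openai import OpenAI

client = OpenAI()

# cancel() now returns the Response instead of None.
response = client.responses.cancel("resp_123")  # hypothetical ID
print(response.status)  # e.g. "cancelled"
```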
diff --git a/src/openai/types/audio/transcription_text_delta_event.py b/src/openai/types/audio/transcription_text_delta_event.py
index f8d5355491..36c52f0623 100644
--- a/src/openai/types/audio/transcription_text_delta_event.py
+++ b/src/openai/types/audio/transcription_text_delta_event.py
@@ -12,7 +12,7 @@ class Logprob(BaseModel):
token: Optional[str] = None
"""The token that was used to generate the log probability."""
- bytes: Optional[List[object]] = None
+ bytes: Optional[List[int]] = None
"""The bytes that were used to generate the log probability."""
logprob: Optional[float] = None
diff --git a/src/openai/types/audio/transcription_text_done_event.py b/src/openai/types/audio/transcription_text_done_event.py
index 3f1a713a52..c8875a1bdb 100644
--- a/src/openai/types/audio/transcription_text_done_event.py
+++ b/src/openai/types/audio/transcription_text_done_event.py
@@ -12,7 +12,7 @@ class Logprob(BaseModel):
token: Optional[str] = None
"""The token that was used to generate the log probability."""
- bytes: Optional[List[object]] = None
+ bytes: Optional[List[int]] = None
"""The bytes that were used to generate the log probability."""
logprob: Optional[float] = None
diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py
index eadee29b28..7a8e694f45 100644
--- a/src/openai/types/beta/realtime/session_create_params.py
+++ b/src/openai/types/beta/realtime/session_create_params.py
@@ -5,10 +5,21 @@
from typing import List, Union, Iterable
from typing_extensions import Literal, TypedDict
-__all__ = ["SessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", "TurnDetection"]
+__all__ = [
+ "SessionCreateParams",
+ "ClientSecret",
+ "ClientSecretExpiresAt",
+ "InputAudioNoiseReduction",
+ "InputAudioTranscription",
+ "Tool",
+ "TurnDetection",
+]
class SessionCreateParams(TypedDict, total=False):
+ client_secret: ClientSecret
+ """Configuration options for the generated client secret."""
+
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
"""The format of input audio.
@@ -124,6 +135,25 @@ class SessionCreateParams(TypedDict, total=False):
"""
+class ClientSecretExpiresAt(TypedDict, total=False):
+ anchor: Literal["created_at"]
+ """The anchor point for the ephemeral token expiration.
+
+ Only `created_at` is currently supported.
+ """
+
+ seconds: int
+ """The number of seconds from the anchor point to the expiration.
+
+ Select a value between `10` and `7200`.
+ """
+
+
+class ClientSecret(TypedDict, total=False):
+ expires_at: ClientSecretExpiresAt
+ """Configuration for the ephemeral token expiration."""
+
+
class InputAudioNoiseReduction(TypedDict, total=False):
type: Literal["near_field", "far_field"]
"""Type of noise reduction.
diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py
index ba34b0260b..1cd3ded27c 100644
--- a/src/openai/types/beta/realtime/session_update_event.py
+++ b/src/openai/types/beta/realtime/session_update_event.py
@@ -8,6 +8,8 @@
__all__ = [
"SessionUpdateEvent",
"Session",
+ "SessionClientSecret",
+ "SessionClientSecretExpiresAt",
"SessionInputAudioNoiseReduction",
"SessionInputAudioTranscription",
"SessionTool",
@@ -15,6 +17,25 @@
]
+class SessionClientSecretExpiresAt(BaseModel):
+ anchor: Optional[Literal["created_at"]] = None
+ """The anchor point for the ephemeral token expiration.
+
+ Only `created_at` is currently supported.
+ """
+
+ seconds: Optional[int] = None
+ """The number of seconds from the anchor point to the expiration.
+
+ Select a value between `10` and `7200`.
+ """
+
+
+class SessionClientSecret(BaseModel):
+ expires_at: Optional[SessionClientSecretExpiresAt] = None
+ """Configuration for the ephemeral token expiration."""
+
+
class SessionInputAudioNoiseReduction(BaseModel):
type: Optional[Literal["near_field", "far_field"]] = None
"""Type of noise reduction.
@@ -116,6 +137,9 @@ class SessionTurnDetection(BaseModel):
class Session(BaseModel):
+ client_secret: Optional[SessionClientSecret] = None
+ """Configuration options for the generated client secret."""
+
input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
"""The format of input audio.
diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py
index 0984d39e91..ee18aec239 100644
--- a/src/openai/types/beta/realtime/session_update_event_param.py
+++ b/src/openai/types/beta/realtime/session_update_event_param.py
@@ -8,6 +8,8 @@
__all__ = [
"SessionUpdateEventParam",
"Session",
+ "SessionClientSecret",
+ "SessionClientSecretExpiresAt",
"SessionInputAudioNoiseReduction",
"SessionInputAudioTranscription",
"SessionTool",
@@ -15,6 +17,25 @@
]
+class SessionClientSecretExpiresAt(TypedDict, total=False):
+ anchor: Literal["created_at"]
+ """The anchor point for the ephemeral token expiration.
+
+ Only `created_at` is currently supported.
+ """
+
+ seconds: int
+ """The number of seconds from the anchor point to the expiration.
+
+ Select a value between `10` and `7200`.
+ """
+
+
+class SessionClientSecret(TypedDict, total=False):
+ expires_at: SessionClientSecretExpiresAt
+ """Configuration for the ephemeral token expiration."""
+
+
class SessionInputAudioNoiseReduction(TypedDict, total=False):
type: Literal["near_field", "far_field"]
"""Type of noise reduction.
@@ -116,6 +137,9 @@ class SessionTurnDetection(TypedDict, total=False):
class Session(TypedDict, total=False):
+ client_secret: SessionClientSecret
+ """Configuration options for the generated client secret."""
+
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
"""The format of input audio.
diff --git a/src/openai/types/beta/realtime/transcription_session_create_params.py b/src/openai/types/beta/realtime/transcription_session_create_params.py
index 1cf511f0b5..15b2f14c14 100644
--- a/src/openai/types/beta/realtime/transcription_session_create_params.py
+++ b/src/openai/types/beta/realtime/transcription_session_create_params.py
@@ -5,10 +5,20 @@
from typing import List
from typing_extensions import Literal, TypedDict
-__all__ = ["TranscriptionSessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "TurnDetection"]
+__all__ = [
+ "TranscriptionSessionCreateParams",
+ "ClientSecret",
+ "ClientSecretExpiresAt",
+ "InputAudioNoiseReduction",
+ "InputAudioTranscription",
+ "TurnDetection",
+]
class TranscriptionSessionCreateParams(TypedDict, total=False):
+ client_secret: ClientSecret
+ """Configuration options for the generated client secret."""
+
include: List[str]
"""The set of items to include in the transcription. Current available items are:
@@ -60,6 +70,25 @@ class TranscriptionSessionCreateParams(TypedDict, total=False):
"""
+class ClientSecretExpiresAt(TypedDict, total=False):
+ anchor: Literal["created_at"]
+ """The anchor point for the ephemeral token expiration.
+
+ Only `created_at` is currently supported.
+ """
+
+ seconds: int
+ """The number of seconds from the anchor point to the expiration.
+
+ Select a value between `10` and `7200`.
+ """
+
+
+class ClientSecret(TypedDict, total=False):
+ expires_at: ClientSecretExpiresAt
+ """Configuration for the ephemeral token expiration."""
+
+
class InputAudioNoiseReduction(TypedDict, total=False):
type: Literal["near_field", "far_field"]
"""Type of noise reduction.
diff --git a/src/openai/types/beta/realtime/transcription_session_update.py b/src/openai/types/beta/realtime/transcription_session_update.py
index c3e8f011c8..73253b6848 100644
--- a/src/openai/types/beta/realtime/transcription_session_update.py
+++ b/src/openai/types/beta/realtime/transcription_session_update.py
@@ -8,12 +8,33 @@
__all__ = [
"TranscriptionSessionUpdate",
"Session",
+ "SessionClientSecret",
+ "SessionClientSecretExpiresAt",
"SessionInputAudioNoiseReduction",
"SessionInputAudioTranscription",
"SessionTurnDetection",
]
+class SessionClientSecretExpiresAt(BaseModel):
+ anchor: Optional[Literal["created_at"]] = None
+ """The anchor point for the ephemeral token expiration.
+
+ Only `created_at` is currently supported.
+ """
+
+ seconds: Optional[int] = None
+ """The number of seconds from the anchor point to the expiration.
+
+ Select a value between `10` and `7200`.
+ """
+
+
+class SessionClientSecret(BaseModel):
+ expires_at: Optional[SessionClientSecretExpiresAt] = None
+ """Configuration for the ephemeral token expiration."""
+
+
class SessionInputAudioNoiseReduction(BaseModel):
type: Optional[Literal["near_field", "far_field"]] = None
"""Type of noise reduction.
@@ -99,6 +120,9 @@ class SessionTurnDetection(BaseModel):
class Session(BaseModel):
+ client_secret: Optional[SessionClientSecret] = None
+ """Configuration options for the generated client secret."""
+
include: Optional[List[str]] = None
"""The set of items to include in the transcription. Current available items are:
diff --git a/src/openai/types/beta/realtime/transcription_session_update_param.py b/src/openai/types/beta/realtime/transcription_session_update_param.py
index 549c49011b..6b38a9af39 100644
--- a/src/openai/types/beta/realtime/transcription_session_update_param.py
+++ b/src/openai/types/beta/realtime/transcription_session_update_param.py
@@ -8,12 +8,33 @@
__all__ = [
"TranscriptionSessionUpdateParam",
"Session",
+ "SessionClientSecret",
+ "SessionClientSecretExpiresAt",
"SessionInputAudioNoiseReduction",
"SessionInputAudioTranscription",
"SessionTurnDetection",
]
+class SessionClientSecretExpiresAt(TypedDict, total=False):
+ anchor: Literal["created_at"]
+ """The anchor point for the ephemeral token expiration.
+
+ Only `created_at` is currently supported.
+ """
+
+ seconds: int
+ """The number of seconds from the anchor point to the expiration.
+
+ Select a value between `10` and `7200`.
+ """
+
+
+class SessionClientSecret(TypedDict, total=False):
+ expires_at: SessionClientSecretExpiresAt
+ """Configuration for the ephemeral token expiration."""
+
+
class SessionInputAudioNoiseReduction(TypedDict, total=False):
type: Literal["near_field", "far_field"]
"""Type of noise reduction.
@@ -99,6 +120,9 @@ class SessionTurnDetection(TypedDict, total=False):
class Session(TypedDict, total=False):
+ client_secret: SessionClientSecret
+ """Configuration options for the generated client secret."""
+
include: List[str]
"""The set of items to include in the transcription. Current available items are:
diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py
index 3a235f89a5..49af1a3d0e 100644
--- a/src/openai/types/chat/chat_completion.py
+++ b/src/openai/types/chat/chat_completion.py
@@ -68,9 +68,9 @@ class ChatCompletion(BaseModel):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py
index 6fe996dd95..c109e10f97 100644
--- a/src/openai/types/chat/chat_completion_chunk.py
+++ b/src/openai/types/chat/chat_completion_chunk.py
@@ -137,9 +137,9 @@ class ChatCompletionChunk(BaseModel):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index 5ea1c82f3d..e55cc2d0b7 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -217,9 +217,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
diff --git a/src/openai/types/fine_tuning/alpha/grader_run_params.py b/src/openai/types/fine_tuning/alpha/grader_run_params.py
index fa729f55ba..646407fe09 100644
--- a/src/openai/types/fine_tuning/alpha/grader_run_params.py
+++ b/src/openai/types/fine_tuning/alpha/grader_run_params.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union, Iterable
+from typing import Union
from typing_extensions import Required, TypeAlias, TypedDict
from ...graders.multi_grader_param import MultiGraderParam
@@ -19,10 +19,20 @@ class GraderRunParams(TypedDict, total=False):
"""The grader used for the fine-tuning job."""
model_sample: Required[str]
- """The model sample to be evaluated."""
+ """The model sample to be evaluated.
- reference_answer: Required[Union[str, Iterable[object], float, object]]
- """The reference answer for the evaluation."""
+ This value will be used to populate the `sample` namespace. See
+ [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+ The `output_json` variable will be populated if the model sample is a valid JSON
+ string.
+ """
+
+ item: object
+ """The dataset item provided to the grader.
+
+ This will be used to populate the `item` namespace. See
+ [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+ """
Grader: TypeAlias = Union[
diff --git a/src/openai/types/fine_tuning/fine_tuning_job.py b/src/openai/types/fine_tuning/fine_tuning_job.py
index b6123f8ba6..f626fbba64 100644
--- a/src/openai/types/fine_tuning/fine_tuning_job.py
+++ b/src/openai/types/fine_tuning/fine_tuning_job.py
@@ -28,7 +28,7 @@ class Error(BaseModel):
class Hyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, Optional[object], None] = None
+ batch_size: Union[Literal["auto"], int, None] = None
"""Number of examples in each batch.
A larger batch size means that model parameters are updated less frequently, but
diff --git a/src/openai/types/graders/multi_grader.py b/src/openai/types/graders/multi_grader.py
index 220de2e61b..7539c68ef5 100644
--- a/src/openai/types/graders/multi_grader.py
+++ b/src/openai/types/graders/multi_grader.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, Union
+from typing import Union
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
@@ -19,7 +19,11 @@ class MultiGrader(BaseModel):
calculate_output: str
"""A formula to calculate the output based on grader results."""
- graders: Dict[str, Graders]
+ graders: Graders
+ """
+ A StringCheckGrader object that performs a string comparison between input and
+ reference using a specified operation.
+ """
name: str
"""The name of the grader."""
diff --git a/src/openai/types/graders/multi_grader_param.py b/src/openai/types/graders/multi_grader_param.py
index 2984b5668f..28a6705b81 100644
--- a/src/openai/types/graders/multi_grader_param.py
+++ b/src/openai/types/graders/multi_grader_param.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Dict, Union
+from typing import Union
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .python_grader_param import PythonGraderParam
@@ -22,7 +22,11 @@ class MultiGraderParam(TypedDict, total=False):
calculate_output: Required[str]
"""A formula to calculate the output based on grader results."""
- graders: Required[Dict[str, Graders]]
+ graders: Required[Graders]
+ """
+ A StringCheckGrader object that performs a string comparison between input and
+ reference using a specified operation.
+ """
name: Required[str]
"""The name of the grader."""
diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py
index 6294e8ac19..4f931ce141 100644
--- a/src/openai/types/image_edit_params.py
+++ b/src/openai/types/image_edit_params.py
@@ -16,7 +16,7 @@ class ImageEditParams(TypedDict, total=False):
"""The image(s) to edit. Must be a supported image file or an array of images.
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- 25MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
diff --git a/src/openai/types/responses/parsed_response.py b/src/openai/types/responses/parsed_response.py
index f0b85f7209..e59e86d2b7 100644
--- a/src/openai/types/responses/parsed_response.py
+++ b/src/openai/types/responses/parsed_response.py
@@ -55,7 +55,7 @@ class ParsedResponseOutputMessage(ResponseOutputMessage, GenericModel, Generic[C
class ParsedResponseFunctionToolCall(ResponseFunctionToolCall):
parsed_arguments: object = None
- __api_exclude__ = {'parsed_arguments'}
+ __api_exclude__ = {"parsed_arguments"}
ParsedResponseOutputItem: TypeAlias = Annotated[
diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index 0d30d58ddb..441b345414 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -164,9 +164,9 @@ class Response(BaseModel):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
diff --git a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
index f25b3f3cab..d222431504 100644
--- a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -17,5 +17,5 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
sequence_number: int
"""The sequence number of this event."""
- type: Literal["response.code_interpreter_call.code.delta"]
- """The type of the event. Always `response.code_interpreter_call.code.delta`."""
+ type: Literal["response.code_interpreter_call_code.delta"]
+ """The type of the event. Always `response.code_interpreter_call_code.delta`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
index bf1868cf0f..1ce6796a0e 100644
--- a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -17,5 +17,5 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
sequence_number: int
"""The sequence number of this event."""
- type: Literal["response.code_interpreter_call.code.done"]
- """The type of the event. Always `response.code_interpreter_call.code.done`."""
+ type: Literal["response.code_interpreter_call_code.done"]
+ """The type of the event. Always `response.code_interpreter_call_code.done`."""
diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py
index 28b2b59135..1abc2ccb1d 100644
--- a/src/openai/types/responses/response_create_params.py
+++ b/src/openai/types/responses/response_create_params.py
@@ -67,6 +67,8 @@ class ResponseCreateParamsBase(TypedDict, total=False):
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
"""
instructions: Optional[str]
@@ -122,9 +124,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- If set to 'flex', the request will be processed with the Flex Processing
service tier.
[Learn more](https://platform.openai.com/docs/guides/flex-processing).
diff --git a/src/openai/types/responses/response_includable.py b/src/openai/types/responses/response_includable.py
index a01dddd71d..28869832b0 100644
--- a/src/openai/types/responses/response_includable.py
+++ b/src/openai/types/responses/response_includable.py
@@ -9,4 +9,5 @@
"message.input_image.image_url",
"computer_call_output.output.image_url",
"reasoning.encrypted_content",
+ "code_interpreter_call.outputs",
]
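With the new `code_interpreter_call.outputs` includable, the outputs of python code execution are returned on the code interpreter tool call items. A sketch of requesting it (model and tool configuration are assumptions):

```python
from openai import OpenAI

client = OpenAI()

# Assumption: requesting the new includable so execution outputs come
# back on the code interpreter tool call items.
response = client.responses.create(
    model="gpt-4.1",
    input="Use python to compute 2**32.",
    tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
    include=["code_interpreter_call.outputs"],
)
for item in response.output:
    if item.type == "code_interpreter_call":
        print(item)  # includes the code execution outputs
```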
diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py
index fa653cd1af..1ea9a4ba93 100644
--- a/src/openai/types/responses/response_output_text.py
+++ b/src/openai/types/responses/response_output_text.py
@@ -1,12 +1,21 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union
+from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
-__all__ = ["ResponseOutputText", "Annotation", "AnnotationFileCitation", "AnnotationURLCitation", "AnnotationFilePath"]
+__all__ = [
+ "ResponseOutputText",
+ "Annotation",
+ "AnnotationFileCitation",
+ "AnnotationURLCitation",
+ "AnnotationContainerFileCitation",
+ "AnnotationFilePath",
+ "Logprob",
+ "LogprobTopLogprob",
+]
class AnnotationFileCitation(BaseModel):
@@ -37,6 +46,23 @@ class AnnotationURLCitation(BaseModel):
"""The URL of the web resource."""
+class AnnotationContainerFileCitation(BaseModel):
+ container_id: str
+ """The ID of the container file."""
+
+ end_index: int
+ """The index of the last character of the container file citation in the message."""
+
+ file_id: str
+ """The ID of the file."""
+
+ start_index: int
+ """The index of the first character of the container file citation in the message."""
+
+ type: Literal["container_file_citation"]
+ """The type of the container file citation. Always `container_file_citation`."""
+
+
class AnnotationFilePath(BaseModel):
file_id: str
"""The ID of the file."""
@@ -49,10 +75,29 @@ class AnnotationFilePath(BaseModel):
Annotation: TypeAlias = Annotated[
- Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+ Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationContainerFileCitation, AnnotationFilePath],
+ PropertyInfo(discriminator="type"),
]
+class LogprobTopLogprob(BaseModel):
+ token: str
+
+ bytes: List[int]
+
+ logprob: float
+
+
+class Logprob(BaseModel):
+ token: str
+
+ bytes: List[int]
+
+ logprob: float
+
+ top_logprobs: List[LogprobTopLogprob]
+
+
class ResponseOutputText(BaseModel):
annotations: List[Annotation]
"""The annotations of the text output."""
@@ -62,3 +107,5 @@ class ResponseOutputText(BaseModel):
type: Literal["output_text"]
"""The type of the output text. Always `output_text`."""
+
+ logprobs: Optional[List[Logprob]] = None
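`ResponseOutputText` gains an optional `logprobs` list (token, bytes, logprob, top_logprobs). A sketch of reading it, assuming the response was created with log probabilities enabled and "resp_123" is a hypothetical ID:

```python
from openai import OpenAI

client = OpenAI()

# Assumption: "resp_123" was created with log probabilities requested,
# so the new optional `logprobs` field is populated on output_text parts.
response = client.responses.retrieve("resp_123")
for item in response.output:
    if item.type == "message":
        for part in item.content:
            if part.type == "output_text" and part.logprobs:
                for lp in part.logprobs:
                    print(lp.token, lp.logprob)
```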
diff --git a/src/openai/types/responses/response_output_text_param.py b/src/openai/types/responses/response_output_text_param.py
index 1f0967285f..207901e8ef 100644
--- a/src/openai/types/responses/response_output_text_param.py
+++ b/src/openai/types/responses/response_output_text_param.py
@@ -10,7 +10,10 @@
"Annotation",
"AnnotationFileCitation",
"AnnotationURLCitation",
+ "AnnotationContainerFileCitation",
"AnnotationFilePath",
+ "Logprob",
+ "LogprobTopLogprob",
]
@@ -42,6 +45,23 @@ class AnnotationURLCitation(TypedDict, total=False):
"""The URL of the web resource."""
+class AnnotationContainerFileCitation(TypedDict, total=False):
+ container_id: Required[str]
+ """The ID of the container file."""
+
+ end_index: Required[int]
+ """The index of the last character of the container file citation in the message."""
+
+ file_id: Required[str]
+ """The ID of the file."""
+
+ start_index: Required[int]
+ """The index of the first character of the container file citation in the message."""
+
+ type: Required[Literal["container_file_citation"]]
+ """The type of the container file citation. Always `container_file_citation`."""
+
+
class AnnotationFilePath(TypedDict, total=False):
file_id: Required[str]
"""The ID of the file."""
@@ -53,7 +73,27 @@ class AnnotationFilePath(TypedDict, total=False):
"""The type of the file path. Always `file_path`."""
-Annotation: TypeAlias = Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath]
+Annotation: TypeAlias = Union[
+ AnnotationFileCitation, AnnotationURLCitation, AnnotationContainerFileCitation, AnnotationFilePath
+]
+
+
+class LogprobTopLogprob(TypedDict, total=False):
+ token: Required[str]
+
+ bytes: Required[Iterable[int]]
+
+ logprob: Required[float]
+
+
+class Logprob(TypedDict, total=False):
+ token: Required[str]
+
+ bytes: Required[Iterable[int]]
+
+ logprob: Required[float]
+
+ top_logprobs: Required[Iterable[LogprobTopLogprob]]
class ResponseOutputTextParam(TypedDict, total=False):
@@ -65,3 +105,5 @@ class ResponseOutputTextParam(TypedDict, total=False):
type: Required[Literal["output_text"]]
"""The type of the output text. Always `output_text`."""
+
+ logprobs: Iterable[Logprob]
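
The param-side mirror is a `TypedDict`, so input payloads exercising the new annotation variant type-check as plain literals. A sketch with placeholder IDs and indices:

```python
from openai.types.responses.response_output_text_param import ResponseOutputTextParam

# Placeholder IDs and offsets; the shape follows the TypedDicts above.
param: ResponseOutputTextParam = {
    "type": "output_text",
    "text": "See the attached file.",
    "annotations": [
        {
            "type": "container_file_citation",
            "container_id": "cntr_123",
            "file_id": "file_123",
            "start_index": 0,
            "end_index": 22,
        }
    ],
}
```
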
diff --git a/src/openai/types/responses/response_retrieve_params.py b/src/openai/types/responses/response_retrieve_params.py
index 137bf4dcee..a092bd7fb8 100644
--- a/src/openai/types/responses/response_retrieve_params.py
+++ b/src/openai/types/responses/response_retrieve_params.py
@@ -2,17 +2,47 @@
from __future__ import annotations
-from typing import List
-from typing_extensions import TypedDict
+from typing import List, Union
+from typing_extensions import Literal, Required, TypedDict
from .response_includable import ResponseIncludable
-__all__ = ["ResponseRetrieveParams"]
+__all__ = ["ResponseRetrieveParamsBase", "ResponseRetrieveParamsNonStreaming", "ResponseRetrieveParamsStreaming"]
-class ResponseRetrieveParams(TypedDict, total=False):
+class ResponseRetrieveParamsBase(TypedDict, total=False):
include: List[ResponseIncludable]
"""Additional fields to include in the response.
See the `include` parameter for Response creation above for more information.
"""
+
+ starting_after: int
+ """The sequence number of the event after which to start streaming."""
+
+
+class ResponseRetrieveParamsNonStreaming(ResponseRetrieveParamsBase, total=False):
+ stream: Literal[False]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+ """
+
+
+class ResponseRetrieveParamsStreaming(ResponseRetrieveParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+ """
+
+
+ResponseRetrieveParams = Union[ResponseRetrieveParamsNonStreaming, ResponseRetrieveParamsStreaming]
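
The streaming/non-streaming split mirrors `responses.create`: passing `stream=True` to `retrieve` selects the streaming overload, and `starting_after` lets a client resume an event stream partway through. A sketch with placeholder values:

```python
from openai import OpenAI

client = OpenAI()

# Placeholder response ID and sequence number; re-attaches to the SSE
# stream of an in-progress response, resuming after event 42.
stream = client.responses.retrieve(
    "resp_123",
    stream=True,
    starting_after=42,
)
for event in stream:
    print(event.type)
```
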
diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py
index 378226c124..4174560d42 100644
--- a/src/openai/types/responses/tool_param.py
+++ b/src/openai/types/responses/tool_param.py
@@ -28,6 +28,7 @@
"LocalShell",
]
+
class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False):
tool_names: List[str]
"""List of allowed tool names."""
@@ -177,5 +178,5 @@ class LocalShell(TypedDict, total=False):
LocalShell,
]
-
+
ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam]
diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py
index f432b7d277..c2046bdb7a 100644
--- a/tests/api_resources/beta/realtime/test_sessions.py
+++ b/tests/api_resources/beta/realtime/test_sessions.py
@@ -25,6 +25,12 @@ def test_method_create(self, client: OpenAI) -> None:
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
session = client.beta.realtime.sessions.create(
+ client_secret={
+ "expires_at": {
+ "anchor": "created_at",
+ "seconds": 0,
+ }
+ },
input_audio_format="pcm16",
input_audio_noise_reduction={"type": "near_field"},
input_audio_transcription={
@@ -92,6 +98,12 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
session = await async_client.beta.realtime.sessions.create(
+ client_secret={
+ "expires_at": {
+ "anchor": "created_at",
+ "seconds": 0,
+ }
+ },
input_audio_format="pcm16",
input_audio_noise_reduction={"type": "near_field"},
input_audio_transcription={
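
The new `client_secret` argument exercised by these tests configures the expiry of the session's ephemeral credential. A sketch outside the test harness, with an assumed 10-minute TTL:

```python
from openai import OpenAI

client = OpenAI()

# `anchor` fixes the reference point for expiry; `seconds` is an
# assumed TTL offset relative to that anchor.
session = client.beta.realtime.sessions.create(
    client_secret={
        "expires_at": {
            "anchor": "created_at",
            "seconds": 600,
        }
    },
)
print(session.client_secret)
```
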
diff --git a/tests/api_resources/beta/realtime/test_transcription_sessions.py b/tests/api_resources/beta/realtime/test_transcription_sessions.py
index 4826185bea..5a6b4f6c92 100644
--- a/tests/api_resources/beta/realtime/test_transcription_sessions.py
+++ b/tests/api_resources/beta/realtime/test_transcription_sessions.py
@@ -25,6 +25,12 @@ def test_method_create(self, client: OpenAI) -> None:
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
transcription_session = client.beta.realtime.transcription_sessions.create(
+ client_secret={
+ "expires_at": {
+ "anchor": "created_at",
+ "seconds": 0,
+ }
+ },
include=["string"],
input_audio_format="pcm16",
input_audio_noise_reduction={"type": "near_field"},
@@ -78,6 +84,12 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
transcription_session = await async_client.beta.realtime.transcription_sessions.create(
+ client_secret={
+ "expires_at": {
+ "anchor": "created_at",
+ "seconds": 0,
+ }
+ },
include=["string"],
input_audio_format="pcm16",
input_audio_noise_reduction={"type": "near_field"},
diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py
index 9916d5bdc6..eab94f0f8a 100644
--- a/tests/api_resources/beta/test_threads.py
+++ b/tests/api_resources/beta/test_threads.py
@@ -15,6 +15,8 @@
)
from openai.types.beta.threads import Run
+# pyright: reportDeprecated=false
+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -23,45 +25,50 @@ class TestThreads:
@parametrize
def test_method_create(self, client: OpenAI) -> None:
- thread = client.beta.threads.create()
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.create()
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
- thread = client.beta.threads.create(
- messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ metadata={"foo": "string"},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "chunking_strategy": {"type": "auto"},
+ "file_ids": ["string"],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ },
},
- },
- )
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
- response = client.beta.threads.with_raw_response.create()
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -70,27 +77,31 @@ def test_raw_response_create(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
- with client.beta.threads.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(Thread, thread, path=["response"])
+ thread = response.parse()
+ assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
- thread = client.beta.threads.retrieve(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.retrieve(
+ "thread_id",
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.beta.threads.with_raw_response.retrieve(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.with_raw_response.retrieve(
+ "thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -99,48 +110,55 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
- with client.beta.threads.with_streaming_response.retrieve(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.with_streaming_response.retrieve(
+ "thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(Thread, thread, path=["response"])
+ thread = response.parse()
+ assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.with_raw_response.retrieve(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.with_raw_response.retrieve(
+ "",
+ )
@parametrize
def test_method_update(self, client: OpenAI) -> None:
- thread = client.beta.threads.update(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.update(
+ thread_id="thread_id",
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
- thread = client.beta.threads.update(
- thread_id="thread_id",
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- )
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.update(
+ thread_id="thread_id",
+ metadata={"foo": "string"},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
def test_raw_response_update(self, client: OpenAI) -> None:
- response = client.beta.threads.with_raw_response.update(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.with_raw_response.update(
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -149,36 +167,41 @@ def test_raw_response_update(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_update(self, client: OpenAI) -> None:
- with client.beta.threads.with_streaming_response.update(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.with_streaming_response.update(
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(Thread, thread, path=["response"])
+ thread = response.parse()
+ assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_update(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.with_raw_response.update(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.with_raw_response.update(
+ thread_id="",
+ )
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
- thread = client.beta.threads.delete(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.delete(
+ "thread_id",
+ )
+
assert_matches_type(ThreadDeleted, thread, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
- response = client.beta.threads.with_raw_response.delete(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.with_raw_response.delete(
+ "thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -187,92 +210,99 @@ def test_raw_response_delete(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
- with client.beta.threads.with_streaming_response.delete(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.with_streaming_response.delete(
+ "thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadDeleted, thread, path=["response"])
+ thread = response.parse()
+ assert_matches_type(ThreadDeleted, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.with_raw_response.delete(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.with_raw_response.delete(
+ "",
+ )
@parametrize
def test_method_create_and_run_overload_1(self, client: OpenAI) -> None:
- thread = client.beta.threads.create_and_run(
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ )
+
assert_matches_type(Run, thread, path=["response"])
@parametrize
def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) -> None:
- thread = client.beta.threads.create_and_run(
- assistant_id="string",
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- response_format="auto",
- stream=False,
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
+ with pytest.warns(DeprecationWarning):
+ thread = client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ response_format="auto",
+ stream=False,
+ temperature=1,
+ thread={
+ "messages": [
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ "metadata": {"foo": "string"},
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "chunking_strategy": {"type": "auto"},
+ "file_ids": ["string"],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ },
},
},
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
assert_matches_type(Run, thread, path=["response"])
@parametrize
def test_raw_response_create_and_run_overload_1(self, client: OpenAI) -> None:
- response = client.beta.threads.with_raw_response.create_and_run(
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.with_raw_response.create_and_run(
+ assistant_id="assistant_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -281,87 +311,93 @@ def test_raw_response_create_and_run_overload_1(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create_and_run_overload_1(self, client: OpenAI) -> None:
- with client.beta.threads.with_streaming_response.create_and_run(
- assistant_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.with_streaming_response.create_and_run(
+ assistant_id="assistant_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(Run, thread, path=["response"])
+ thread = response.parse()
+ assert_matches_type(Run, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_create_and_run_overload_2(self, client: OpenAI) -> None:
- thread_stream = client.beta.threads.create_and_run(
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ thread_stream = client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ )
+
thread_stream.response.close()
@parametrize
def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) -> None:
- thread_stream = client.beta.threads.create_and_run(
- assistant_id="string",
- stream=True,
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- response_format="auto",
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
+ with pytest.warns(DeprecationWarning):
+ thread_stream = client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ response_format="auto",
+ temperature=1,
+ thread={
+ "messages": [
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ "metadata": {"foo": "string"},
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "chunking_strategy": {"type": "auto"},
+ "file_ids": ["string"],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ },
},
},
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
thread_stream.response.close()
@parametrize
def test_raw_response_create_and_run_overload_2(self, client: OpenAI) -> None:
- response = client.beta.threads.with_raw_response.create_and_run(
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.with_raw_response.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
@@ -369,15 +405,16 @@ def test_raw_response_create_and_run_overload_2(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create_and_run_overload_2(self, client: OpenAI) -> None:
- with client.beta.threads.with_streaming_response.create_and_run(
- assistant_id="string",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.with_streaming_response.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- stream.close()
+ stream = response.parse()
+ stream.close()
assert cast(Any, response.is_closed) is True
@@ -387,45 +424,50 @@ class TestAsyncThreads:
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.create()
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.create()
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.create(
- messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ metadata={"foo": "string"},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "chunking_strategy": {"type": "auto"},
+ "file_ids": ["string"],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ },
},
- },
- )
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.with_raw_response.create()
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -434,27 +476,31 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(Thread, thread, path=["response"])
+ thread = await response.parse()
+ assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.retrieve(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.retrieve(
+ "thread_id",
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.with_raw_response.retrieve(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.with_raw_response.retrieve(
+ "thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -463,48 +509,55 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.with_streaming_response.retrieve(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.with_streaming_response.retrieve(
+ "thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(Thread, thread, path=["response"])
+ thread = await response.parse()
+ assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.with_raw_response.retrieve(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.with_raw_response.retrieve(
+ "",
+ )
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.update(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.update(
+ thread_id="thread_id",
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.update(
- thread_id="thread_id",
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- )
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.update(
+ thread_id="thread_id",
+ metadata={"foo": "string"},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
+ )
+
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.with_raw_response.update(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.with_raw_response.update(
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -513,36 +566,41 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.with_streaming_response.update(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.with_streaming_response.update(
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(Thread, thread, path=["response"])
+ thread = await response.parse()
+ assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.with_raw_response.update(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.with_raw_response.update(
+ thread_id="",
+ )
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.delete(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.delete(
+ "thread_id",
+ )
+
assert_matches_type(ThreadDeleted, thread, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.with_raw_response.delete(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.with_raw_response.delete(
+ "thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -551,92 +609,99 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.with_streaming_response.delete(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.with_streaming_response.delete(
+ "thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadDeleted, thread, path=["response"])
+ thread = await response.parse()
+ assert_matches_type(ThreadDeleted, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.with_raw_response.delete(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.with_raw_response.delete(
+ "",
+ )
@parametrize
async def test_method_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.create_and_run(
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ )
+
assert_matches_type(Run, thread, path=["response"])
@parametrize
async def test_method_create_and_run_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
- thread = await async_client.beta.threads.create_and_run(
- assistant_id="string",
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- response_format="auto",
- stream=False,
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
+ with pytest.warns(DeprecationWarning):
+ thread = await async_client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ response_format="auto",
+ stream=False,
+ temperature=1,
+ thread={
+ "messages": [
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ "metadata": {"foo": "string"},
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "chunking_strategy": {"type": "auto"},
+ "file_ids": ["string"],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ },
},
},
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
assert_matches_type(Run, thread, path=["response"])
@parametrize
async def test_raw_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.with_raw_response.create_and_run(
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.with_raw_response.create_and_run(
+ assistant_id="assistant_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -645,87 +710,93 @@ async def test_raw_response_create_and_run_overload_1(self, async_client: AsyncO
@parametrize
async def test_streaming_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.with_streaming_response.create_and_run(
- assistant_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.with_streaming_response.create_and_run(
+ assistant_id="assistant_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(Run, thread, path=["response"])
+ thread = await response.parse()
+ assert_matches_type(Run, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
- thread_stream = await async_client.beta.threads.create_and_run(
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ thread_stream = await async_client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ )
+
await thread_stream.response.aclose()
@parametrize
async def test_method_create_and_run_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
- thread_stream = await async_client.beta.threads.create_and_run(
- assistant_id="string",
- stream=True,
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- response_format="auto",
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
+ with pytest.warns(DeprecationWarning):
+ thread_stream = await async_client.beta.threads.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ response_format="auto",
+ temperature=1,
+ thread={
+ "messages": [
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ "metadata": {"foo": "string"},
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "chunking_strategy": {"type": "auto"},
+ "file_ids": ["string"],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ },
},
},
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
await thread_stream.response.aclose()
@parametrize
async def test_raw_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.with_raw_response.create_and_run(
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.with_raw_response.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
@@ -733,14 +804,15 @@ async def test_raw_response_create_and_run_overload_2(self, async_client: AsyncO
@parametrize
async def test_streaming_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.with_streaming_response.create_and_run(
- assistant_id="string",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- stream = await response.parse()
- await stream.close()
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.with_streaming_response.create_and_run(
+ assistant_id="assistant_id",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
assert cast(Any, response.is_closed) is True
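
Every rewritten test above follows the same two-part pattern for the deprecated Assistants API: the module-level `# pyright: reportDeprecated=false` directive silences static deprecation diagnostics, while `pytest.warns(DeprecationWarning)` asserts that the runtime warning actually fires. A condensed, self-contained sketch of that pattern (the function is a stand-in, not an SDK call):

```python
# pyright: reportDeprecated=false
import warnings

import pytest


def create_thread() -> str:
    # Stand-in for a deprecated SDK call such as client.beta.threads.create().
    warnings.warn("The Assistants API is deprecated", DeprecationWarning, stacklevel=2)
    return "thread"


def test_warns_on_deprecated_call() -> None:
    # pytest.warns fails the test if no matching warning is emitted.
    with pytest.warns(DeprecationWarning):
        thread = create_thread()

    assert thread == "thread"
```
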
diff --git a/tests/api_resources/beta/threads/runs/test_steps.py b/tests/api_resources/beta/threads/runs/test_steps.py
index f5dc17e0b5..9ca70657ec 100644
--- a/tests/api_resources/beta/threads/runs/test_steps.py
+++ b/tests/api_resources/beta/threads/runs/test_steps.py
@@ -12,6 +12,8 @@
from openai.pagination import SyncCursorPage, AsyncCursorPage
from openai.types.beta.threads.runs import RunStep
+# pyright: reportDeprecated=false
+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -20,30 +22,35 @@ class TestSteps:
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
- step = client.beta.threads.runs.steps.retrieve(
- "string",
- thread_id="string",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ step = client.beta.threads.runs.steps.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ )
+
assert_matches_type(RunStep, step, path=["response"])
@parametrize
def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
- step = client.beta.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- )
+ with pytest.warns(DeprecationWarning):
+ step = client.beta.threads.runs.steps.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ )
+
assert_matches_type(RunStep, step, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.steps.with_raw_response.retrieve(
- "string",
- thread_id="string",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -52,69 +59,76 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
- with client.beta.threads.runs.steps.with_streaming_response.retrieve(
- "string",
- thread_id="string",
- run_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = response.parse()
- assert_matches_type(RunStep, step, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.steps.with_streaming_response.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ step = response.parse()
+ assert_matches_type(RunStep, step, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.steps.with_raw_response.retrieve(
- "string",
- thread_id="",
- run_id="string",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.threads.runs.steps.with_raw_response.retrieve(
- "string",
- thread_id="string",
- run_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
- client.beta.threads.runs.steps.with_raw_response.retrieve(
- "",
- thread_id="string",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="step_id",
+ thread_id="",
+ run_id="run_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
+ client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="",
+ thread_id="thread_id",
+ run_id="run_id",
+ )
@parametrize
def test_method_list(self, client: OpenAI) -> None:
- step = client.beta.threads.runs.steps.list(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ step = client.beta.threads.runs.steps.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
- step = client.beta.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- after="after",
- before="before",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- limit=0,
- order="asc",
- )
+ with pytest.warns(DeprecationWarning):
+ step = client.beta.threads.runs.steps.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ limit=0,
+ order="asc",
+ )
+
assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.steps.with_raw_response.list(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.steps.with_raw_response.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -123,31 +137,33 @@ def test_raw_response_list(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
- with client.beta.threads.runs.steps.with_streaming_response.list(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.steps.with_streaming_response.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = response.parse()
- assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
+ step = response.parse()
+ assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.steps.with_raw_response.list(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.steps.with_raw_response.list(
+ run_id="run_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.threads.runs.steps.with_raw_response.list(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.threads.runs.steps.with_raw_response.list(
+ run_id="",
+ thread_id="thread_id",
+ )
class TestAsyncSteps:
@@ -155,30 +171,35 @@ class TestAsyncSteps:
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
- step = await async_client.beta.threads.runs.steps.retrieve(
- "string",
- thread_id="string",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ step = await async_client.beta.threads.runs.steps.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ )
+
assert_matches_type(RunStep, step, path=["response"])
@parametrize
async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
- step = await async_client.beta.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- )
+ with pytest.warns(DeprecationWarning):
+ step = await async_client.beta.threads.runs.steps.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ )
+
assert_matches_type(RunStep, step, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
- "string",
- thread_id="string",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -187,69 +208,76 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.steps.with_streaming_response.retrieve(
- "string",
- thread_id="string",
- run_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = await response.parse()
- assert_matches_type(RunStep, step, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.steps.with_streaming_response.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ step = await response.parse()
+ assert_matches_type(RunStep, step, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
- "string",
- thread_id="",
- run_id="string",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
- "string",
- thread_id="string",
- run_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
- await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
- "",
- thread_id="string",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="step_id",
+ thread_id="",
+ run_id="run_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
+ await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+ step_id="",
+ thread_id="thread_id",
+ run_id="run_id",
+ )
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
- step = await async_client.beta.threads.runs.steps.list(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ step = await async_client.beta.threads.runs.steps.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
- step = await async_client.beta.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- after="after",
- before="before",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- limit=0,
- order="asc",
- )
+ with pytest.warns(DeprecationWarning):
+ step = await async_client.beta.threads.runs.steps.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ limit=0,
+ order="asc",
+ )
+
assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.steps.with_raw_response.list(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.steps.with_raw_response.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -258,28 +286,30 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.steps.with_streaming_response.list(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.steps.with_streaming_response.list(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = await response.parse()
- assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
+ step = await response.parse()
+ assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.steps.with_raw_response.list(
- "string",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.threads.runs.steps.with_raw_response.list(
- "",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.steps.with_raw_response.list(
+ run_id="run_id",
+ thread_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.threads.runs.steps.with_raw_response.list(
+ run_id="",
+ thread_id="thread_id",
+ )
diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py
index 9189a2f29e..bf3f22e8a3 100644
--- a/tests/api_resources/beta/threads/test_messages.py
+++ b/tests/api_resources/beta/threads/test_messages.py
@@ -15,6 +15,8 @@
MessageDeleted,
)
+# pyright: reportDeprecated=false
+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
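The file-level # pyright: reportDeprecated=false pragma exists because these tests call deprecated methods on purpose; without it pyright would flag every call site. A hedged sketch of the underlying mechanism, assuming the SDK marks methods with PEP 702's @deprecated decorator (via typing_extensions), which is what both pyright's reportDeprecated rule and pytest.warns(DeprecationWarning) react to:

    from typing_extensions import deprecated

    @deprecated("The Assistants API is deprecated; message text is illustrative")
    def retrieve(thread_id: str) -> None:
        # Stand-in for a deprecated SDK method: the decorator makes pyright
        # flag call sites and makes each call emit DeprecationWarning at runtime.
        ...

    retrieve("thread_123")  # flagged statically, warns at runtime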
@@ -23,36 +25,41 @@ class TestMessages:
@parametrize
def test_method_create(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.create(
- "string",
- content="string",
- role="user",
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.create(
- "string",
- content="string",
- role="user",
- attachments=[
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- metadata={"foo": "string"},
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ attachments=[
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ metadata={"foo": "string"},
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
- response = client.beta.threads.messages.with_raw_response.create(
- "string",
- content="string",
- role="user",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.messages.with_raw_response.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -61,42 +68,47 @@ def test_raw_response_create(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
- with client.beta.threads.messages.with_streaming_response.create(
- "string",
- content="string",
- role="user",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.messages.with_streaming_response.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(Message, message, path=["response"])
+ message = response.parse()
+ assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.messages.with_raw_response.create(
- "",
- content="string",
- role="user",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.create(
+ thread_id="",
+ content="string",
+ role="user",
+ )
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.retrieve(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.beta.threads.messages.with_raw_response.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.messages.with_raw_response.retrieve(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -105,55 +117,62 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
- with client.beta.threads.messages.with_streaming_response.retrieve(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.messages.with_streaming_response.retrieve(
+ message_id="message_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(Message, message, path=["response"])
+ message = response.parse()
+ assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.messages.with_raw_response.retrieve(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.retrieve(
+ message_id="message_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.beta.threads.messages.with_raw_response.retrieve(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.retrieve(
+ message_id="",
+ thread_id="thread_id",
+ )
@parametrize
def test_method_update(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
def test_raw_response_update(self, client: OpenAI) -> None:
- response = client.beta.threads.messages.with_raw_response.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.messages.with_raw_response.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -162,56 +181,63 @@ def test_raw_response_update(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_update(self, client: OpenAI) -> None:
- with client.beta.threads.messages.with_streaming_response.update(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.messages.with_streaming_response.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(Message, message, path=["response"])
+ message = response.parse()
+ assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_update(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.messages.with_raw_response.update(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.update(
+ message_id="message_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.beta.threads.messages.with_raw_response.update(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.update(
+ message_id="",
+ thread_id="thread_id",
+ )
@parametrize
def test_method_list(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.list(
+ thread_id="thread_id",
+ )
+
assert_matches_type(SyncCursorPage[Message], message, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.list(
- "string",
- after="string",
- before="string",
- limit=0,
- order="asc",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.list(
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ limit=0,
+ order="asc",
+ run_id="run_id",
+ )
+
assert_matches_type(SyncCursorPage[Message], message, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
- response = client.beta.threads.messages.with_raw_response.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.messages.with_raw_response.list(
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -220,38 +246,43 @@ def test_raw_response_list(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
- with client.beta.threads.messages.with_streaming_response.list(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.messages.with_streaming_response.list(
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(SyncCursorPage[Message], message, path=["response"])
+ message = response.parse()
+ assert_matches_type(SyncCursorPage[Message], message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.messages.with_raw_response.list(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.list(
+ thread_id="",
+ )
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
- message = client.beta.threads.messages.delete(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = client.beta.threads.messages.delete(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(MessageDeleted, message, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
- response = client.beta.threads.messages.with_raw_response.delete(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.messages.with_raw_response.delete(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -260,31 +291,33 @@ def test_raw_response_delete(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
- with client.beta.threads.messages.with_streaming_response.delete(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.messages.with_streaming_response.delete(
+ message_id="message_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageDeleted, message, path=["response"])
+ message = response.parse()
+ assert_matches_type(MessageDeleted, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.messages.with_raw_response.delete(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.delete(
+ message_id="message_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.beta.threads.messages.with_raw_response.delete(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+ client.beta.threads.messages.with_raw_response.delete(
+ message_id="",
+ thread_id="thread_id",
+ )
class TestAsyncMessages:
@@ -292,36 +325,41 @@ class TestAsyncMessages:
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.create(
- "string",
- content="string",
- role="user",
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.create(
- "string",
- content="string",
- role="user",
- attachments=[
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- metadata={"foo": "string"},
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ attachments=[
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ metadata={"foo": "string"},
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.messages.with_raw_response.create(
- "string",
- content="string",
- role="user",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.messages.with_raw_response.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -330,42 +368,47 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.messages.with_streaming_response.create(
- "string",
- content="string",
- role="user",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.messages.with_streaming_response.create(
+ thread_id="thread_id",
+ content="string",
+ role="user",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(Message, message, path=["response"])
+ message = await response.parse()
+ assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.create(
- "",
- content="string",
- role="user",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.create(
+ thread_id="",
+ content="string",
+ role="user",
+ )
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.retrieve(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.messages.with_raw_response.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.messages.with_raw_response.retrieve(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -374,55 +417,62 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.messages.with_streaming_response.retrieve(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.messages.with_streaming_response.retrieve(
+ message_id="message_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(Message, message, path=["response"])
+ message = await response.parse()
+ assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.retrieve(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.retrieve(
+ message_id="message_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.retrieve(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.retrieve(
+ message_id="",
+ thread_id="thread_id",
+ )
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
+ )
+
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.messages.with_raw_response.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.messages.with_raw_response.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -431,56 +481,63 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.messages.with_streaming_response.update(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.messages.with_streaming_response.update(
+ message_id="message_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(Message, message, path=["response"])
+ message = await response.parse()
+ assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.update(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.update(
+ message_id="message_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.update(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.update(
+ message_id="",
+ thread_id="thread_id",
+ )
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.list(
+ thread_id="thread_id",
+ )
+
assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.list(
- "string",
- after="string",
- before="string",
- limit=0,
- order="asc",
- run_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.list(
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ limit=0,
+ order="asc",
+ run_id="run_id",
+ )
+
assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.messages.with_raw_response.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.messages.with_raw_response.list(
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -489,38 +546,43 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.messages.with_streaming_response.list(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.messages.with_streaming_response.list(
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
+ message = await response.parse()
+ assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.list(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.list(
+ thread_id="",
+ )
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
- message = await async_client.beta.threads.messages.delete(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ message = await async_client.beta.threads.messages.delete(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(MessageDeleted, message, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.messages.with_raw_response.delete(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.messages.with_raw_response.delete(
+ message_id="message_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -529,28 +591,30 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.messages.with_streaming_response.delete(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.messages.with_streaming_response.delete(
+ message_id="message_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageDeleted, message, path=["response"])
+ message = await response.parse()
+ assert_matches_type(MessageDeleted, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.delete(
- "string",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.beta.threads.messages.with_raw_response.delete(
- "",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.delete(
+ message_id="message_id",
+ thread_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+ await async_client.beta.threads.messages.with_raw_response.delete(
+ message_id="",
+ thread_id="thread_id",
+ )
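The path-parameter tests above keep their ValueError assertions unchanged; only the call spelling moves from positional "string" arguments to keywords, so each empty-value check now names the parameter it exercises. A small sketch of the guard behavior those assertions pin down (require_path_param is a hypothetical name, for illustration only):

    def require_path_param(name: str, value: str) -> str:
        # Mirrors the behavior the tests assert: an empty path segment
        # fails fast, before any HTTP request is built.
        if value == "":
            raise ValueError(f"Expected a non-empty value for `{name}` but received ''")
        return value

    require_path_param("thread_id", "thread_abc")  # returns the value unchanged
    # require_path_param("message_id", "")         # raises ValueError, as asserted above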
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index 4230ccebe4..fdef5e40db 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
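The runs tests distinguish two overloads per streaming-capable method: overload_1 covers stream omitted or False and asserts on the returned Run, while overload_2 passes stream=True and must close the stream it receives. The deprecation wrapper goes around the call in both cases; closing happens outside it, as in this condensed excerpt of the pattern from the hunks below:

    import pytest

    def test_create_stream_overload(client) -> None:
        with pytest.warns(DeprecationWarning):
            run_stream = client.beta.threads.runs.create(
                thread_id="thread_id",
                assistant_id="assistant_id",
                stream=True,
            )
        run_stream.response.close()  # the streaming overload hands stream ownership to the caller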
@@ -24,58 +24,63 @@ class TestRuns:
@parametrize
def test_method_create_overload_1(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.create(
- thread_id="thread_id",
- assistant_id="assistant_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- stream=False,
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
+ additional_messages=[
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ reasoning_effort="low",
+ response_format="auto",
+ stream=False,
+ temperature=1,
+ tool_choice="none",
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.create(
- "string",
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,82 +89,89 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.create(
- "string",
- assistant_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create_overload_1(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.create(
- "",
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.create(
+ thread_id="",
+ assistant_id="assistant_id",
+ )
@parametrize
def test_method_create_overload_2(self, client: OpenAI) -> None:
- run_stream = client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ run_stream = client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ )
+
run_stream.response.close()
@parametrize
def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
- run_stream = client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- stream=True,
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ with pytest.warns(DeprecationWarning):
+ run_stream = client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
+ additional_messages=[
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ reasoning_effort="low",
+ response_format="auto",
+ temperature=1,
+ tool_choice="none",
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
run_stream.response.close()
@parametrize
def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.create(
- "string",
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
@@ -167,42 +179,47 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.create(
- "string",
- assistant_id="string",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- stream.close()
+ stream = response.parse()
+ stream.close()
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create_overload_2(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.create(
- "",
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.create(
+ thread_id="",
+ assistant_id="assistant_id",
+ stream=True,
+ )
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.retrieve(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.retrieve(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -211,55 +228,62 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.retrieve(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.retrieve(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.retrieve(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.retrieve(
+ run_id="run_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.threads.runs.with_raw_response.retrieve(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.retrieve(
+ run_id="",
+ thread_id="thread_id",
+ )
@parametrize
def test_method_update(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_raw_response_update(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -268,55 +292,62 @@ def test_raw_response_update(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_update(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.update(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_update(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.update(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.update(
+ run_id="run_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.threads.runs.with_raw_response.update(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.update(
+ run_id="",
+ thread_id="thread_id",
+ )
@parametrize
def test_method_list(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.list(
+ thread_id="thread_id",
+ )
+
assert_matches_type(SyncCursorPage[Run], run, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.list(
- "string",
- after="string",
- before="string",
- limit=0,
- order="asc",
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.list(
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ limit=0,
+ order="asc",
+ )
+
assert_matches_type(SyncCursorPage[Run], run, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.list(
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -325,38 +356,43 @@ def test_raw_response_list(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.list(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.list(
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(SyncCursorPage[Run], run, path=["response"])
+ run = response.parse()
+ assert_matches_type(SyncCursorPage[Run], run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.list(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.list(
+ thread_id="",
+ )
@parametrize
def test_method_cancel(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.cancel(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.cancel(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_raw_response_cancel(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.cancel(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.cancel(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -365,63 +401,70 @@ def test_raw_response_cancel(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_cancel(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.cancel(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.cancel(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_cancel(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.cancel(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.cancel(
+ run_id="run_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.threads.runs.with_raw_response.cancel(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.cancel(
+ run_id="",
+ thread_id="thread_id",
+ )
@parametrize
def test_method_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_method_submit_tool_outputs_with_all_params_overload_1(self, client: OpenAI) -> None:
- run = client.beta.threads.runs.submit_tool_outputs(
- "string",
- thread_id="string",
- tool_outputs=[
- {
- "output": "output",
- "tool_call_id": "tool_call_id",
- }
- ],
- stream=False,
- )
+ with pytest.warns(DeprecationWarning):
+ run = client.beta.threads.runs.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[
+ {
+ "output": "output",
+ "tool_call_id": "tool_call_id",
+ }
+ ],
+ stream=False,
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
def test_raw_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -430,53 +473,58 @@ def test_raw_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> No
@parametrize
def test_streaming_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "string",
- thread_id="",
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="",
+ tool_outputs=[{}],
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ )
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                run_id="",
-                thread_id="thread_id",
-                tool_outputs=[{}],
-            )
@parametrize
def test_method_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
-        run_stream = client.beta.threads.runs.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            stream=True,
-            tool_outputs=[{}],
-        )
+        with pytest.warns(DeprecationWarning):
+            run_stream = client.beta.threads.runs.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                stream=True,
+                tool_outputs=[{}],
+            )
run_stream.response.close()
@parametrize
def test_raw_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
- response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "string",
- thread_id="string",
- stream=True,
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ stream=True,
+ tool_outputs=[{}],
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
@@ -484,37 +532,39 @@ def test_raw_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> No
@parametrize
def test_streaming_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
- with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
- "string",
- thread_id="string",
- stream=True,
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- stream = response.parse()
- stream.close()
+ with pytest.warns(DeprecationWarning):
+ with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ stream=True,
+ tool_outputs=[{}],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "string",
- thread_id="",
- stream=True,
- tool_outputs=[{}],
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "",
- thread_id="string",
- stream=True,
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="",
+ stream=True,
+ tool_outputs=[{}],
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="",
+ thread_id="thread_id",
+ stream=True,
+ tool_outputs=[{}],
+ )
class TestAsyncRuns:
@@ -522,58 +572,63 @@ class TestAsyncRuns:
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.create(
- thread_id="thread_id",
- assistant_id="assistant_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- stream=False,
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
+ additional_messages=[
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ reasoning_effort="low",
+ response_format="auto",
+ stream=False,
+ temperature=1,
+ tool_choice="none",
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.create(
- "string",
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -582,82 +637,89 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.create(
- "string",
- assistant_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = await response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create_overload_1(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.create(
- "",
- assistant_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.create(
+ thread_id="",
+ assistant_id="assistant_id",
+ )
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
- run_stream = await async_client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ run_stream = await async_client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ )
+
await run_stream.response.aclose()
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
- run_stream = await async_client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- stream=True,
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="string",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="string",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
+ with pytest.warns(DeprecationWarning):
+ run_stream = await async_client.beta.threads.runs.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
+ additional_messages=[
+ {
+ "content": "string",
+ "role": "user",
+ "attachments": [
+ {
+ "file_id": "file_id",
+ "tools": [{"type": "code_interpreter"}],
+ }
+ ],
+ "metadata": {"foo": "string"},
+ }
+ ],
+ instructions="instructions",
+ max_completion_tokens=256,
+ max_prompt_tokens=256,
+ metadata={"foo": "string"},
+ model="string",
+ parallel_tool_calls=True,
+ reasoning_effort="low",
+ response_format="auto",
+ temperature=1,
+ tool_choice="none",
+ tools=[{"type": "code_interpreter"}],
+ top_p=1,
+ truncation_strategy={
+ "type": "auto",
+ "last_messages": 1,
+ },
+ )
+
await run_stream.response.aclose()
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.create(
- "string",
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
@@ -665,42 +727,47 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.create(
- "string",
- assistant_id="string",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.create(
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = await response.parse()
- await stream.close()
+ stream = await response.parse()
+ await stream.close()
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create_overload_2(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.create(
- "",
- assistant_id="string",
- stream=True,
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.create(
+ thread_id="",
+ assistant_id="assistant_id",
+ stream=True,
+ )
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.retrieve(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.retrieve(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.retrieve(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -709,55 +776,62 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.retrieve(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.retrieve(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = await response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.retrieve(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.retrieve(
+ run_id="run_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.retrieve(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.retrieve(
+ run_id="",
+ thread_id="thread_id",
+ )
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.update(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -766,55 +840,62 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.update(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.update(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = await response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.update(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.update(
+ run_id="run_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.update(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.update(
+ run_id="",
+ thread_id="thread_id",
+ )
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.list(
+ thread_id="thread_id",
+ )
+
assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.list(
- "string",
- after="string",
- before="string",
- limit=0,
- order="asc",
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.list(
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ limit=0,
+ order="asc",
+ )
+
assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.list(
- "string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.list(
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -823,38 +904,43 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.list(
- "string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.list(
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
+ run = await response.parse()
+ assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.list(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.list(
+ thread_id="",
+ )
@parametrize
async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.cancel(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.cancel(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.cancel(
- "string",
- thread_id="string",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.cancel(
+ run_id="run_id",
+ thread_id="thread_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -863,63 +949,70 @@ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.cancel(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.cancel(
+ run_id="run_id",
+ thread_id="thread_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = await response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.cancel(
- "string",
- thread_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.cancel(
+ run_id="run_id",
+ thread_id="",
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.cancel(
- "",
- thread_id="string",
- )
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.cancel(
+ run_id="",
+ thread_id="thread_id",
+ )
@parametrize
async def test_method_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_method_submit_tool_outputs_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
- run = await async_client.beta.threads.runs.submit_tool_outputs(
- "string",
- thread_id="string",
- tool_outputs=[
- {
- "output": "output",
- "tool_call_id": "tool_call_id",
- }
- ],
- stream=False,
- )
+ with pytest.warns(DeprecationWarning):
+ run = await async_client.beta.threads.runs.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[
+ {
+ "output": "output",
+ "tool_call_id": "tool_call_id",
+ }
+ ],
+ stream=False,
+ )
+
assert_matches_type(Run, run, path=["response"])
@parametrize
async def test_raw_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -928,53 +1021,58 @@ async def test_raw_response_submit_tool_outputs_overload_1(self, async_client: A
@parametrize
async def test_streaming_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(Run, run, path=["response"])
+ run = await response.parse()
+ assert_matches_type(Run, run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "string",
- thread_id="",
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="",
+ tool_outputs=[{}],
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="",
+ thread_id="thread_id",
+ tool_outputs=[{}],
+ )
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="",
+ @parametrize
+ async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
+ with pytest.warns(DeprecationWarning):
+ run_stream = await async_client.beta.threads.runs.submit_tool_outputs(
+ run_id="run_id",
thread_id="thread_id",
+ stream=True,
tool_outputs=[{}],
)
- @parametrize
- async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
- run_stream = await async_client.beta.threads.runs.submit_tool_outputs(
- "string",
- thread_id="string",
- stream=True,
- tool_outputs=[{}],
- )
await run_stream.response.aclose()
@parametrize
async def test_raw_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "string",
- thread_id="string",
- stream=True,
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ stream=True,
+ tool_outputs=[{}],
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
@@ -982,34 +1080,36 @@ async def test_raw_response_submit_tool_outputs_overload_2(self, async_client: A
@parametrize
async def test_streaming_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
- "string",
- thread_id="string",
- stream=True,
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- stream = await response.parse()
- await stream.close()
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="thread_id",
+ stream=True,
+ tool_outputs=[{}],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "string",
- thread_id="",
- stream=True,
- tool_outputs=[{}],
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
- "",
- thread_id="string",
- stream=True,
- tool_outputs=[{}],
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="run_id",
+ thread_id="",
+ stream=True,
+ tool_outputs=[{}],
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+ run_id="",
+ thread_id="thread_id",
+ stream=True,
+ tool_outputs=[{}],
+ )
diff --git a/tests/api_resources/containers/files/test_content.py b/tests/api_resources/containers/files/test_content.py
index 470353e18d..402607058f 100644
--- a/tests/api_resources/containers/files/test_content.py
+++ b/tests/api_resources/containers/files/test_content.py
@@ -5,9 +5,15 @@
import os
from typing import Any, cast
+import httpx
import pytest
+from respx import MockRouter
+import openai._legacy_response as _legacy_response
from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+
+# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -16,15 +22,25 @@ class TestContent:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_retrieve(self, client: OpenAI) -> None:
+ @pytest.mark.respx(base_url=base_url)
+ def test_method_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/containers/container_id/files/file_id/content").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
content = client.containers.files.content.retrieve(
file_id="file_id",
container_id="container_id",
)
- assert content is None
+ assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
+ assert content.json() == {"foo": "bar"}
@parametrize
- def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ @pytest.mark.respx(base_url=base_url)
+ def test_raw_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/containers/container_id/files/file_id/content").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+
response = client.containers.files.content.with_raw_response.retrieve(
file_id="file_id",
container_id="container_id",
@@ -33,10 +49,14 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
- assert content is None
+ assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ @pytest.mark.respx(base_url=base_url)
+ def test_streaming_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/containers/container_id/files/file_id/content").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
with client.containers.files.content.with_streaming_response.retrieve(
file_id="file_id",
container_id="container_id",
@@ -45,11 +65,12 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
- assert content is None
+ assert_matches_type(bytes, content, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
+ @pytest.mark.respx(base_url=base_url)
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
client.containers.files.content.with_raw_response.retrieve(
@@ -68,15 +89,25 @@ class TestAsyncContent:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ @pytest.mark.respx(base_url=base_url)
+ async def test_method_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/containers/container_id/files/file_id/content").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
content = await async_client.containers.files.content.retrieve(
file_id="file_id",
container_id="container_id",
)
- assert content is None
+ assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
+ assert content.json() == {"foo": "bar"}
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ @pytest.mark.respx(base_url=base_url)
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/containers/container_id/files/file_id/content").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+
response = await async_client.containers.files.content.with_raw_response.retrieve(
file_id="file_id",
container_id="container_id",
@@ -85,10 +116,14 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
- assert content is None
+ assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ @pytest.mark.respx(base_url=base_url)
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/containers/container_id/files/file_id/content").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
async with async_client.containers.files.content.with_streaming_response.retrieve(
file_id="file_id",
container_id="container_id",
@@ -97,11 +132,12 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = await response.parse()
- assert content is None
+ assert_matches_type(bytes, content, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
+ @pytest.mark.respx(base_url=base_url)
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
await async_client.containers.files.content.with_raw_response.retrieve(
diff --git a/tests/api_resources/fine_tuning/alpha/test_graders.py b/tests/api_resources/fine_tuning/alpha/test_graders.py
index b144c78c74..c7fe6670f3 100644
--- a/tests/api_resources/fine_tuning/alpha/test_graders.py
+++ b/tests/api_resources/fine_tuning/alpha/test_graders.py
@@ -31,7 +31,6 @@ def test_method_run(self, client: OpenAI) -> None:
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
)
assert_matches_type(GraderRunResponse, grader, path=["response"])
@@ -46,7 +45,7 @@ def test_method_run_with_all_params(self, client: OpenAI) -> None:
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
+ item={},
)
assert_matches_type(GraderRunResponse, grader, path=["response"])
@@ -61,7 +60,6 @@ def test_raw_response_run(self, client: OpenAI) -> None:
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
)
assert response.is_closed is True
@@ -80,7 +78,6 @@ def test_streaming_response_run(self, client: OpenAI) -> None:
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -167,7 +164,6 @@ async def test_method_run(self, async_client: AsyncOpenAI) -> None:
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
)
assert_matches_type(GraderRunResponse, grader, path=["response"])
@@ -182,7 +178,7 @@ async def test_method_run_with_all_params(self, async_client: AsyncOpenAI) -> No
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
+ item={},
)
assert_matches_type(GraderRunResponse, grader, path=["response"])
@@ -197,7 +193,6 @@ async def test_raw_response_run(self, async_client: AsyncOpenAI) -> None:
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
)
assert response.is_closed is True
@@ -216,7 +211,6 @@ async def test_streaming_response_run(self, async_client: AsyncOpenAI) -> None:
"type": "string_check",
},
model_sample="model_sample",
- reference_answer="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 0d33de4a15..7c0f980fbd 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -164,22 +164,24 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: OpenAI) -> None:
+ def test_method_retrieve_overload_1(self, client: OpenAI) -> None:
response = client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
)
assert_matches_type(Response, response, path=["response"])
@parametrize
- def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
+ def test_method_retrieve_with_all_params_overload_1(self, client: OpenAI) -> None:
response = client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
include=["file_search_call.results"],
+ starting_after=0,
+ stream=False,
)
assert_matches_type(Response, response, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ def test_raw_response_retrieve_overload_1(self, client: OpenAI) -> None:
http_response = client.responses.with_raw_response.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
)
@@ -190,7 +192,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
assert_matches_type(Response, response, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ def test_streaming_response_retrieve_overload_1(self, client: OpenAI) -> None:
with client.responses.with_streaming_response.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
) as http_response:
@@ -203,10 +205,61 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
assert cast(Any, http_response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: OpenAI) -> None:
+ def test_path_params_retrieve_overload_1(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+ client.responses.with_raw_response.retrieve(
+ response_id="",
+ )
+
+ @parametrize
+ def test_method_retrieve_overload_2(self, client: OpenAI) -> None:
+ response_stream = client.responses.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ )
+ response_stream.response.close()
+
+ @parametrize
+ def test_method_retrieve_with_all_params_overload_2(self, client: OpenAI) -> None:
+ response_stream = client.responses.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ include=["file_search_call.results"],
+ starting_after=0,
+ )
+ response_stream.response.close()
+
+ @parametrize
+ def test_raw_response_retrieve_overload_2(self, client: OpenAI) -> None:
+ response = client.responses.with_raw_response.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
+
+ @parametrize
+ def test_streaming_response_retrieve_overload_2(self, client: OpenAI) -> None:
+ with client.responses.with_streaming_response.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve_overload_2(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
client.responses.with_raw_response.retrieve(
response_id="",
+ stream=True,
)
@parametrize
@@ -252,7 +305,7 @@ def test_method_cancel(self, client: OpenAI) -> None:
response = client.responses.cancel(
"resp_677efb5139a88190b512bc3fef8e535d",
)
- assert response is None
+ assert_matches_type(Response, response, path=["response"])
@parametrize
def test_raw_response_cancel(self, client: OpenAI) -> None:
@@ -263,7 +316,7 @@ def test_raw_response_cancel(self, client: OpenAI) -> None:
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert response is None
+ assert_matches_type(Response, response, path=["response"])
@parametrize
def test_streaming_response_cancel(self, client: OpenAI) -> None:
@@ -274,7 +327,7 @@ def test_streaming_response_cancel(self, client: OpenAI) -> None:
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert response is None
+ assert_matches_type(Response, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
@@ -436,22 +489,24 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async def test_method_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
)
assert_matches_type(Response, response, path=["response"])
@parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ async def test_method_retrieve_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
include=["file_search_call.results"],
+ starting_after=0,
+ stream=False,
)
assert_matches_type(Response, response, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async def test_raw_response_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
http_response = await async_client.responses.with_raw_response.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
)
@@ -462,7 +517,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
assert_matches_type(Response, response, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async def test_streaming_response_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
async with async_client.responses.with_streaming_response.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
) as http_response:
@@ -475,10 +530,61 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N
assert cast(Any, http_response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async def test_path_params_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+ await async_client.responses.with_raw_response.retrieve(
+ response_id="",
+ )
+
+ @parametrize
+ async def test_method_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
+ response_stream = await async_client.responses.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ )
+ await response_stream.response.aclose()
+
+ @parametrize
+ async def test_method_retrieve_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
+ response_stream = await async_client.responses.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ include=["file_search_call.results"],
+ starting_after=0,
+ )
+ await response_stream.response.aclose()
+
+ @parametrize
+ async def test_raw_response_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.responses.with_raw_response.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ await stream.close()
+
+ @parametrize
+ async def test_streaming_response_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.responses.with_streaming_response.retrieve(
+ response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
await async_client.responses.with_raw_response.retrieve(
response_id="",
+ stream=True,
)
@parametrize
@@ -524,7 +630,7 @@ async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
response = await async_client.responses.cancel(
"resp_677efb5139a88190b512bc3fef8e535d",
)
- assert response is None
+ assert_matches_type(Response, response, path=["response"])
@parametrize
async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
@@ -535,7 +641,7 @@ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert response is None
+ assert_matches_type(Response, response, path=["response"])
@parametrize
async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
@@ -546,7 +652,7 @@ async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> Non
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = await http_response.parse()
- assert response is None
+ assert_matches_type(Response, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
diff --git a/tests/lib/chat/_utils.py b/tests/lib/chat/_utils.py
index af08db417c..f3982278f3 100644
--- a/tests/lib/chat/_utils.py
+++ b/tests/lib/chat/_utils.py
@@ -28,7 +28,7 @@ def __repr_args__(self: pydantic.BaseModel) -> ReprArgs:
string = rich_print_str(obj)
- # we remove all `fn_name..` occurences
+ # we remove all `fn_name..` occurrences
# so that we can share the same snapshots between
# pydantic v1 and pydantic v2 as their output for
# generic models differs, e.g.
diff --git a/tests/lib/test_assistants.py b/tests/lib/test_assistants.py
index 67d021ec35..08ea9300c3 100644
--- a/tests/lib/test_assistants.py
+++ b/tests/lib/test_assistants.py
@@ -11,7 +11,7 @@ def test_create_and_run_poll_method_definition_in_sync(sync: bool, client: OpenA
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
assert_signatures_in_sync(
- checking_client.beta.threads.create_and_run,
+ checking_client.beta.threads.create_and_run, # pyright: ignore[reportDeprecated]
checking_client.beta.threads.create_and_run_poll,
exclude_params={"stream"},
)
@@ -22,7 +22,7 @@ def test_create_and_run_stream_method_definition_in_sync(sync: bool, client: Ope
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
assert_signatures_in_sync(
- checking_client.beta.threads.create_and_run,
+ checking_client.beta.threads.create_and_run, # pyright: ignore[reportDeprecated]
checking_client.beta.threads.create_and_run_stream,
exclude_params={"stream"},
)
@@ -33,8 +33,8 @@ def test_run_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
assert_signatures_in_sync(
- checking_client.beta.threads.runs.create,
- checking_client.beta.threads.runs.stream,
+ checking_client.beta.threads.runs.create, # pyright: ignore[reportDeprecated]
+ checking_client.beta.threads.runs.stream, # pyright: ignore[reportDeprecated]
exclude_params={"stream"},
)
@@ -44,7 +44,7 @@ def test_create_and_poll_method_definition_in_sync(sync: bool, client: OpenAI, a
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
assert_signatures_in_sync(
- checking_client.beta.threads.runs.create,
- checking_client.beta.threads.runs.create_and_poll,
+ checking_client.beta.threads.runs.create, # pyright: ignore[reportDeprecated]
+ checking_client.beta.threads.runs.create_and_poll, # pyright: ignore[reportDeprecated]
exclude_params={"stream"},
)
diff --git a/tests/test_client.py b/tests/test_client.py
index 616255af3c..2b7aeaf946 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -908,6 +908,33 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
+ @pytest.mark.respx(base_url=base_url)
+ def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ self.client.post(
+ "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
+
class TestAsyncOpenAI:
client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
@@ -1829,3 +1856,30 @@ async def test_main() -> None:
raise AssertionError("calling get_platform using asyncify resulted in a hung process")
time.sleep(0.1)
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ await self.client.post(
+ "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"