Confirm this is an issue with the Python library and not an underlying OpenAI API
- [x] This is an issue with the Python library
Describe the bug
Passing response_format={"type": "json_object"} to chat.completions.create with model="gpt-3.5-turbo" raises a BadRequestError (HTTP 400):
completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
    model="gpt-3.5-turbo",
    response_format={"type": "json_object"},
)
---------------------------------------------------------------------------
BadRequestError Traceback (most recent call last)
Cell In[5], line 1
----> 1 completion = client.chat.completions.create(
2 messages=[
3 {
4 "role": "user",
5 "content": "Can you generate an example json object describing a fruit?",
6 }
7 ],
8 model="gpt-3.5-turbo",
9 response_format={"type": "json_object"},
10 )
File ~/anaconda3/envs/llama/lib/python3.10/site-packages/openai/_utils/_utils.py:299, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
297 msg = f"Missing required argument: {quote(missing[0])}"
298 raise TypeError(msg)
--> 299 return func(*args, **kwargs)
File ~/anaconda3/envs/llama/lib/python3.10/site-packages/openai/resources/chat/completions.py:598, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_p, user, extra_headers, extra_query, extra_body, timeout)
551 @required_args(["messages", "model"], ["messages", "model", "stream"])
552 def create(
553 self,
(...)
596 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
597 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 598 return self._post(
599 "/chat/completions",
600 body=maybe_transform(
601 {
602 "messages": messages,
603 "model": model,
604 "frequency_penalty": frequency_penalty,
605 "function_call": function_call,
606 "functions": functions,
607 "logit_bias": logit_bias,
608 "max_tokens": max_tokens,
609 "n": n,
610 "presence_penalty": presence_penalty,
611 "response_format": response_format,
612 "seed": seed,
613 "stop": stop,
614 "stream": stream,
615 "temperature": temperature,
616 "tool_choice": tool_choice,
617 "tools": tools,
618 "top_p": top_p,
619 "user": user,
620 },
621 completion_create_params.CompletionCreateParams,
622 ),
623 options=make_request_options(
624 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
625 ),
626 cast_to=ChatCompletion,
627 stream=stream or False,
628 stream_cls=Stream[ChatCompletionChunk],
629 )
File ~/anaconda3/envs/llama/lib/python3.10/site-packages/openai/_base_client.py:1063, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1049 def post(
1050 self,
1051 path: str,
(...)
1058 stream_cls: type[_StreamT] | None = None,
1059 ) -> ResponseT | _StreamT:
1060 opts = FinalRequestOptions.construct(
1061 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1062 )
-> 1063 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File ~/anaconda3/envs/llama/lib/python3.10/site-packages/openai/_base_client.py:842, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
833 def request(
834 self,
835 cast_to: Type[ResponseT],
(...)
840 stream_cls: type[_StreamT] | None = None,
841 ) -> ResponseT | _StreamT:
--> 842 return self._request(
843 cast_to=cast_to,
844 options=options,
845 stream=stream,
846 stream_cls=stream_cls,
847 remaining_retries=remaining_retries,
848 )
File ~/anaconda3/envs/llama/lib/python3.10/site-packages/openai/_base_client.py:885, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
882 # If the response is streamed then we need to explicitly read the response
883 # to completion before attempting to access the response text.
884 err.response.read()
--> 885 raise self._make_status_error_from_response(err.response) from None
886 except httpx.TimeoutException as err:
887 if retries > 0:
BadRequestError: Error code: 400 - {'error': {'message': "Invalid parameter: 'response_format' of type 'json_object' is not supported with this model.", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}
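The 400 above means the snapshot behind the bare gpt-3.5-turbo alias does not accept JSON mode. When this was filed (library 1.3.5, November 2023), response_format={"type": "json_object"} was, per OpenAI's announcement, limited to the newer 1106 snapshots (gpt-3.5-turbo-1106, gpt-4-1106-preview), so pinning one of those is the likely fix. A minimal sketch, assuming gpt-3.5-turbo-1106 is available on the account:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
    # Assumption: the 1106 snapshot, which shipped with JSON mode support
    model="gpt-3.5-turbo-1106",
    response_format={"type": "json_object"},
)
print(completion.choices[0].message.content)

Note that JSON mode also requires the word "json" to appear somewhere in the messages; the prompt above already satisfies that.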
To Reproduce
- Run the code block from the "Code snippets" section below.
- The request fails with the BadRequestError shown above: Error code: 400, "Invalid parameter: 'response_format' of type 'json_object' is not supported with this model."
Code snippets
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
    model="gpt-3.5-turbo",  # fails: this alias does not support JSON mode
    response_format={"type": "json_object"},
)
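For reference, once a supporting model is used, the returned message content is a JSON string that can be parsed directly; a short sketch of consuming it (assuming completion comes from a successful call like the gpt-3.5-turbo-1106 sketch above):

import json

# Parse the JSON payload returned under JSON mode
fruit = json.loads(completion.choices[0].message.content)
print(fruit)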
OS
Ubuntu 22.04.1
Python version
3.10.13
Library version
1.3.5