From 8462450af772a39d74e687056dcc127fac4c6614 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 31 Mar 2025 06:03:26 +0000 Subject: [PATCH] Auto-generated API code --- elasticsearch/_async/client/__init__.py | 16 +- elasticsearch/_async/client/inference.py | 1513 +++++++++++++++++++++- elasticsearch/_sync/client/__init__.py | 16 +- elasticsearch/_sync/client/inference.py | 1513 +++++++++++++++++++++- elasticsearch/dsl/field.py | 17 + 5 files changed, 3031 insertions(+), 44 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index a897f2d37..4eab35bb0 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -1581,7 +1581,7 @@ async def delete_by_query( If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text @@ -3420,7 +3420,8 @@ async def msearch( computationally expensive named queries on a large number of hits may add significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi - search API can execute. + search API can execute. Defaults to `max(1, (# of data nodes * min(search + thread pool size, 10)))`. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter @@ -3748,6 +3749,7 @@ async def open_point_in_time( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, + max_concurrent_shard_requests: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, @@ -3803,6 +3805,8 @@ async def open_point_in_time( a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. + :param max_concurrent_shard_requests: Maximum number of concurrent shard requests + that each sub-search request executes per node. :param preference: The node or shard the operation should be performed on. By default, it is random. 
:param routing: A custom value that is used to route operations to a specific @@ -3830,6 +3834,8 @@ async def open_point_in_time( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if max_concurrent_shard_requests is not None: + __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: @@ -4370,7 +4376,7 @@ async def render_search_template( human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, - source: t.Optional[str] = None, + source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -5774,7 +5780,7 @@ async def search_template( search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, - source: t.Optional[str] = None, + source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: @@ -6512,7 +6518,7 @@ async def update_by_query( wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index ce96dba63..73983f07a 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -234,6 +234,67 @@ async def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="chat_completion_request", + ) + async def post_eis_chat_completion( + self, + *, + eis_inference_id: str, + chat_completion_request: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Perform a chat completion task through the Elastic Inference Service (EIS).
+Perform a chat completion inference task with the elastic service.
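For orientation, a minimal usage sketch of the new method from the Python client (the synchronous client is shown; the async client in this file is identical apart from await). The endpoint name and the OpenAI-style "messages" request shape are illustrative assumptions, not taken from this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200", api_key="<api-key>")

    # Streams a chat completion through a previously created EIS endpoint.
    resp = client.inference.post_eis_chat_completion(
        eis_inference_id="my-eis-endpoint",  # assumed endpoint name
        chat_completion_request={
            "messages": [{"role": "user", "content": "Say hello"}]  # assumed body shape
        },
    )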

+ + + ``_ + + :param eis_inference_id: The unique identifier of the inference endpoint. + :param chat_completion_request: + """ + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if chat_completion_request is None and body is None: + raise ValueError( + "Empty value passed for parameters 'chat_completion_request' and 'body', one of them should be set." + ) + elif chat_completion_request is not None and body is not None: + raise ValueError("Cannot set both 'chat_completion_request' and 'body'") + __path_parts: t.Dict[str, str] = {"eis_inference_id": _quote(eis_inference_id)} + __path = ( + f'/_inference/chat_completion/{__path_parts["eis_inference_id"]}/_stream' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = ( + chat_completion_request if chat_completion_request is not None else body + ) + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.post_eis_chat_completion", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="inference_config", ) @@ -329,14 +390,14 @@ async def put( "task_settings", ), ) - async def put_openai( + async def put_alibabacloud( self, *, task_type: t.Union[ - str, t.Literal["chat_completion", "completion", "text_embedding"] + str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"] ], - openai_inference_id: str, - service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, + alibabacloud_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, @@ -349,8 +410,8 @@ async def put_openai( """ .. raw:: html -

-Create an OpenAI inference endpoint.
-Create an inference endpoint to perform an inference task with the openai service.
+Create an AlibabaCloud AI Search inference endpoint.
+Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.
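A hedged sketch of the renamed method, assuming `client` is an Elasticsearch client as in the earlier example. The service_settings keys (api_key, service_id, host, workspace) follow the Elastic documentation for the alibabacloud-ai-search service and, like the values, are assumptions rather than part of this patch:

    client.inference.put_alibabacloud(
        task_type="text_embedding",
        alibabacloud_inference_id="my-alibabacloud-endpoint",  # assumed name
        service="alibabacloud-ai-search",
        service_settings={
            "api_key": "<api-key>",
            "service_id": "ops-text-embedding-001",  # assumed model/service id
            "host": "<your-opensearch-host>",
            "workspace": "default",
        },
    )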

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. @@ -358,33 +419,33 @@ async def put_openai( Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

- ``_ + ``_ :param task_type: The type of the inference task that the model will perform. - NOTE: The `chat_completion` task type only supports streaming and only through - the _stream API. - :param openai_inference_id: The unique identifier of the inference endpoint. + :param alibabacloud_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In - this case, `openai`. + this case, `alibabacloud-ai-search`. :param service_settings: Settings used to install the inference model. These - settings are specific to the `openai` service. + settings are specific to the `alibabacloud-ai-search` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") - if openai_inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'openai_inference_id'") + if alibabacloud_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'alibabacloud_inference_id'" + ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), - "openai_inference_id": _quote(openai_inference_id), + "alibabacloud_inference_id": _quote(alibabacloud_inference_id), } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: @@ -415,7 +476,1425 @@ async def put_openai( params=__query, headers=__headers, body=__body, - endpoint_id="inference.put_openai", + endpoint_id="inference.put_alibabacloud", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_amazonbedrock( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + amazonbedrock_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create an Amazon Bedrock inference endpoint.
+Creates an inference endpoint to perform an inference task with the amazonbedrock service.
+info
+You need to provide the access and secret keys only once, during the inference model creation.
+The get inference API does not retrieve your access or secret keys.
+After creating the inference model, you cannot change the associated key pairs.
+If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
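A hedged sketch of creating such an endpoint, assuming `client` is an Elasticsearch client as above. The service_settings keys (access_key, secret_key, region, provider, model) mirror the Elastic documentation for the amazonbedrock service; all concrete values are placeholder assumptions:

    client.inference.put_amazonbedrock(
        task_type="text_embedding",
        amazonbedrock_inference_id="my-bedrock-endpoint",  # assumed name
        service="amazonbedrock",
        service_settings={
            "access_key": "<aws-access-key>",
            "secret_key": "<aws-secret-key>",
            "region": "us-east-1",                     # assumed region
            "provider": "amazontitan",                 # assumed provider
            "model": "amazon.titan-embed-text-v2:0",   # assumed model id
        },
    )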

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `amazonbedrock`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `amazonbedrock` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if amazonbedrock_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'amazonbedrock_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_amazonbedrock", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_anthropic( + self, + *, + task_type: t.Union[str, t.Literal["completion"]], + anthropic_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create an Anthropic inference endpoint.
+Create an inference endpoint to perform an inference task with the anthropic service.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
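A hedged sketch, assuming `client` is an Elasticsearch client as above. The api_key and model_id service settings and the max_tokens task setting follow the Elastic documentation for the anthropic service and are assumptions here:

    client.inference.put_anthropic(
        task_type="completion",
        anthropic_inference_id="my-anthropic-endpoint",  # assumed name
        service="anthropic",
        service_settings={
            "api_key": "<anthropic-api-key>",
            "model_id": "claude-3-5-haiku-latest",  # assumed model id
        },
        task_settings={"max_tokens": 1024},  # assumed task setting
    )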

+ + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `completion`. + :param anthropic_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `anthropic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if anthropic_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'anthropic_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "anthropic_inference_id": _quote(anthropic_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_anthropic", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_azureaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create an Azure AI Studio inference endpoint.
+Create an inference endpoint to perform an inference task with the azureaistudio service.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param azureaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureaistudio_inference_id": _quote(azureaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_azureopenai( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureopenai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create an Azure OpenAI inference endpoint.
+Create an inference endpoint to perform an inference task with the azureopenai service.

+The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:

+ +

+The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
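A hedged sketch, assuming `client` is an Elasticsearch client as above. The service_settings keys (api_key, resource_name, deployment_id, api_version) mirror the Elastic documentation for the azureopenai service; all values are placeholders:

    client.inference.put_azureopenai(
        task_type="completion",
        azureopenai_inference_id="my-azure-openai-endpoint",  # assumed name
        service="azureopenai",
        service_settings={
            "api_key": "<azure-api-key>",
            "resource_name": "<azure-resource-name>",
            "deployment_id": "<deployment-name>",
            "api_version": "2024-02-01",  # assumed API version
        },
    )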

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param azureopenai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureopenai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `azureopenai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureopenai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureopenai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureopenai_inference_id": _quote(azureopenai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureopenai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_cohere( + self, + *, + task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]], + cohere_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create a Cohere inference endpoint.
+Create an inference endpoint to perform an inference task with the cohere service.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
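A hedged sketch, assuming `client` is an Elasticsearch client as above; the service_settings keys and the model name follow the Elastic documentation for the cohere service and are assumptions here:

    client.inference.put_cohere(
        task_type="text_embedding",
        cohere_inference_id="my-cohere-endpoint",  # assumed name
        service="cohere",
        service_settings={
            "api_key": "<cohere-api-key>",
            "model_id": "embed-english-v3.0",  # assumed model id
        },
    )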

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param cohere_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `cohere`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `cohere` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if cohere_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'cohere_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "cohere_inference_id": _quote(cohere_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_cohere", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + async def put_eis( + self, + *, + task_type: t.Union[str, t.Literal["chat_completion"]], + eis_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elastic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create an Elastic Inference Service (EIS) inference endpoint.
+Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
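A hedged sketch, assuming `client` is an Elasticsearch client as above. The chat_completion task type and the elastic service come from the signature in this patch; the service_settings shape and the model_id value are placeholder assumptions:

    client.inference.put_eis(
        task_type="chat_completion",
        eis_inference_id="my-eis-endpoint",  # assumed name
        service="elastic",
        service_settings={"model_id": "<eis-model-id>"},  # assumed settings shape
    )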

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param eis_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elastic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elastic` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "eis_inference_id": _quote(eis_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["eis_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_eis", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_elasticsearch( + self, + *, + task_type: t.Union[ + str, t.Literal["rerank", "sparse_embedding", "text_embedding"] + ], + elasticsearch_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create an Elasticsearch inference endpoint.
+Create an inference endpoint to perform an inference task with the elasticsearch service.
+info
+Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings.
+If you use the ELSER or the E5 model through the elasticsearch service, the API request will automatically download and deploy the model if it isn't downloaded yet.
+info
+You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
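A hedged sketch, assuming `client` is an Elasticsearch client as above. The model_id, num_allocations, and num_threads settings mirror the Elastic documentation for the elasticsearch service, and a longer request timeout is set via client.options() as suggested by the note above; the endpoint name and model id are assumptions:

    resp = client.options(request_timeout=120).inference.put_elasticsearch(
        task_type="text_embedding",
        elasticsearch_inference_id="my-e5-endpoint",  # must not match the model_id
        service="elasticsearch",
        service_settings={
            "model_id": ".multilingual-e5-small",  # assumed built-in model id
            "num_allocations": 1,
            "num_threads": 1,
        },
    )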

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elasticsearch_inference_id: The unique identifier of the inference endpoint. + The must not match the `model_id`. + :param service: The type of service supported for the specified task type. In + this case, `elasticsearch`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elasticsearch` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elasticsearch_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'elasticsearch_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elasticsearch_inference_id": _quote(elasticsearch_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elasticsearch", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_elser( + self, + *, + task_type: t.Union[str, t.Literal["sparse_embedding"]], + elser_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elser"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create an ELSER inference endpoint.
+Create an inference endpoint to perform an inference task with the elser service.
+You can also deploy ELSER by using the Elasticsearch inference integration.
+info
+Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.
+The API request will automatically download and deploy the ELSER model if it isn't already downloaded.
+info
+You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
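A hedged sketch, assuming `client` is an Elasticsearch client as above. num_allocations and num_threads are the documented service settings for the elser service; the endpoint name is an assumption:

    client.inference.put_elser(
        task_type="sparse_embedding",
        elser_inference_id="my-elser-endpoint",  # assumed name
        service="elser",
        service_settings={"num_allocations": 1, "num_threads": 1},
    )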

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elser_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elser`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elser` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elser_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'elser_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elser_inference_id": _quote(elser_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elser", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_googleaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + googleaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create a Google AI Studio inference endpoint.
+Create an inference endpoint to perform an inference task with the googleaistudio service.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googleaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googleaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googleaistudio` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googleaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googleaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googleaistudio_inference_id": _quote(googleaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googleaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_googlevertexai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + googlevertexai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create a Google Vertex AI inference endpoint.
+Create an inference endpoint to perform an inference task with the googlevertexai service.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googlevertexai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googlevertexai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googlevertexai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googlevertexai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googlevertexai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googlevertexai_inference_id": _quote(googlevertexai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googlevertexai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_hugging_face( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + huggingface_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create a Hugging Face inference endpoint.
+Create an inference endpoint to perform an inference task with the hugging_face service.
+You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.
+Select the model you want to use on the new endpoint creation page (for example intfloat/e5-small-v2), then select the sentence embeddings task under the advanced configuration section.
+Create the endpoint and copy the URL after the endpoint initialization has been finished.
+The following models are recommended for the Hugging Face service:
+  • all-MiniLM-L6-v2
+  • all-MiniLM-L12-v2
+  • all-mpnet-base-v2
+  • e5-base-v2
+  • e5-small-v2
+  • multilingual-e5-base
+  • multilingual-e5-small
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
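A hedged sketch, assuming `client` is an Elasticsearch client as above. The url and api_key service settings mirror the Elastic documentation for the hugging_face service; the URL would be the one copied from the Hugging Face endpoint page, and the endpoint name is an assumption:

    client.inference.put_hugging_face(
        task_type="text_embedding",
        huggingface_inference_id="my-hugging-face-endpoint",  # assumed name
        service="hugging_face",
        service_settings={
            "api_key": "<hugging-face-access-token>",
            "url": "<url-of-your-hugging-face-endpoint>",
        },
    )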

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param huggingface_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `hugging_face`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `hugging_face` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if huggingface_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'huggingface_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "huggingface_inference_id": _quote(huggingface_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_hugging_face", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_jinaai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + jinaai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+Create a JinaAI inference endpoint.
+Create an inference endpoint to perform an inference task with the jinaai service.
+To review the available rerank models, refer to https://jina.ai/reranker.
+To review the available text_embedding models, refer to https://jina.ai/embeddings/.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param jinaai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `jinaai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `jinaai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if jinaai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'jinaai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "jinaai_inference_id": _quote(jinaai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_jinaai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_mistral( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + mistral_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Mistral inference endpoint.

+

Create an inference endpoint to perform an inference task with the mistral service.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
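
As a usage sketch (not part of this patch), creating such an endpoint might look as follows; the endpoint ID and the `service_settings` keys are illustrative and should be checked against the Mistral service documentation::

    # `client` is an Elasticsearch instance, as in the earlier verification sketch
    resp = client.inference.put_mistral(
        task_type="text_embedding",
        mistral_inference_id="my-mistral-embeddings",  # hypothetical endpoint ID
        service="mistral",
        service_settings={"api_key": "<MISTRAL_API_KEY>", "model": "mistral-embed"},  # illustrative keys
    )
    print(resp)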

+ + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param mistral_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `mistral`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `mistral` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if mistral_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'mistral_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "mistral_inference_id": _quote(mistral_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_mistral", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_openai( + self, + *, + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "text_embedding"] + ], + openai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an OpenAI inference endpoint.

+

Create an inference endpoint to perform an inference task with the openai service.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
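
A hedged sketch of the corresponding client call; the endpoint ID and `service_settings` keys are illustrative::

    resp = client.inference.put_openai(  # `client` as in the earlier sketch
        task_type="text_embedding",
        openai_inference_id="my-openai-embeddings",  # hypothetical endpoint ID
        service="openai",
        service_settings={"api_key": "<OPENAI_API_KEY>", "model_id": "text-embedding-3-small"},  # illustrative
    )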

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param openai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `openai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if openai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'openai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "openai_inference_id": _quote(openai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_openai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_voyageai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + voyageai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a VoyageAI inference endpoint.

+

Create an inference endpoint to perform an inference task with the voyageai service.

+

Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
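
A minimal usage sketch, assuming the `client` from the earlier example; the endpoint ID, model name, and `service_settings` keys are illustrative::

    resp = client.inference.put_voyageai(
        task_type="rerank",
        voyageai_inference_id="my-voyageai-rerank",  # hypothetical endpoint ID
        service="voyageai",
        service_settings={"api_key": "<VOYAGEAI_API_KEY>", "model_id": "rerank-2"},  # illustrative
    )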

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param voyageai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `voyageai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `voyageai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if voyageai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'voyageai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "voyageai_inference_id": _quote(voyageai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_voyageai", path_parts=__path_parts, ) diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 1d80efee7..d4082ceb1 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -1579,7 +1579,7 @@ def delete_by_query( If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text @@ -3418,7 +3418,8 @@ def msearch( computationally expensive named queries on a large number of hits may add significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi - search API can execute. + search API can execute. Defaults to `max(1, (# of data nodes * min(search + thread pool size, 10)))`. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. 
:param pre_filter_shard_size: Defines a threshold that enforces a pre-filter @@ -3746,6 +3747,7 @@ def open_point_in_time( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, + max_concurrent_shard_requests: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, @@ -3801,6 +3803,8 @@ def open_point_in_time( a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. + :param max_concurrent_shard_requests: Maximum number of concurrent shard requests + that each sub-search request executes per node. :param preference: The node or shard the operation should be performed on. By default, it is random. :param routing: A custom value that is used to route operations to a specific @@ -3828,6 +3832,8 @@ def open_point_in_time( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if max_concurrent_shard_requests is not None: + __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: @@ -4368,7 +4374,7 @@ def render_search_template( human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, - source: t.Optional[str] = None, + source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -5772,7 +5778,7 @@ def search_template( search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, - source: t.Optional[str] = None, + source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: @@ -6510,7 +6516,7 @@ def update_by_query( wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 6bab33aec..279528a96 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -234,6 +234,67 @@ def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="chat_completion_request", + ) + def post_eis_chat_completion( + self, + *, + eis_inference_id: str, + chat_completion_request: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Perform a chat completion task through the Elastic Inference Service (EIS).

+

Perform a chat completion inference task with the elastic service.
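
A hedged sketch of calling this endpoint from the synchronous client; the endpoint ID is hypothetical and the request body follows the usual chat-completion shape::

    resp = client.inference.post_eis_chat_completion(  # `client` as in the earlier sketch
        eis_inference_id="my-eis-chat",  # hypothetical EIS endpoint ID
        chat_completion_request={
            "messages": [{"role": "user", "content": "Say hello"}],
        },
    )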

+ + + ``_ + + :param eis_inference_id: The unique identifier of the inference endpoint. + :param chat_completion_request: + """ + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if chat_completion_request is None and body is None: + raise ValueError( + "Empty value passed for parameters 'chat_completion_request' and 'body', one of them should be set." + ) + elif chat_completion_request is not None and body is not None: + raise ValueError("Cannot set both 'chat_completion_request' and 'body'") + __path_parts: t.Dict[str, str] = {"eis_inference_id": _quote(eis_inference_id)} + __path = ( + f'/_inference/chat_completion/{__path_parts["eis_inference_id"]}/_stream' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = ( + chat_completion_request if chat_completion_request is not None else body + ) + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.post_eis_chat_completion", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="inference_config", ) @@ -329,14 +390,14 @@ def put( "task_settings", ), ) - def put_openai( + def put_alibabacloud( self, *, task_type: t.Union[ - str, t.Literal["chat_completion", "completion", "text_embedding"] + str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"] ], - openai_inference_id: str, - service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, + alibabacloud_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, @@ -349,8 +410,8 @@ def put_openai( """ .. raw:: html -

Create an OpenAI inference endpoint.

-

Create an inference endpoint to perform an inference task with the openai service.

+

Create an AlibabaCloud AI Search inference endpoint.

+

Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. @@ -358,33 +419,33 @@ def put_openai( Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
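
For orientation, a sketch of the equivalent client call; the endpoint ID and `service_settings` keys are illustrative and should be checked against the AlibabaCloud AI Search service documentation::

    resp = client.inference.put_alibabacloud(  # `client` as in the earlier sketch
        task_type="text_embedding",
        alibabacloud_inference_id="my-alibabacloud-embeddings",  # hypothetical endpoint ID
        service="alibabacloud-ai-search",
        service_settings={  # illustrative keys
            "api_key": "<API_KEY>",
            "host": "<HOST>",
            "workspace": "<WORKSPACE>",
            "service_id": "<SERVICE_ID>",
        },
    )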

- ``_ + ``_ :param task_type: The type of the inference task that the model will perform. - NOTE: The `chat_completion` task type only supports streaming and only through - the _stream API. - :param openai_inference_id: The unique identifier of the inference endpoint. + :param alibabacloud_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In - this case, `openai`. + this case, `alibabacloud-ai-search`. :param service_settings: Settings used to install the inference model. These - settings are specific to the `openai` service. + settings are specific to the `alibabacloud-ai-search` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") - if openai_inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'openai_inference_id'") + if alibabacloud_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'alibabacloud_inference_id'" + ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), - "openai_inference_id": _quote(openai_inference_id), + "alibabacloud_inference_id": _quote(alibabacloud_inference_id), } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: @@ -415,7 +476,1425 @@ def put_openai( params=__query, headers=__headers, body=__body, - endpoint_id="inference.put_openai", + endpoint_id="inference.put_alibabacloud", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_amazonbedrock( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + amazonbedrock_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an Amazon Bedrock inference endpoint.

+

Create an inference endpoint to perform an inference task with the amazonbedrock service.

+
+

info + You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

+
+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
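
A usage sketch under the same assumptions as the earlier examples; the region, provider, and model values are illustrative::

    resp = client.inference.put_amazonbedrock(
        task_type="text_embedding",
        amazonbedrock_inference_id="my-bedrock-embeddings",  # hypothetical endpoint ID
        service="amazonbedrock",
        service_settings={  # illustrative keys
            "access_key": "<AWS_ACCESS_KEY>",
            "secret_key": "<AWS_SECRET_KEY>",
            "region": "us-east-1",
            "provider": "amazontitan",
            "model": "amazon.titan-embed-text-v2:0",
        },
    )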

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `amazonbedrock`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `amazonbedrock` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if amazonbedrock_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'amazonbedrock_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_amazonbedrock", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_anthropic( + self, + *, + task_type: t.Union[str, t.Literal["completion"]], + anthropic_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an Anthropic inference endpoint.

+

Create an inference endpoint to perform an inference task with the anthropic service.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
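
A hedged sketch of the corresponding call; the endpoint ID, model, and `task_settings` values are illustrative::

    resp = client.inference.put_anthropic(  # `client` as in the earlier sketch
        task_type="completion",
        anthropic_inference_id="my-anthropic-completion",  # hypothetical endpoint ID
        service="anthropic",
        service_settings={"api_key": "<ANTHROPIC_API_KEY>", "model_id": "claude-3-5-haiku-latest"},  # illustrative
        task_settings={"max_tokens": 1024},  # illustrative
    )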

+ + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `completion`. + :param anthropic_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `anthropic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if anthropic_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'anthropic_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "anthropic_inference_id": _quote(anthropic_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_anthropic", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_azureaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an Azure AI Studio inference endpoint.

+

Create an inference endpoint to perform an inference task with the azureaistudio service.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
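
A minimal sketch of the corresponding call; the endpoint ID and `service_settings` keys are illustrative and should be checked against the Azure AI Studio service documentation::

    resp = client.inference.put_azureaistudio(  # `client` as in the earlier sketch
        task_type="completion",
        azureaistudio_inference_id="my-azureaistudio-completion",  # hypothetical endpoint ID
        service="azureaistudio",
        service_settings={  # illustrative keys
            "api_key": "<API_KEY>",
            "target": "<TARGET_URI>",
            "provider": "openai",
            "endpoint_type": "token",
        },
    )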

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param azureaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureaistudio_inference_id": _quote(azureaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_azureopenai( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureopenai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an Azure OpenAI inference endpoint.

+

Create an inference endpoint to perform an inference task with the azureopenai service.

+

The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:

+ +

The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
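
A usage sketch under the same assumptions as the earlier examples; the resource, deployment, and API version values are illustrative::

    resp = client.inference.put_azureopenai(
        task_type="text_embedding",
        azureopenai_inference_id="my-azureopenai-embeddings",  # hypothetical endpoint ID
        service="azureopenai",
        service_settings={  # illustrative keys
            "api_key": "<API_KEY>",
            "resource_name": "<RESOURCE_NAME>",
            "deployment_id": "<DEPLOYMENT_ID>",
            "api_version": "2024-02-01",
        },
    )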

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param azureopenai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureopenai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `azureopenai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureopenai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureopenai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureopenai_inference_id": _quote(azureopenai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureopenai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_cohere( + self, + *, + task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]], + cohere_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Cohere inference endpoint.

+

Create an inference endpoint to perform an inference task with the cohere service.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
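
A hedged sketch of the corresponding call; the endpoint ID, model, and `task_settings` values are illustrative::

    resp = client.inference.put_cohere(  # `client` as in the earlier sketch
        task_type="rerank",
        cohere_inference_id="my-cohere-rerank",  # hypothetical endpoint ID
        service="cohere",
        service_settings={"api_key": "<COHERE_API_KEY>", "model_id": "rerank-english-v3.0"},  # illustrative
        task_settings={"top_n": 10, "return_documents": True},  # illustrative
    )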

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param cohere_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `cohere`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `cohere` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if cohere_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'cohere_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "cohere_inference_id": _quote(cohere_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_cohere", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + def put_eis( + self, + *, + task_type: t.Union[str, t.Literal["chat_completion"]], + eis_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elastic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an Elastic Inference Service (EIS) inference endpoint.

+

Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
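
A minimal sketch of the corresponding call; the endpoint ID and model name are illustrative, not taken from this patch::

    resp = client.inference.put_eis(  # `client` as in the earlier sketch
        task_type="chat_completion",
        eis_inference_id="my-eis-chat",  # hypothetical endpoint ID
        service="elastic",
        service_settings={"model_id": "rainbow-sprinkles"},  # illustrative model name
    )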

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param eis_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elastic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elastic` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "eis_inference_id": _quote(eis_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["eis_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_eis", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_elasticsearch( + self, + *, + task_type: t.Union[ + str, t.Literal["rerank", "sparse_embedding", "text_embedding"] + ], + elasticsearch_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an Elasticsearch inference endpoint.

+

Create an inference endpoint to perform an inference task with the elasticsearch service.

+
+

info
 + Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings.

+
+

If you use the ELSER or the E5 model through the elasticsearch service, the API request will automatically download and deploy the model if it isn't downloaded yet.

+
+

info + You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

+
+

After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
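
A usage sketch under the same assumptions as the earlier examples; the endpoint ID is hypothetical and the settings show one plausible configuration for the built-in E5 model::

    resp = client.inference.put_elasticsearch(
        task_type="text_embedding",
        elasticsearch_inference_id="my-e5-endpoint",  # hypothetical ID; must differ from the model_id
        service="elasticsearch",
        service_settings={  # illustrative values
            "model_id": ".multilingual-e5-small",
            "num_allocations": 1,
            "num_threads": 1,
        },
    )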

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elasticsearch_inference_id: The unique identifier of the inference endpoint. + The must not match the `model_id`. + :param service: The type of service supported for the specified task type. In + this case, `elasticsearch`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elasticsearch` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elasticsearch_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'elasticsearch_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elasticsearch_inference_id": _quote(elasticsearch_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elasticsearch", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_elser( + self, + *, + task_type: t.Union[str, t.Literal["sparse_embedding"]], + elser_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elser"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an ELSER inference endpoint.

+

Create an inference endpoint to perform an inference task with the elser service. + You can also deploy ELSER by using the Elasticsearch inference integration.

+
+

info
 + Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.

+
+

The API request will automatically download and deploy the ELSER model if it isn't already downloaded.

+
+

info + You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

+
+

After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
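
A hedged sketch, including the higher request timeout mentioned above to allow for the background model download; the endpoint ID and allocation values are illustrative::

    resp = client.options(request_timeout=300).inference.put_elser(  # `client` as in the earlier sketch
        task_type="sparse_embedding",
        elser_inference_id="my-elser-endpoint",  # hypothetical endpoint ID
        service="elser",
        service_settings={"num_allocations": 1, "num_threads": 1},  # illustrative
    )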

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elser_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elser`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elser` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elser_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'elser_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elser_inference_id": _quote(elser_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elser", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_googleaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + googleaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Google AI Studio inference endpoint.

+

Create an inference endpoint to perform an inference task with the googleaistudio service.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
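
A minimal sketch of the corresponding call; the endpoint ID and model name are illustrative::

    resp = client.inference.put_googleaistudio(  # `client` as in the earlier sketch
        task_type="completion",
        googleaistudio_inference_id="my-googleaistudio-completion",  # hypothetical endpoint ID
        service="googleaistudio",
        service_settings={"api_key": "<API_KEY>", "model_id": "gemini-1.5-flash"},  # illustrative
    )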

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googleaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googleaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googleaistudio` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googleaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googleaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googleaistudio_inference_id": _quote(googleaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googleaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_googlevertexai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + googlevertexai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Google Vertex AI inference endpoint.

+

Create an inference endpoint to perform an inference task with the googlevertexai service.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
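
A usage sketch under the same assumptions as the earlier examples; the `service_settings` keys and values are illustrative::

    resp = client.inference.put_googlevertexai(
        task_type="text_embedding",
        googlevertexai_inference_id="my-vertexai-embeddings",  # hypothetical endpoint ID
        service="googlevertexai",
        service_settings={  # illustrative keys
            "service_account_json": "<SERVICE_ACCOUNT_JSON>",
            "model_id": "text-embedding-005",
            "location": "us-central1",
            "project_id": "<PROJECT_ID>",
        },
    )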

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googlevertexai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googlevertexai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googlevertexai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googlevertexai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googlevertexai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googlevertexai_inference_id": _quote(googlevertexai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googlevertexai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_hugging_face( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + huggingface_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Hugging Face inference endpoint.


Create an inference endpoint to perform an inference task with the hugging_face service.


You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example intfloat/e5-small-v2), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint has finished initializing.


The following models are recommended for the Hugging Face service:

  • all-MiniLM-L6-v2
  • all-MiniLM-L12-v2
  • all-mpnet-base-v2
  • e5-base-v2
  • e5-small-v2
  • multilingual-e5-base
  • multilingual-e5-small

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
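For illustration only, a minimal sketch of calling the generated put_hugging_face method might look like the following; the endpoint ID, cluster address, token, URL, and service_settings key names are placeholders and assumptions based on the Hugging Face service documentation, not values taken from this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster address
    client.inference.put_hugging_face(
        task_type="text_embedding",
        huggingface_inference_id="my-hugging-face-endpoint",  # placeholder ID
        service="hugging_face",
        service_settings={
            # key names are assumptions based on the Hugging Face service docs
            "api_key": "<hugging-face-access-token>",
            "url": "<endpoint-url-copied-from-hugging-face>",
        },
    )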

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param huggingface_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `hugging_face`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `hugging_face` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if huggingface_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'huggingface_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "huggingface_inference_id": _quote(huggingface_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_hugging_face", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_jinaai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + jinaai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a JinaAI inference endpoint.


Create an inference endpoint to perform an inference task with the jinaai service.


To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.


When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
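For illustration only, a minimal sketch of calling the generated put_jinaai method for a rerank endpoint might look like the following; the endpoint ID, cluster address, model name, and service_settings/task_settings keys are placeholders and assumptions based on the JinaAI service documentation, not values taken from this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster address
    client.inference.put_jinaai(
        task_type="rerank",
        jinaai_inference_id="my-jinaai-rerank-endpoint",  # placeholder ID
        service="jinaai",
        service_settings={
            # key names are assumptions based on the JinaAI service docs
            "api_key": "<jinaai-api-key>",
            "model_id": "jina-reranker-v2-base-multilingual",
        },
        task_settings={"top_n": 10},  # illustrative task setting
    )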

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param jinaai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `jinaai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `jinaai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if jinaai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'jinaai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "jinaai_inference_id": _quote(jinaai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_jinaai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_mistral( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + mistral_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Mistral inference endpoint.


Create an inference endpoint to perform an inference task with the mistral service.


When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
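For illustration only, a minimal sketch of calling the generated put_mistral method might look like the following; the endpoint ID, cluster address, and service_settings key names are placeholders and assumptions based on the Mistral service documentation, not values taken from this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster address
    client.inference.put_mistral(
        task_type="text_embedding",
        mistral_inference_id="my-mistral-endpoint",  # placeholder ID
        service="mistral",
        service_settings={
            # key names are assumptions based on the Mistral service docs
            "api_key": "<mistral-api-key>",
            "model": "mistral-embed",
        },
    )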

+ + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param mistral_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `mistral`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `mistral` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if mistral_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'mistral_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "mistral_inference_id": _quote(mistral_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_mistral", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_openai( + self, + *, + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "text_embedding"] + ], + openai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an OpenAI inference endpoint.


Create an inference endpoint to perform an inference task with the openai service.


When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
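For illustration only, a minimal sketch of calling the generated put_openai method might look like the following; the endpoint ID, cluster address, and service_settings key names are placeholders and assumptions based on the OpenAI service documentation, not values taken from this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster address
    client.inference.put_openai(
        task_type="text_embedding",
        openai_inference_id="my-openai-endpoint",  # placeholder ID
        service="openai",
        service_settings={
            # key names are assumptions based on the OpenAI service docs
            "api_key": "<openai-api-key>",
            "model_id": "text-embedding-3-small",
        },
    )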

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param openai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `openai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if openai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'openai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "openai_inference_id": _quote(openai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_openai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_voyageai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + voyageai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a VoyageAI inference endpoint.


Create an inference endpoint to perform an inference task with the voyageai service.


Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
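For illustration only, a minimal sketch of calling the generated put_voyageai method might look like the following; the endpoint ID, cluster address, and service_settings key names are placeholders and assumptions based on the VoyageAI service documentation, not values taken from this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster address
    client.inference.put_voyageai(
        task_type="text_embedding",
        voyageai_inference_id="my-voyageai-endpoint",  # placeholder ID
        service="voyageai",
        service_settings={
            # key names are assumptions based on the VoyageAI service docs
            "api_key": "<voyageai-api-key>",
            "model_id": "voyage-3-lite",
        },
    )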

+ + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param voyageai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `voyageai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `voyageai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if voyageai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'voyageai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "voyageai_inference_id": _quote(voyageai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_voyageai", path_parts=__path_parts, ) diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index 7fcc9ada5..eb61be48a 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -762,6 +762,11 @@ class Boolean(Field): :arg fielddata: :arg index: :arg null_value: + :arg ignore_malformed: + :arg script: + :arg on_script_error: + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. 
:arg doc_values: :arg copy_to: :arg store: @@ -789,6 +794,10 @@ def __init__( ] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], @@ -816,6 +825,14 @@ def __init__( kwargs["index"] = index if null_value is not DEFAULT: kwargs["null_value"] = null_value + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if script is not DEFAULT: + kwargs["script"] = script + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: