From 10f729f16de6833f2228ed812387fd0b2ee00add Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 10 Mar 2025 06:03:20 +0000 Subject: [PATCH] Auto-generated API code --- elasticsearch/_async/client/__init__.py | 4 +- elasticsearch/_async/client/connector.py | 4 +- elasticsearch/_async/client/esql.py | 4 +- elasticsearch/_async/client/indices.py | 29 +- elasticsearch/_async/client/inference.py | 440 +++++++++++++++++++---- elasticsearch/_async/client/ingest.py | 6 +- elasticsearch/_async/client/ml.py | 7 +- elasticsearch/_async/client/simulate.py | 10 +- elasticsearch/_sync/client/__init__.py | 4 +- elasticsearch/_sync/client/connector.py | 4 +- elasticsearch/_sync/client/esql.py | 4 +- elasticsearch/_sync/client/indices.py | 29 +- elasticsearch/_sync/client/inference.py | 440 +++++++++++++++++++---- elasticsearch/_sync/client/ingest.py | 6 +- elasticsearch/_sync/client/ml.py | 7 +- elasticsearch/_sync/client/simulate.py | 10 +- elasticsearch/dsl/query.py | 22 ++ elasticsearch/dsl/types.py | 42 +++ 18 files changed, 856 insertions(+), 216 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 98d006405..b0992bd67 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -3114,7 +3114,7 @@ async def knn_search( - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices. @@ -4446,7 +4446,7 @@ async def scripts_painless_execute(

Each context requires a script, but additional parameters depend on the context you're using for that script.

- ``_ + ``_ :param context: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index debf9e3ce..3a55163bb 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -1539,7 +1539,7 @@ async def update_filtering_validation(

Update the draft filtering validation info for a connector.

- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1710,7 +1710,7 @@ async def update_native(

Update the connector is_native flag.

- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index 0df836730..dca7ca2bb 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -323,7 +323,7 @@ async def async_query_stop( If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.

- ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -408,7 +408,7 @@ async def query( Get search results for an ES|QL (Elasticsearch query language) query.

- ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 31ee76bca..ac3f93d8f 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -265,7 +265,7 @@ async def cancel_migrate_reindex(

Cancel a migration reindex attempt for a data stream or index.

- ``_ + ``_ :param index: The index or data stream name """ @@ -794,7 +794,7 @@ async def create_from(

Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.

- ``_ + ``_ :param source: The source index or data stream name :param dest: The destination index or data stream name @@ -2487,6 +2487,7 @@ async def get_field_mapping( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2515,6 +2516,8 @@ async def get_field_mapping( :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. + :param local: If `true`, the request retrieves information from the local node + only. """ if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'fields'") @@ -2542,6 +2545,8 @@ async def get_field_mapping( __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults + if local is not None: + __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -2726,7 +2731,7 @@ async def get_migrate_reindex_status(

Get the status of a migration reindex attempt for a data stream or index.

- ``_ + ``_ :param index: The index or data stream name. """ @@ -2945,7 +2950,7 @@ async def migrate_reindex( The persistent task ID is returned immediately and the reindexing work is completed in that task.

- ``_ + ``_ :param reindex: """ @@ -3006,7 +3011,7 @@ async def migrate_to_data_stream( The write index for the alias becomes the write index for the stream.

- ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -3062,7 +3067,7 @@ async def modify_data_stream( Performs one or more data stream modification actions in a single atomic operation.

- ``_ + ``_ :param actions: Actions to perform. """ @@ -3227,7 +3232,7 @@ async def promote_data_stream( This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.

- ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -3293,7 +3298,7 @@ async def put_alias( Adds a data stream or index to an alias.

- ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -3400,7 +3405,7 @@ async def put_data_lifecycle( Update the data stream lifecycle of the specified data streams.

- ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -3528,7 +3533,7 @@ async def put_index_template( If an entry already exists with the same key, then it is overwritten by the new definition.

- ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -5372,7 +5377,7 @@ async def update_aliases( Adds a data stream or index to an alias.

- ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If @@ -5451,7 +5456,7 @@ async def validate_query( Validates a query without running it.

- ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 54dd0d32b..e685d1c5c 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -25,6 +25,74 @@ class InferenceClient(NamespacedClient): + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + async def completion( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Perform completion inference on the service.
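For orientation, a minimal usage sketch of the new method; the endpoint ID and prompt are hypothetical, and the endpoint must already exist (see `put` below)::

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")
        # "my-completion-endpoint" is a hypothetical, pre-created endpoint ID
        resp = await client.inference.completion(
            inference_id="my-completion-endpoint",
            input="Write a haiku about search engines.",
        )
        print(resp)
        await client.close()

    asyncio.run(main())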

+ + + ``_ + + :param inference_id: The inference Id + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/completion/{__path_parts["inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.completion", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def delete( self, @@ -33,7 +101,13 @@ async def delete( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, dry_run: t.Optional[bool] = None, @@ -102,7 +176,13 @@ async def get( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, inference_id: t.Optional[str] = None, @@ -155,24 +235,188 @@ async def get( ) @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), + body_name="inference_config", ) - async def inference( + async def put( self, *, inference_id: str, - input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + inference_config: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an inference endpoint. + When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. + However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
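As a sketch, creating a text-embedding endpoint backed by the built-in E5 model might look like the following; the endpoint ID is hypothetical and the service settings are illustrative rather than definitive::

    # Inside an async context, with `client` an AsyncElasticsearch instance
    resp = await client.inference.put(
        task_type="text_embedding",
        inference_id="my-e5-endpoint",  # hypothetical endpoint ID
        inference_config={
            "service": "elasticsearch",
            "service_settings": {
                "model_id": ".multilingual-e5-small",
                "num_allocations": 1,
                "num_threads": 1,
            },
        },
    )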

+ + + ``_ + + :param inference_id: The inference Id + :param inference_config: + :param task_type: The task type + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if inference_config is None and body is None: + raise ValueError( + "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." + ) + elif inference_config is not None and body is not None: + raise ValueError("Cannot set both 'inference_config' and 'body'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = inference_config if inference_config is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + async def put_watsonx( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + watsonx_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Watsonx inference endpoint.

+

Creates an inference endpoint to perform an inference task with the watsonxai service. + You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. + You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
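A hedged sketch of the call; every value in `service_settings` below is a placeholder for your own IBM Cloud deployment details, and the exact set of required keys is an assumption based on the `watsonxai` service documentation::

    resp = await client.inference.put_watsonx(
        task_type="text_embedding",
        watsonx_inference_id="my-watsonx-endpoint",  # hypothetical ID
        service="watsonxai",
        service_settings={
            # Placeholders: substitute your deployment's real values
            "api_key": "<ibm-cloud-api-key>",
            "url": "<watsonx-endpoint-url>",
            "model_id": "<model-id>",
            "project_id": "<project-id>",
            "api_version": "2024-03-14",
        },
    )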

+ + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param watsonx_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `watsonxai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if watsonx_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'watsonx_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "watsonx_inference_id": _quote(watsonx_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_watsonx", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + async def rerank( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, query: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, @@ -180,14 +424,7 @@ async def inference( """ .. raw:: html -

Perform inference on the service.

-

This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. - It returns a response with the results of the tasks. - The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

-
-

info - The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

-
+

Perform reranking inference on the service.
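Note that `query` is now required alongside `input`; a minimal sketch with a hypothetical, pre-created endpoint ID::

    resp = await client.inference.rerank(
        inference_id="my-rerank-endpoint",  # hypothetical endpoint ID
        query="What is Elasticsearch?",
        input=[
            "Elasticsearch is a distributed search and analytics engine.",
            "Kibana is a data visualization front end.",
        ],
    )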

``_ @@ -196,9 +433,7 @@ async def inference( :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. - :param task_type: The type of inference task that the model performs. - :param query: The query input, which is required only for the `rerank` task. - It is not required for other tasks. + :param query: Query input. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. @@ -208,18 +443,10 @@ async def inference( raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if query is None and body is None: + raise ValueError("Empty value passed for parameter 'query'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/rerank/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: @@ -250,71 +477,48 @@ async def inference( params=__query, headers=__headers, body=__body, - endpoint_id="inference.inference", + endpoint_id="inference.rerank", path_parts=__path_parts, ) @_rewrite_parameters( - body_name="inference_config", + body_fields=("input", "task_settings"), ) - async def put( + async def sparse_embedding( self, *, inference_id: str, - inference_config: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, - task_type: t.Optional[ - t.Union[ - str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], - ] - ] = None, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -

Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

-

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. - For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. - However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

+

Perform sparse embedding inference on the service.
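A minimal sketch, assuming an ELSER-style sparse embedding endpoint has already been created (the ID is hypothetical)::

    resp = await client.inference.sparse_embedding(
        inference_id="my-elser-endpoint",  # hypothetical endpoint ID
        input="The quick brown fox jumps over the lazy dog.",
    )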

- ``_ + ``_ :param inference_id: The inference Id - :param inference_config: - :param task_type: The task type + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") - if inference_config is None and body is None: - raise ValueError( - "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." - ) - elif inference_config is not None and body is not None: - raise ValueError("Cannot set both 'inference_config' and 'body'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -323,15 +527,93 @@ async def put( __query["human"] = human if pretty is not None: __query["pretty"] = pretty - __body = inference_config if inference_config is not None else body - __headers = {"accept": "application/json", "content-type": "application/json"} + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] - "PUT", + "POST", __path, params=__query, headers=__headers, body=__body, - endpoint_id="inference.put", + endpoint_id="inference.sparse_embedding", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + async def text_embedding( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Perform text embedding inference on the service.
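A minimal sketch; as with the other task-specific helpers, `input` may be a single string or a list of strings, and the endpoint ID is hypothetical::

    resp = await client.inference.text_embedding(
        inference_id="my-e5-endpoint",  # hypothetical endpoint ID
        input=["first passage to embed", "second passage to embed"],
    )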

+ + + ``_ + + :param inference_id: The inference Id + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.text_embedding", path_parts=__path_parts, ) @@ -347,7 +629,13 @@ async def update( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, error_trace: t.Optional[bool] = None, @@ -403,7 +691,7 @@ async def update( __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] - "POST", + "PUT", __path, params=__query, headers=__headers, diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 1cf0cfe1b..27a0f09f3 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -208,7 +208,7 @@ async def geo_ip_stats( Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" @@ -412,7 +412,7 @@ async def processor_grok( A grok pattern is like a regular expression that supports aliased expressions that can be reused.

- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" @@ -620,7 +620,7 @@ async def put_pipeline( Changes made using this API take effect immediately.

- ``_ + ``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index 80bc46565..6599c0923 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -2616,7 +2616,6 @@ async def get_trained_models( ], ] ] = None, - include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2646,8 +2645,6 @@ async def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. - :param include_model_definition: parameter is deprecated! Use [include=definition] - instead :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied @@ -2677,8 +2674,6 @@ async def get_trained_models( __query["human"] = human if include is not None: __query["include"] = include - if include_model_definition is not None: - __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -5733,7 +5728,7 @@ async def validate(

Validate an anomaly detection job.

- ``_ + ``_ :param analysis_config: :param analysis_limits: diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py index 3c3d33288..bb636ddb6 100644 --- a/elasticsearch/_async/client/simulate.py +++ b/elasticsearch/_async/client/simulate.py @@ -35,7 +35,7 @@ class SimulateClient(NamespacedClient): body_fields=( "docs", "component_template_substitutions", - "index_template_subtitutions", + "index_template_substitutions", "mapping_addition", "pipeline_substitutions", ), @@ -52,7 +52,7 @@ async def ingest( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - index_template_subtitutions: t.Optional[ + index_template_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None, @@ -90,7 +90,7 @@ async def ingest( an index argument. :param component_template_substitutions: A map of component template names to substitute component template definition objects. - :param index_template_subtitutions: A map of index template names to substitute + :param index_template_substitutions: A map of index template names to substitute index template definition objects. :param mapping_addition: :param pipeline: The pipeline to use as the default pipeline. This value can @@ -127,8 +127,8 @@ async def ingest( __body["component_template_substitutions"] = ( component_template_substitutions ) - if index_template_subtitutions is not None: - __body["index_template_subtitutions"] = index_template_subtitutions + if index_template_substitutions is not None: + __body["index_template_substitutions"] = index_template_substitutions if mapping_addition is not None: __body["mapping_addition"] = mapping_addition if pipeline_substitutions is not None: diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 6d9131995..32d736192 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -3112,7 +3112,7 @@ def knn_search( - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices. @@ -4444,7 +4444,7 @@ def scripts_painless_execute(

Each context requires a script, but additional parameters depend on the context you're using for that script.

- ``_ + ``_ :param context: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index fc9b193a1..fe2b931da 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -1539,7 +1539,7 @@ def update_filtering_validation(

Update the draft filtering validation info for a connector.

- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1710,7 +1710,7 @@ def update_native(

Update the connector is_native flag.

- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index ce9a3a838..7d29224a9 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -323,7 +323,7 @@ def async_query_stop( If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.

- ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -408,7 +408,7 @@ def query( Get search results for an ES|QL (Elasticsearch query language) query.

- ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 4d502f1af..939eeaf29 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -265,7 +265,7 @@ def cancel_migrate_reindex(

Cancel a migration reindex attempt for a data stream or index.

- ``_ + ``_ :param index: The index or data stream name """ @@ -794,7 +794,7 @@ def create_from(

Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.

- ``_ + ``_ :param source: The source index or data stream name :param dest: The destination index or data stream name @@ -2487,6 +2487,7 @@ def get_field_mapping( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2515,6 +2516,8 @@ def get_field_mapping( :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. + :param local: If `true`, the request retrieves information from the local node + only. """ if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'fields'") @@ -2542,6 +2545,8 @@ def get_field_mapping( __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults + if local is not None: + __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -2726,7 +2731,7 @@ def get_migrate_reindex_status(

Get the status of a migration reindex attempt for a data stream or index.

- ``_ + ``_ :param index: The index or data stream name. """ @@ -2945,7 +2950,7 @@ def migrate_reindex( The persistent task ID is returned immediately and the reindexing work is completed in that task.

- ``_ + ``_ :param reindex: """ @@ -3006,7 +3011,7 @@ def migrate_to_data_stream( The write index for the alias becomes the write index for the stream.

- ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -3062,7 +3067,7 @@ def modify_data_stream( Performs one or more data stream modification actions in a single atomic operation.

- ``_ + ``_ :param actions: Actions to perform. """ @@ -3227,7 +3232,7 @@ def promote_data_stream( This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.

- ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -3293,7 +3298,7 @@ def put_alias( Adds a data stream or index to an alias.

- ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -3400,7 +3405,7 @@ def put_data_lifecycle( Update the data stream lifecycle of the specified data streams.

- ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -3528,7 +3533,7 @@ def put_index_template( If an entry already exists with the same key, then it is overwritten by the new definition.

- ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -5372,7 +5377,7 @@ def update_aliases( Adds a data stream or index to an alias.

- ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If @@ -5451,7 +5456,7 @@ def validate_query( Validates a query without running it.

- ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 911a8a530..2ae2b637d 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -25,6 +25,74 @@ class InferenceClient(NamespacedClient): + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + def completion( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Perform completion inference on the service.
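The synchronous client mirrors the async methods one-for-one; a sketch using the same hypothetical endpoint ID as the async example, without `await`::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    resp = client.inference.completion(
        inference_id="my-completion-endpoint",  # hypothetical endpoint ID
        input="Write a haiku about search engines.",
    )
    print(resp)

The remaining synchronous methods (`put`, `put_watsonx`, `rerank`, `sparse_embedding`, `text_embedding`) follow the async sketches above.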

+ + + ``_ + + :param inference_id: The inference Id + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/completion/{__path_parts["inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.completion", + path_parts=__path_parts, + ) + @_rewrite_parameters() def delete( self, @@ -33,7 +101,13 @@ def delete( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, dry_run: t.Optional[bool] = None, @@ -102,7 +176,13 @@ def get( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, inference_id: t.Optional[str] = None, @@ -155,24 +235,188 @@ def get( ) @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), + body_name="inference_config", ) - def inference( + def put( self, *, inference_id: str, - input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + inference_config: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an inference endpoint. + When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. + However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

+ + + ``_ + + :param inference_id: The inference Id + :param inference_config: + :param task_type: The task type + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if inference_config is None and body is None: + raise ValueError( + "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." + ) + elif inference_config is not None and body is not None: + raise ValueError("Cannot set both 'inference_config' and 'body'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = inference_config if inference_config is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + def put_watsonx( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + watsonx_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create a Watsonx inference endpoint.

+

Creates an inference endpoint to perform an inference task with the watsonxai service. + You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. + You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

+

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+ + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param watsonx_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `watsonxai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if watsonx_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'watsonx_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "watsonx_inference_id": _quote(watsonx_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_watsonx", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + def rerank( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, query: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, @@ -180,14 +424,7 @@ def inference( """ .. raw:: html -

Perform inference on the service.

-

This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. - It returns a response with the results of the tasks. - The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

-
-

info - The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

-
+

Perform reranking inference on the service.

``_ @@ -196,9 +433,7 @@ def inference( :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. - :param task_type: The type of inference task that the model performs. - :param query: The query input, which is required only for the `rerank` task. - It is not required for other tasks. + :param query: Query input. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. @@ -208,18 +443,10 @@ def inference( raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if query is None and body is None: + raise ValueError("Empty value passed for parameter 'query'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/rerank/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: @@ -250,71 +477,48 @@ def inference( params=__query, headers=__headers, body=__body, - endpoint_id="inference.inference", + endpoint_id="inference.rerank", path_parts=__path_parts, ) @_rewrite_parameters( - body_name="inference_config", + body_fields=("input", "task_settings"), ) - def put( + def sparse_embedding( self, *, inference_id: str, - inference_config: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, - task_type: t.Optional[ - t.Union[ - str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], - ] - ] = None, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -

Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

-

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. - For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. - However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

+

Perform sparse embedding inference on the service.

- ``_ + ``_ :param inference_id: The inference Id - :param inference_config: - :param task_type: The task type + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") - if inference_config is None and body is None: - raise ValueError( - "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." - ) - elif inference_config is not None and body is not None: - raise ValueError("Cannot set both 'inference_config' and 'body'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -323,15 +527,93 @@ def put( __query["human"] = human if pretty is not None: __query["pretty"] = pretty - __body = inference_config if inference_config is not None else body - __headers = {"accept": "application/json", "content-type": "application/json"} + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] - "PUT", + "POST", __path, params=__query, headers=__headers, body=__body, - endpoint_id="inference.put", + endpoint_id="inference.sparse_embedding", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + def text_embedding( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +
+
+    @_rewrite_parameters(
+        body_fields=("input", "task_settings"),
+    )
+    def text_embedding(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          Perform text embedding inference on the service

+
+
+        ``_
+
+        :param inference_id: The inference Id
+        :param input: Inference input. Either a string or an array of strings.
+        :param task_settings: Optional task settings
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+            if not __body:
+                __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.text_embedding",
             path_parts=__path_parts,
         )
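The text_embedding method mirrors sparse_embedding but posts to /_inference/text_embedding/{inference_id}, and its input accepts either a single string or a list of strings. A sketch, reusing the hypothetical client from the previous example with an assumed endpoint id:

    # "my-e5-endpoint" is a hypothetical, pre-created text-embedding endpoint.
    resp = client.inference.text_embedding(
        inference_id="my-e5-endpoint",
        input=["first passage to embed", "second passage to embed"],
    )
    print(resp)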
@@ -347,7 +629,13 @@ def update(
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
@@ -403,7 +691,7 @@ def update(
         __body = inference_config if inference_config is not None else body
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
-            "POST",
+            "PUT",
             __path,
             params=__query,
             headers=__headers,
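The update method now sends PUT instead of POST, and its task_type literal gains "chat_completion". A hedged sketch of a call; the endpoint id and the contents of inference_config are illustrative only:

    # Hypothetical endpoint id and settings payload.
    resp = client.inference.update(
        inference_id="my-chat-endpoint",
        task_type="chat_completion",
        inference_config={"service_settings": {"num_allocations": 2}},
    )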
diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py
index 2a1b0463d..3a66284e9 100644
--- a/elasticsearch/_sync/client/ingest.py
+++ b/elasticsearch/_sync/client/ingest.py
@@ -208,7 +208,7 @@ def geo_ip_stats(
          Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

-        ``_
+        ``_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ingest/geoip/stats"
@@ -412,7 +412,7 @@ def processor_grok(
          A grok pattern is like a regular expression that supports aliased expressions that can be reused.

-        ``_
+        ``_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ingest/processor/grok"
@@ -620,7 +620,7 @@ def put_pipeline(
          Changes made using this API take effect immediately.

-        ``_
+        ``_

         :param id: ID of the ingest pipeline to create or update.
         :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated
diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py
index 32c00028f..3713ff741 100644
--- a/elasticsearch/_sync/client/ml.py
+++ b/elasticsearch/_sync/client/ml.py
@@ -2616,7 +2616,6 @@ def get_trained_models(
                 ],
             ]
         ] = None,
-        include_model_definition: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
         size: t.Optional[int] = None,
         tags: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -2646,8 +2645,6 @@ def get_trained_models(
         :param from_: Skips the specified number of models.
         :param include: A comma delimited string of optional fields to include in the
             response body.
-        :param include_model_definition: parameter is deprecated! Use [include=definition]
-            instead
         :param size: Specifies the maximum number of models to obtain.
         :param tags: A comma delimited string of tags. A trained model can have many
             tags, or none. When supplied, only trained models that contain all the supplied
@@ -2677,8 +2674,6 @@ def get_trained_models(
             __query["human"] = human
         if include is not None:
             __query["include"] = include
-        if include_model_definition is not None:
-            __query["include_model_definition"] = include_model_definition
         if pretty is not None:
             __query["pretty"] = pretty
         if size is not None:
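With the deprecated include_model_definition flag dropped from get_trained_models here, the include parameter expresses the same request, exactly as the removed docstring advised. A sketch with a hypothetical model id:

    # Replaces the removed include_model_definition=True flag.
    resp = client.ml.get_trained_models(
        model_id="my-trained-model",  # hypothetical id
        include="definition",
    )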
@@ -5733,7 +5728,7 @@ def validate(

          Validate an anomaly detection job.

-        ``_
+        ``_

         :param analysis_config:
         :param analysis_limits:
diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py
index ed5442d97..5f22ae433 100644
--- a/elasticsearch/_sync/client/simulate.py
+++ b/elasticsearch/_sync/client/simulate.py
@@ -35,7 +35,7 @@ class SimulateClient(NamespacedClient):
     body_fields=(
         "docs",
         "component_template_substitutions",
-        "index_template_subtitutions",
+        "index_template_substitutions",
         "mapping_addition",
         "pipeline_substitutions",
     ),
@@ -52,7 +52,7 @@ def ingest(
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        index_template_subtitutions: t.Optional[
+        index_template_substitutions: t.Optional[
             t.Mapping[str, t.Mapping[str, t.Any]]
         ] = None,
         mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -90,7 +90,7 @@ def ingest(
             an index argument.
         :param component_template_substitutions: A map of component template names to
             substitute component template definition objects.
-        :param index_template_subtitutions: A map of index template names to substitute
+        :param index_template_substitutions: A map of index template names to substitute
             index template definition objects.
         :param mapping_addition:
         :param pipeline: The pipeline to use as the default pipeline. This value can
@@ -127,8 +127,8 @@ def ingest(
             __body["component_template_substitutions"] = (
                 component_template_substitutions
             )
-        if index_template_subtitutions is not None:
-            __body["index_template_subtitutions"] = index_template_subtitutions
+        if index_template_substitutions is not None:
+            __body["index_template_substitutions"] = index_template_substitutions
         if mapping_addition is not None:
             __body["mapping_addition"] = mapping_addition
         if pipeline_substitutions is not None:
diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py
index b5808959c..6e87f926c 100644
--- a/elasticsearch/dsl/query.py
+++ b/elasticsearch/dsl/query.py
@@ -795,6 +795,28 @@ def __init__(
         )


+class GeoGrid(Query):
+    """
+    Matches `geo_point` and `geo_shape` values that intersect a grid cell
+    from a GeoGrid aggregation.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "geo_grid"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.GeoGridQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
 class GeoPolygon(Query):
     """
     :arg _field: The field to use in this query.
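The new GeoGrid query composes like any other DSL query: _value takes a types.GeoGridQuery, or a plain dict holding one of the grid-cell keys geogrid, geohash, or geohex. A sketch with assumed index and field names:

    from elasticsearch.dsl import Search
    from elasticsearch.dsl.query import GeoGrid

    # "places", "location", and the geohash cell "u0" are illustrative.
    s = Search(index="places").query(
        GeoGrid(_field="location", _value={"geohash": "u0"})
    )
    # Expected serialization: {"query": {"geo_grid": {"location": {"geohash": "u0"}}}}
    print(s.to_dict())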
diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py
index d1c39003e..4ea6d8361 100644
--- a/elasticsearch/dsl/types.py
+++ b/elasticsearch/dsl/types.py
@@ -880,6 +880,48 @@ def __init__(
         super().__init__(kwargs)


+class GeoGridQuery(AttrDict[Any]):
+    """
+    :arg geogrid:
+    :arg geohash:
+    :arg geohex:
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    geogrid: Union[str, DefaultType]
+    geohash: Union[str, DefaultType]
+    geohex: Union[str, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        geogrid: Union[str, DefaultType] = DEFAULT,
+        geohash: Union[str, DefaultType] = DEFAULT,
+        geohex: Union[str, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if geogrid is not DEFAULT:
+            kwargs["geogrid"] = geogrid
+        if geohash is not DEFAULT:
+            kwargs["geohash"] = geohash
+        if geohex is not DEFAULT:
+            kwargs["geohex"] = geohex
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
 class GeoHashLocation(AttrDict[Any]):
     """
     :arg geohash: (required)