`_
:param query: The ES|QL query API accepts an ES|QL query string in the query
parameter, runs it, and returns the results.
diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py
index 9778bb1d5..f1ea60007 100644
--- a/elasticsearch/_async/client/fleet.py
+++ b/elasticsearch/_async/client/fleet.py
@@ -155,9 +155,9 @@ async def msearch(
example, a request targeting foo*,bar* returns an error if an index starts
with foo but no index starts with bar.
:param allow_partial_search_results: If true, returns partial results if there
- are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
- If false, returns an error with no partial results. Defaults to the configured
- cluster setting `search.default_allow_partial_results` which is true by default.
+ are shard request timeouts or shard failures. If false, returns an error
+ with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+ which is true by default.
:param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating
node and remote clusters are minimized for cross-cluster search requests.
:param expand_wildcards: Type of index that wildcard expressions can match. If
@@ -401,9 +401,9 @@ async def search(
:param aggs:
:param allow_no_indices:
:param allow_partial_search_results: If true, returns partial results if there
- are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
- If false, returns an error with no partial results. Defaults to the configured
- cluster setting `search.default_allow_partial_results` which is true by default.
+ are shard request timeouts or shard failures. If false, returns an error
+ with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+ which is true by default.
:param analyze_wildcard:
:param analyzer:
:param batched_reduce_size:
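The reworded `allow_partial_search_results` text above is easiest to see in a call. A minimal async sketch, assuming a local cluster, a hypothetical index name, and the `searches` body parameter of the Fleet msearch helper; it forces the request to fail instead of accepting partial results:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    # Fail outright on shard timeouts or shard failures instead of returning
    # partial results; this overrides `search.default_allow_partial_results`.
    resp = await client.fleet.msearch(
        index="my-index",                    # hypothetical index name
        allow_partial_search_results=False,
        searches=[
            {},                              # per-search header
            {"query": {"match_all": {}}},    # per-search body
        ],
    )
    print(resp)
    await client.close()

asyncio.run(main())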
diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py
index 3678ae4a9..1e8c60aaa 100644
--- a/elasticsearch/_async/client/inference.py
+++ b/elasticsearch/_async/client/inference.py
@@ -234,6 +234,113 @@ async def get(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("input", "query", "task_settings"),
+ )
+ async def inference(
+ self,
+ *,
+ inference_id: str,
+ input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ task_type: t.Optional[
+ t.Union[
+ str,
+ t.Literal[
+ "chat_completion",
+ "completion",
+ "rerank",
+ "sparse_embedding",
+ "text_embedding",
+ ],
+ ]
+ ] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ query: t.Optional[str] = None,
+ task_settings: t.Optional[t.Any] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Perform inference on the service.
+ This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+ It returns a response with the results of the tasks.
+ The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
+ For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.
+
+ Info: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs with these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+
+
+ ``_
+
+ :param inference_id: The unique identifier for the inference endpoint.
+ :param input: The text on which you want to perform the inference task. It can
+ be a single string or an array. NOTE: Inference endpoints for the `completion`
+ task type currently support only a single string as input.
+ :param task_type: The type of inference task that the model performs.
+ :param query: The query input, which is required only for the `rerank` task.
+ It is not required for other tasks.
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
+ :param timeout: The amount of time to wait for the inference request to complete.
+ """
+ if inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'inference_id'")
+ if input is None and body is None:
+ raise ValueError("Empty value passed for parameter 'input'")
+ __path_parts: t.Dict[str, str]
+ if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+ __path_parts = {
+ "task_type": _quote(task_type),
+ "inference_id": _quote(inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}'
+ elif inference_id not in SKIP_IN_PATH:
+ __path_parts = {"inference_id": _quote(inference_id)}
+ __path = f'/_inference/{__path_parts["inference_id"]}'
+ else:
+ raise ValueError("Couldn't find a path for the given parameters")
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if input is not None:
+ __body["input"] = input
+ if query is not None:
+ __body["query"] = query
+ if task_settings is not None:
+ __body["task_settings"] = task_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return await self.perform_request( # type: ignore[return-value]
+ "POST",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.inference",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_name="inference_config",
)
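To round out the new async `inference` method above, a minimal usage sketch, assuming a local cluster and a hypothetical `text_embedding` endpoint created beforehand with the create inference API:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    # "my-embedding-endpoint" is a hypothetical inference endpoint ID; passing
    # task_type selects the /_inference/{task_type}/{inference_id} path built above.
    resp = await client.inference.inference(
        inference_id="my-embedding-endpoint",
        task_type="text_embedding",
        input=["first passage", "second passage"],
    )
    print(resp)
    await client.close()

asyncio.run(main())

Omitting `task_type` falls back to the shorter `/_inference/{inference_id}` path, as the branching in the method shows.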
diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py
index 1cf0cfe1b..0ea5ca9fd 100644
--- a/elasticsearch/_async/client/ingest.py
+++ b/elasticsearch/_async/client/ingest.py
@@ -208,7 +208,7 @@ async def geo_ip_stats(
Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
- ``_
+ ``_
"""
__path_parts: t.Dict[str, str] = {}
__path = "/_ingest/geoip/stats"
@@ -412,7 +412,7 @@ async def processor_grok(
A grok pattern is like a regular expression that supports aliased expressions that can be reused.
- ``_
+ ``_
"""
__path_parts: t.Dict[str, str] = {}
__path = "/_ingest/processor/grok"
@@ -620,7 +620,7 @@ async def put_pipeline(
Changes made using this API take effect immediately.
- ``_
+ ``_
:param id: ID of the ingest pipeline to create or update.
:param deprecated: Marks this ingest pipeline as deprecated. When a deprecated
diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py
index be00377a0..1b007e7cb 100644
--- a/elasticsearch/_async/client/nodes.py
+++ b/elasticsearch/_async/client/nodes.py
@@ -108,7 +108,7 @@ async def get_repositories_metering_info(
``_
:param node_id: Comma-separated list of node IDs or names used to limit returned
- information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).
+ information.
"""
if node_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'node_id'")
diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py
index 9c1f993c4..ecead9049 100644
--- a/elasticsearch/_async/client/snapshot.py
+++ b/elasticsearch/_async/client/snapshot.py
@@ -105,7 +105,6 @@ async def clone(
human: t.Optional[bool] = None,
master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
pretty: t.Optional[bool] = None,
- timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
@@ -126,8 +125,6 @@ async def clone(
:param master_timeout: The period to wait for the master node. If the master
node is not available before the timeout expires, the request fails and returns
an error. To indicate that the request should never timeout, set it to `-1`.
- :param timeout: The period of time to wait for a response. If no response is
- received before the timeout expires, the request fails and returns an error.
"""
if repository in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'repository'")
@@ -155,8 +152,6 @@ async def clone(
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
- if timeout is not None:
- __query["timeout"] = timeout
if not __body:
if indices is not None:
__body["indices"] = indices
diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py
index 6ac45cb31..7b2fbcbaa 100644
--- a/elasticsearch/_sync/client/__init__.py
+++ b/elasticsearch/_sync/client/__init__.py
@@ -3048,7 +3048,7 @@ def knn_search(
- ``_
+ ``_
:param index: A comma-separated list of index names to search; use `_all` or
to perform the operation on all indices.
@@ -4386,7 +4386,7 @@ def scripts_painless_execute(
Each context requires a script, but additional parameters depend on the context you're using for that script.
- ``_
+ ``_
:param context: The context that the script should run in. NOTE: Result ordering
in the field contexts is not guaranteed.
diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py
index c40bd06f5..3f1dea979 100644
--- a/elasticsearch/_sync/client/esql.py
+++ b/elasticsearch/_sync/client/esql.py
@@ -416,7 +416,7 @@ def query(
Get search results for an ES|QL (Elasticsearch query language) query.
- ``_
+ ``_
:param query: The ES|QL query API accepts an ES|QL query string in the query
parameter, runs it, and returns the results.
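As a quick illustration of the `query` parameter described above, a sketch against a local cluster with a hypothetical index; the ES|QL string is sent in the `query` body field and the JSON response carries `columns` and `values`:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Run an ES|QL query and print the tabular result.
resp = client.esql.query(query="FROM my-index | LIMIT 10")
print(resp["columns"])
print(resp["values"])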
diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py
index 820e661cb..44178398d 100644
--- a/elasticsearch/_sync/client/fleet.py
+++ b/elasticsearch/_sync/client/fleet.py
@@ -155,9 +155,9 @@ def msearch(
example, a request targeting foo*,bar* returns an error if an index starts
with foo but no index starts with bar.
:param allow_partial_search_results: If true, returns partial results if there
- are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
- If false, returns an error with no partial results. Defaults to the configured
- cluster setting `search.default_allow_partial_results` which is true by default.
+ are shard request timeouts or shard failures. If false, returns an error
+ with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+ which is true by default.
:param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating
node and remote clusters are minimized for cross-cluster search requests.
:param expand_wildcards: Type of index that wildcard expressions can match. If
@@ -401,9 +401,9 @@ def search(
:param aggs:
:param allow_no_indices:
:param allow_partial_search_results: If true, returns partial results if there
- are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
- If false, returns an error with no partial results. Defaults to the configured
- cluster setting `search.default_allow_partial_results` which is true by default.
+ are shard request timeouts or shard failures. If false, returns an error
+ with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+ which is true by default.
:param analyze_wildcard:
:param analyzer:
:param batched_reduce_size:
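The default that the reworded text refers to comes from the `search.default_allow_partial_results` cluster setting. A sketch of changing that default, assuming a local cluster:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# With the cluster default flipped to False, searches fail on shard timeouts
# or failures unless a request passes allow_partial_search_results=True.
client.cluster.put_settings(
    persistent={"search.default_allow_partial_results": False}
)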
diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py
index f876066eb..1826c5d51 100644
--- a/elasticsearch/_sync/client/inference.py
+++ b/elasticsearch/_sync/client/inference.py
@@ -234,6 +234,113 @@ def get(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("input", "query", "task_settings"),
+ )
+ def inference(
+ self,
+ *,
+ inference_id: str,
+ input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ task_type: t.Optional[
+ t.Union[
+ str,
+ t.Literal[
+ "chat_completion",
+ "completion",
+ "rerank",
+ "sparse_embedding",
+ "text_embedding",
+ ],
+ ]
+ ] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ query: t.Optional[str] = None,
+ task_settings: t.Optional[t.Any] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Perform inference on the service.
+ This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+ It returns a response with the results of the tasks.
+ The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
+ For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.
+
+ Info: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs with these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+
+
+ ``_
+
+ :param inference_id: The unique identifier for the inference endpoint.
+ :param input: The text on which you want to perform the inference task. It can
+ be a single string or an array. NOTE: Inference endpoints for the `completion`
+ task type currently support only a single string as input.
+ :param task_type: The type of inference task that the model performs.
+ :param query: The query input, which is required only for the `rerank` task.
+ It is not required for other tasks.
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
+ :param timeout: The amount of time to wait for the inference request to complete.
+ """
+ if inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'inference_id'")
+ if input is None and body is None:
+ raise ValueError("Empty value passed for parameter 'input'")
+ __path_parts: t.Dict[str, str]
+ if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+ __path_parts = {
+ "task_type": _quote(task_type),
+ "inference_id": _quote(inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}'
+ elif inference_id not in SKIP_IN_PATH:
+ __path_parts = {"inference_id": _quote(inference_id)}
+ __path = f'/_inference/{__path_parts["inference_id"]}'
+ else:
+ raise ValueError("Couldn't find a path for the given parameters")
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if input is not None:
+ __body["input"] = input
+ if query is not None:
+ __body["query"] = query
+ if task_settings is not None:
+ __body["task_settings"] = task_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return self.perform_request( # type: ignore[return-value]
+ "POST",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.inference",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_name="inference_config",
)
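The synchronous variant mirrors the async one apart from `await`. The sketch below additionally shows the `query` body field, which only applies to the `rerank` task type; the endpoint ID and documents are hypothetical:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Rerank candidate documents against a search query.
resp = client.inference.inference(
    inference_id="my-rerank-endpoint",  # hypothetical rerank endpoint
    task_type="rerank",
    query="capital of France",
    input=[
        "Paris is the capital of France.",
        "Berlin is the capital of Germany.",
    ],
)
print(resp)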
diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py
index 2a1b0463d..023f32235 100644
--- a/elasticsearch/_sync/client/ingest.py
+++ b/elasticsearch/_sync/client/ingest.py
@@ -208,7 +208,7 @@ def geo_ip_stats(
Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
- ``_
+ ``_
"""
__path_parts: t.Dict[str, str] = {}
__path = "/_ingest/geoip/stats"
@@ -412,7 +412,7 @@ def processor_grok(
A grok pattern is like a regular expression that supports aliased expressions that can be reused.
- ``_
+ ``_
"""
__path_parts: t.Dict[str, str] = {}
__path = "/_ingest/processor/grok"
@@ -620,7 +620,7 @@ def put_pipeline(
Changes made using this API take effect immediately.
- ``_
+ ``_
:param id: ID of the ingest pipeline to create or update.
:param deprecated: Marks this ingest pipeline as deprecated. When a deprecated
diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py
index e300ba3e3..ef6c67b10 100644
--- a/elasticsearch/_sync/client/nodes.py
+++ b/elasticsearch/_sync/client/nodes.py
@@ -108,7 +108,7 @@ def get_repositories_metering_info(
``_
:param node_id: Comma-separated list of node IDs or names used to limit returned
- information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).
+ information.
"""
if node_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'node_id'")
diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py
index 26c841765..ae80bb2a7 100644
--- a/elasticsearch/_sync/client/snapshot.py
+++ b/elasticsearch/_sync/client/snapshot.py
@@ -105,7 +105,6 @@ def clone(
human: t.Optional[bool] = None,
master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
pretty: t.Optional[bool] = None,
- timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
@@ -126,8 +125,6 @@ def clone(
:param master_timeout: The period to wait for the master node. If the master
node is not available before the timeout expires, the request fails and returns
an error. To indicate that the request should never timeout, set it to `-1`.
- :param timeout: The period of time to wait for a response. If no response is
- received before the timeout expires, the request fails and returns an error.
"""
if repository in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'repository'")
@@ -155,8 +152,6 @@ def clone(
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
- if timeout is not None:
- __query["timeout"] = timeout
if not __body:
if indices is not None:
__body["indices"] = indices
diff --git a/elasticsearch/dsl/aggs.py b/elasticsearch/dsl/aggs.py
index a20373163..97ef48d59 100644
--- a/elasticsearch/dsl/aggs.py
+++ b/elasticsearch/dsl/aggs.py
@@ -679,9 +679,8 @@ class CategorizeText(Bucket[_R]):
:arg categorization_analyzer: The categorization analyzer specifies
how the text is analyzed and tokenized before being categorized.
The syntax is very similar to that used to define the analyzer in
- the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsear
- ch/reference/8.0/indices-analyze.html). This property cannot be
- used at the same time as categorization_filters.
+ the analyze API. This property cannot be used at the same time as
+ `categorization_filters`.
:arg shard_size: The number of categorization buckets to return from
each shard before merging all the results.
:arg size: The number of buckets to return. Defaults to `10` if