From e4a83bf4df1adf0cec6d6779233bb5b402f7e487 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Thu, 29 Feb 2024 06:07:34 +0000
Subject: [PATCH] Auto-generated API code

---
 elasticsearch/_async/client/__init__.py  |  5 +-
 elasticsearch/_async/client/indices.py   | 69 ++++++++++++++++++
 elasticsearch/_async/client/inference.py | 92 ++++++++++++++----------
 elasticsearch/_sync/client/__init__.py   |  5 +-
 elasticsearch/_sync/client/indices.py    | 69 ++++++++++++++++++
 elasticsearch/_sync/client/inference.py  | 92 ++++++++++++++----------
 6 files changed, 256 insertions(+), 76 deletions(-)

diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py
index 2a31d9c1a..f42d6df89 100644
--- a/elasticsearch/_async/client/__init__.py
+++ b/elasticsearch/_async/client/__init__.py
@@ -4641,8 +4641,9 @@ async def update_by_query(
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Performs an update on every document in the index without changing the source,
-        for example to pick up a mapping change.
+        Updates documents that match the specified query. If no query is specified, performs
+        an update on every document in the index without changing the source, for example
+        to pick up a mapping change.

         ``_

diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py
index 63bdfcb90..1dff58189 100644
--- a/elasticsearch/_async/client/indices.py
+++ b/elasticsearch/_async/client/indices.py
@@ -3194,6 +3194,75 @@ async def reload_search_analyzers(
             "POST", __path, params=__query, headers=__headers
         )

+    @_rewrite_parameters()
+    async def resolve_cluster(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        allow_no_indices: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union["t.Literal['all', 'closed', 'hidden', 'none', 'open']", str]
+                ],
+                t.Union["t.Literal['all', 'closed', 'hidden', 'none', 'open']", str],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        ignore_throttled: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        Resolves the specified index expressions to return information about each cluster,
+        including the local cluster, if included.
+
+        ``_
+
+        :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases,
+            and data streams to resolve. Resources on remote clusters can be specified
+            using the ``<cluster>:<name>`` syntax.
+        :param allow_no_indices: If false, the request returns an error if any wildcard
+            expression, index alias, or _all value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices. For
+            example, a request targeting foo*,bar* returns an error if an index starts
+            with foo but no index starts with bar.
+        :param expand_wildcards: Type of index that wildcard patterns can match. If the
+            request can target data streams, this argument determines whether wildcard
+            expressions match hidden data streams. Supports comma-separated values, such
+            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+        :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored
+            when frozen. Defaults to false.
+        :param ignore_unavailable: If false, the request returns an error if it targets
+            a missing or closed index. Defaults to false.
+ """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path = f"/_resolve/cluster/{_quote(name)}" + __query: t.Dict[str, t.Any] = {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices + if error_trace is not None: + __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", __path, params=__query, headers=__headers + ) + @_rewrite_parameters() async def resolve_index( self, diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 5c46e1111..3a1a27025 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -29,8 +29,10 @@ class InferenceClient(NamespacedClient): async def delete_model( self, *, - task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str], - model_id: str, + inference_id: str, + task_type: t.Optional[ + t.Union["t.Literal['sparse_embedding', 'text_embedding']", str] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -41,14 +43,17 @@ async def delete_model( ``_ - :param task_type: The model task type - :param model_id: The unique identifier of the inference model. + :param inference_id: The inference Id + :param task_type: The task type """ - if task_type in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'task_type'") - if model_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'model_id'") - __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}" + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}" + elif inference_id not in SKIP_IN_PATH: + __path = f"/_inference/{_quote(inference_id)}" + else: + raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace @@ -67,8 +72,10 @@ async def delete_model( async def get_model( self, *, - task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str], - model_id: str, + inference_id: str, + task_type: t.Optional[ + t.Union["t.Literal['sparse_embedding', 'text_embedding']", str] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -79,14 +86,17 @@ async def get_model( ``_ - :param task_type: The model task type - :param model_id: The unique identifier of the inference model. 
+        :param inference_id: The inference Id
+        :param task_type: The task type
         """
-        if task_type in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'task_type'")
-        if model_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'model_id'")
-        __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}"
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}"
+        elif inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(inference_id)}"
+        else:
+            raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
@@ -107,9 +117,11 @@
     async def inference(
         self,
         *,
-        task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str],
-        model_id: str,
+        inference_id: str,
         input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        task_type: t.Optional[
+            t.Union["t.Literal['sparse_embedding', 'text_embedding']", str]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -122,18 +134,21 @@ async def inference(

         ``_

-        :param task_type: The model task type
-        :param model_id: The unique identifier of the inference model.
+        :param inference_id: The inference Id
         :param input: Text input to the model. Either a string or an array of strings.
+        :param task_type: The task type
         :param task_settings: Optional task settings
         """
-        if task_type in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'task_type'")
-        if model_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'model_id'")
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
         if input is None and body is None:
             raise ValueError("Empty value passed for parameter 'input'")
-        __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}"
+        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}"
+        elif inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(inference_id)}"
+        else:
+            raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
@@ -164,8 +179,10 @@
     async def put_model(
         self,
         *,
-        task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str],
-        model_id: str,
+        inference_id: str,
+        task_type: t.Optional[
+            t.Union["t.Literal['sparse_embedding', 'text_embedding']", str]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -178,21 +195,24 @@ async def put_model(

         ``_

-        :param task_type: The model task type
-        :param model_id: The unique identifier of the inference model.
+        :param inference_id: The inference Id
+        :param task_type: The task type
         :param model_config:
         """
-        if task_type in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'task_type'")
-        if model_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'model_id'")
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
         if model_config is None and body is None:
             raise ValueError(
                 "Empty value passed for parameters 'model_config' and 'body', one of them should be set."
             )
         elif model_config is not None and body is not None:
             raise ValueError("Cannot set both 'model_config' and 'body'")
-        __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}"
+        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}"
+        elif inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(inference_id)}"
+        else:
+            raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py
index 51812f0d9..4cdb97f70 100644
--- a/elasticsearch/_sync/client/__init__.py
+++ b/elasticsearch/_sync/client/__init__.py
@@ -4639,8 +4639,9 @@ def update_by_query(
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Performs an update on every document in the index without changing the source,
-        for example to pick up a mapping change.
+        Updates documents that match the specified query. If no query is specified, performs
+        an update on every document in the index without changing the source, for example
+        to pick up a mapping change.

         ``_

diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py
index 3b2885df9..3aa31a3e9 100644
--- a/elasticsearch/_sync/client/indices.py
+++ b/elasticsearch/_sync/client/indices.py
@@ -3194,6 +3194,75 @@ def reload_search_analyzers(
             "POST", __path, params=__query, headers=__headers
         )

+    @_rewrite_parameters()
+    def resolve_cluster(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        allow_no_indices: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union["t.Literal['all', 'closed', 'hidden', 'none', 'open']", str]
+                ],
+                t.Union["t.Literal['all', 'closed', 'hidden', 'none', 'open']", str],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        ignore_throttled: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        Resolves the specified index expressions to return information about each cluster,
+        including the local cluster, if included.
+
+        ``_
+
+        :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases,
+            and data streams to resolve. Resources on remote clusters can be specified
+            using the ``<cluster>:<name>`` syntax.
+        :param allow_no_indices: If false, the request returns an error if any wildcard
+            expression, index alias, or _all value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices. For
+            example, a request targeting foo*,bar* returns an error if an index starts
+            with foo but no index starts with bar.
+        :param expand_wildcards: Type of index that wildcard patterns can match. If the
+            request can target data streams, this argument determines whether wildcard
+            expressions match hidden data streams. Supports comma-separated values, such
+            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+        :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored
+            when frozen. Defaults to false.
+        :param ignore_unavailable: If false, the request returns an error if it targets
+            a missing or closed index. Defaults to false.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path = f"/_resolve/cluster/{_quote(name)}"
+        __query: t.Dict[str, t.Any] = {}
+        if allow_no_indices is not None:
+            __query["allow_no_indices"] = allow_no_indices
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if ignore_throttled is not None:
+            __query["ignore_throttled"] = ignore_throttled
+        if ignore_unavailable is not None:
+            __query["ignore_unavailable"] = ignore_unavailable
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET", __path, params=__query, headers=__headers
+        )
+
     @_rewrite_parameters()
     def resolve_index(
         self,
diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py
index 2716b7d1a..4932439ad 100644
--- a/elasticsearch/_sync/client/inference.py
+++ b/elasticsearch/_sync/client/inference.py
@@ -29,8 +29,10 @@ class InferenceClient(NamespacedClient):
     def delete_model(
         self,
         *,
-        task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str],
-        model_id: str,
+        inference_id: str,
+        task_type: t.Optional[
+            t.Union["t.Literal['sparse_embedding', 'text_embedding']", str]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -41,14 +43,17 @@ def delete_model(

         ``_

-        :param task_type: The model task type
-        :param model_id: The unique identifier of the inference model.
+        :param inference_id: The inference Id
+        :param task_type: The task type
         """
-        if task_type in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'task_type'")
-        if model_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'model_id'")
-        __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}"
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}"
+        elif inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(inference_id)}"
+        else:
+            raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
@@ -67,8 +72,10 @@
     def get_model(
         self,
         *,
-        task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str],
-        model_id: str,
+        inference_id: str,
+        task_type: t.Optional[
+            t.Union["t.Literal['sparse_embedding', 'text_embedding']", str]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -79,14 +86,17 @@ def get_model(

         ``_

-        :param task_type: The model task type
-        :param model_id: The unique identifier of the inference model.
+        :param inference_id: The inference Id
+        :param task_type: The task type
         """
-        if task_type in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'task_type'")
-        if model_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'model_id'")
-        __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}"
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}"
+        elif inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(inference_id)}"
+        else:
+            raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
@@ -107,9 +117,11 @@
     def inference(
         self,
         *,
-        task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str],
-        model_id: str,
+        inference_id: str,
         input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        task_type: t.Optional[
+            t.Union["t.Literal['sparse_embedding', 'text_embedding']", str]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -122,18 +134,21 @@ def inference(

         ``_

-        :param task_type: The model task type
-        :param model_id: The unique identifier of the inference model.
+        :param inference_id: The inference Id
         :param input: Text input to the model. Either a string or an array of strings.
+        :param task_type: The task type
         :param task_settings: Optional task settings
         """
-        if task_type in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'task_type'")
-        if model_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'model_id'")
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
         if input is None and body is None:
             raise ValueError("Empty value passed for parameter 'input'")
-        __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}"
+        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}"
+        elif inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(inference_id)}"
+        else:
+            raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
@@ -164,8 +179,10 @@
     def put_model(
         self,
         *,
-        task_type: t.Union["t.Literal['sparse_embedding', 'text_embedding']", str],
-        model_id: str,
+        inference_id: str,
+        task_type: t.Optional[
+            t.Union["t.Literal['sparse_embedding', 'text_embedding']", str]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -178,21 +195,24 @@ def put_model(

         ``_

-        :param task_type: The model task type
-        :param model_id: The unique identifier of the inference model.
+        :param inference_id: The inference Id
+        :param task_type: The task type
         :param model_config:
         """
-        if task_type in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'task_type'")
-        if model_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'model_id'")
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
         if model_config is None and body is None:
             raise ValueError(
                 "Empty value passed for parameters 'model_config' and 'body', one of them should be set."
             )
         elif model_config is not None and body is not None:
             raise ValueError("Cannot set both 'model_config' and 'body'")
-        __path = f"/_inference/{_quote(task_type)}/{_quote(model_id)}"
+        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(task_type)}/{_quote(inference_id)}"
+        elif inference_id not in SKIP_IN_PATH:
+            __path = f"/_inference/{_quote(inference_id)}"
+        else:
+            raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
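
Usage sketch for the endpoints this patch touches, assuming a client built from
this branch against an 8.13-era cluster; the host, the logs-* pattern, the
remote alias my_remote, the inference ID my-elser, and the ELSER service
settings below are illustrative, not taken from the patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # illustrative host

    # New endpoint: reports, per cluster (local and remote), how an index
    # expression resolves; remote resources use the <cluster>:<name> syntax.
    print(client.indices.resolve_cluster(name="logs-*,my_remote:logs-*"))

    # The inference methods are now keyed on inference_id; task_type is
    # optional and, when given, is placed before the ID in the request path.
    client.inference.put_model(
        inference_id="my-elser",  # illustrative inference ID
        task_type="sparse_embedding",
        model_config={  # illustrative ELSER configuration
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
    )
    print(client.inference.inference(inference_id="my-elser", input=["some text"]))
    client.inference.delete_model(inference_id="my-elser")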