From ec345622edf003d1de2cc7dcb66b9fc74c96a862 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 6 Jan 2025 11:15:19 +0000 Subject: [PATCH] Auto-generated API code --- elasticsearch/_async/client/cat.py | 259 +++--------- elasticsearch/_async/client/connector.py | 237 +++++++++++ elasticsearch/_async/client/inference.py | 16 +- elasticsearch/_async/client/ingest.py | 166 ++++++++ elasticsearch/_async/client/logstash.py | 14 +- elasticsearch/_async/client/migration.py | 19 +- elasticsearch/_async/client/monitoring.py | 3 +- elasticsearch/_async/client/rollup.py | 63 ++- .../_async/client/search_application.py | 66 ++- .../_async/client/searchable_snapshots.py | 12 +- elasticsearch/_async/client/shutdown.py | 31 +- elasticsearch/_async/client/slm.py | 46 ++- elasticsearch/_async/client/snapshot.py | 91 ++++- elasticsearch/_async/client/tasks.py | 22 +- elasticsearch/_async/client/text_structure.py | 375 +++++++++++++++++- elasticsearch/_async/client/transform.py | 11 +- elasticsearch/_async/client/watcher.py | 53 ++- elasticsearch/_async/client/xpack.py | 10 +- elasticsearch/_sync/client/cat.py | 259 +++--------- elasticsearch/_sync/client/connector.py | 237 +++++++++++ elasticsearch/_sync/client/inference.py | 16 +- elasticsearch/_sync/client/ingest.py | 166 ++++++++ elasticsearch/_sync/client/logstash.py | 14 +- elasticsearch/_sync/client/migration.py | 19 +- elasticsearch/_sync/client/monitoring.py | 3 +- elasticsearch/_sync/client/rollup.py | 63 ++- .../_sync/client/search_application.py | 66 ++- .../_sync/client/searchable_snapshots.py | 12 +- elasticsearch/_sync/client/shutdown.py | 31 +- elasticsearch/_sync/client/slm.py | 46 ++- elasticsearch/_sync/client/snapshot.py | 91 ++++- elasticsearch/_sync/client/tasks.py | 22 +- elasticsearch/_sync/client/text_structure.py | 375 +++++++++++++++++- elasticsearch/_sync/client/transform.py | 11 +- elasticsearch/_sync/client/watcher.py | 53 ++- elasticsearch/_sync/client/xpack.py | 10 +- 36 files changed, 2410 insertions(+), 578 deletions(-) diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index be38db139..c03c3e348 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -308,8 +308,6 @@ async def count( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -332,11 +330,6 @@ async def count( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
@@ -362,10 +355,6 @@ async def count( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -396,8 +385,6 @@ async def fielddata( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -418,11 +405,6 @@ async def fielddata( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -450,10 +432,6 @@ async def fielddata( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -480,8 +458,6 @@ async def health( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ @@ -510,11 +486,6 @@ async def health( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
@@ -537,10 +508,6 @@ async def health( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -562,66 +529,15 @@ async def health( ) @_rewrite_parameters() - async def help( - self, - *, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - help: t.Optional[bool] = None, - human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - pretty: t.Optional[bool] = None, - s: t.Optional[t.Union[str, t.Sequence[str]]] = None, - v: t.Optional[bool] = None, - ) -> TextApiResponse: + async def help(self) -> TextApiResponse: """ Get CAT help. Returns help for the CAT APIs. ``_ - - :param format: Specifies the format to return the columnar data in, can be set - to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. - :param help: When set to `true` will output available columns. This option can't - be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if format is not None: - __query["format"] = format - if h is not None: - __query["h"] = h - if help is not None: - __query["help"] = help - if human is not None: - __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if pretty is not None: - __query["pretty"] = pretty - if s is not None: - __query["s"] = s - if v is not None: - __query["v"] = v __headers = {"accept": "text/plain"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -656,7 +572,6 @@ async def indices( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, pri: t.Optional[bool] = None, @@ -694,10 +609,6 @@ async def indices( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. 
In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param pri: If true, the response only includes information from primary shards. :param s: List of columns that determine how the table should be sorted. Sorting @@ -734,8 +645,6 @@ async def indices( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -894,8 +803,6 @@ async def ml_data_frame_analytics( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -945,7 +852,9 @@ async def ml_data_frame_analytics( ], ] ] = None, - time: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -966,11 +875,6 @@ async def ml_data_frame_analytics( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: Unit used to display time values. @@ -1000,10 +904,6 @@ async def ml_data_frame_analytics( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1073,8 +973,6 @@ async def ml_datafeeds( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1145,11 +1043,6 @@ async def ml_datafeeds( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. 
@@ -1177,10 +1070,6 @@ async def ml_datafeeds( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1349,8 +1238,6 @@ async def ml_jobs( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1518,11 +1405,6 @@ async def ml_jobs( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. @@ -1552,10 +1434,6 @@ async def ml_jobs( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1635,8 +1513,6 @@ async def ml_trained_models( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1683,6 +1559,9 @@ async def ml_trained_models( ] ] = None, size: t.Optional[int] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1708,14 +1587,10 @@ async def ml_trained_models( :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of transforms to display. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -1744,16 +1619,14 @@ async def ml_trained_models( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1855,10 +1728,12 @@ async def nodes( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1879,14 +1754,11 @@ async def nodes( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1910,14 +1782,14 @@ async def nodes( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1944,6 +1816,9 @@ async def pending_tasks( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1967,6 +1842,7 @@ async def pending_tasks( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] = {} @@ -1992,6 +1868,8 @@ async def pending_tasks( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2014,6 +1892,7 @@ async def plugins( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -2033,6 +1912,7 @@ async def plugins( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -2058,6 +1938,8 @@ async def plugins( __query["help"] = help if human is not None: __query["human"] = human + if include_bootstrap is not None: + __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: @@ -2094,10 +1976,11 @@ async def recovery( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2124,14 +2007,10 @@ async def recovery( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -2160,14 +2039,12 @@ async def recovery( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2358,10 +2235,12 @@ async def shards( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2381,14 +2260,11 @@ async def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2413,14 +2289,14 @@ async def shards( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2445,10 +2321,12 @@ async def snapshots( help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2470,14 +2348,11 @@ async def snapshots( be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information from unavailable snapshots. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. 
:param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2502,14 +2377,14 @@ async def snapshots( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2535,13 +2410,16 @@ async def tasks( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, + wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Returns information about tasks currently executing in the cluster. IMPORTANT: @@ -2559,18 +2437,18 @@ async def tasks( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param node_id: Unique node identifiers, which are used to limit the response. + :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. + :param wait_for_completion: If `true`, the request blocks until the task has + completed. 
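Note the rename in this hunk: `node_id` becomes `nodes`, and `time`, `timeout`, and `wait_for_completion` are new. A hedged sketch of the updated call (client setup as in the earlier example; node identifiers hypothetical):

```python
from elasticsearch import AsyncElasticsearch


async def show_tasks(es: AsyncElasticsearch) -> None:
    # `nodes` replaces the old `node_id` keyword argument.
    resp = await es.cat.tasks(
        nodes=["node-1", "node-2"],  # hypothetical node identifiers
        time="ms",                   # new: render durations in milliseconds
        timeout="30s",               # new: fail if no response in time
        wait_for_completion=False,   # new: don't block on the tasks
        v=True,
    )
    print(resp)
```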
""" __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" @@ -2591,20 +2469,22 @@ async def tasks( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time + if timeout is not None: + __query["timeout"] = timeout if v is not None: __query["v"] = v + if wait_for_completion is not None: + __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -2883,8 +2763,6 @@ async def transforms( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -2998,11 +2876,6 @@ async def transforms( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. @@ -3033,10 +2906,6 @@ async def transforms( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index ce63350a1..bb6262c93 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -589,6 +589,125 @@ async def sync_job_cancel( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_check_in( + self, + *, + connector_sync_job_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Check in a connector sync job. Check in a connector sync job and set the `last_seen` + field to the current time before updating it in the internal index. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job + to be checked in. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = ( + f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.sync_job_check_in", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("worker_hostname", "sync_cursor"), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_claim( + self, + *, + connector_sync_job_id: str, + worker_hostname: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + sync_cursor: t.Optional[t.Any] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Claim a connector sync job. This action updates the job status to `in_progress` + and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, + it can set the `sync_cursor` property for the sync job. This API is not intended + for direct connector management by users. It supports the implementation of services + that utilize the connector protocol to communicate with Elasticsearch. To sync + data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. + :param worker_hostname: The host name of the current system that will run the + job. + :param sync_cursor: The cursor object from the last incremental sync job. This + should reference the `sync_cursor` field in the connector state for which + the job runs. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if worker_hostname is None and body is None: + raise ValueError("Empty value passed for parameter 'worker_hostname'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if worker_hostname is not None: + __body["worker_hostname"] = worker_hostname + if sync_cursor is not None: + __body["sync_cursor"] = sync_cursor + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_claim", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_delete( @@ -634,6 +753,64 @@ async def sync_job_delete( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("error",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_error( + self, + *, + connector_sync_job_id: str, + error: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set a connector sync job error. Set the `error` field for a connector sync job + and set its `status` to `error`. To sync data using self-managed connectors, + you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier for the connector sync job. + :param error: The error for the connector sync job error field. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if error is None and body is None: + raise ValueError("Empty value passed for parameter 'error'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if error is not None: + __body["error"] = error + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_error", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_get( @@ -1032,6 +1209,66 @@ async def update_error( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("features",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def update_features( + self, + *, + connector_id: str, + features: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update the connector features. Update the connector features in the connector + document. This API can be used to control the following aspects of a connector: + * document-level security * incremental syncs * advanced sync rules * basic sync + rules Normally, the running connector service automatically manages these features. + However, you can use this API to override the default behavior. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_id: The unique identifier of the connector to be updated. 
+ :param features: + """ + if connector_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_id'") + if features is None and body is None: + raise ValueError("Empty value passed for parameter 'features'") + __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} + __path = f'/_connector/{__path_parts["connector_id"]}/_features' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if features is not None: + __body["features"] = features + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.update_features", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 860430d87..501a541ce 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -255,7 +255,21 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint + Create an inference endpoint. When you create an inference endpoint, the associated + machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before + using it. To verify the deployment status, use the get trained model statistics + API. Look for `"state": "fully_allocated"` in the response and ensure that the + `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating + multiple endpoints for the same model unless required, as each endpoint consumes + significant resources. IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google + Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + uploaded through Eland, the inference APIs offer an alternative way to use and + manage trained models. However, if you do not plan to use the inference APIs + to use these models or if you want to use non-NLP models, use the machine learning + trained model APIs. ``_ diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index c2fe6e688..a0972a254 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -77,6 +77,57 @@ async def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + async def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Deletes an IP location database configuration. 
+ + ``_ + + :param id: A comma-separated list of IP location database configurations to delete + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.delete_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def delete_pipeline( self, @@ -217,6 +268,57 @@ async def get_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + async def get_ip_location_database( + self, + *, + id: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Returns information about one or more IP location database configurations. + + ``_ + + :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard + (`*`) expressions are supported. To get all database configurations, omit + this parameter or use `*`. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
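A usage sketch for the new IP location database endpoints added in this file (the companion `put_ip_location_database` follows just below); the database ID is hypothetical:

```python
from elasticsearch import AsyncElasticsearch


async def inspect_ip_location_dbs(es: AsyncElasticsearch) -> None:
    # Omit `id` (or pass "*") to list every configured database.
    print(await es.ingest.get_ip_location_database())
    # Fetch a single configuration, then delete it.
    print(await es.ingest.get_ip_location_database(id="my-ip-db"))
    await es.ingest.delete_ip_location_database(id="my-ip-db", timeout="30s")
```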
+ """ + __path_parts: t.Dict[str, str] + if id not in SKIP_IN_PATH: + __path_parts = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + else: + __path_parts = {} + __path = "/_ingest/ip_location/database" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.get_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def get_pipeline( self, @@ -384,6 +486,70 @@ async def put_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="configuration", + ) + async def put_ip_location_database( + self, + *, + id: str, + configuration: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Returns information about one or more IP location database configurations. + + ``_ + + :param id: ID of the database configuration to create or update. + :param configuration: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if configuration is None and body is None: + raise ValueError( + "Empty value passed for parameters 'configuration' and 'body', one of them should be set." 
+ ) + elif configuration is not None and body is not None: + raise ValueError("Cannot set both 'configuration' and 'body'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = configuration if configuration is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="ingest.put_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "deprecated", diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index 8701d0cb5..046b0e037 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -36,11 +36,12 @@ async def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline used for Logstash Central Management. + Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central + Management. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -76,11 +77,11 @@ async def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves pipelines used for Logstash Central Management. + Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. ``_ - :param id: Comma-separated list of pipeline identifiers. + :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -123,11 +124,12 @@ async def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline used for Logstash Central Management. + Create or update a Logstash pipeline. Create a pipeline that is used for Logstash + Central Management. If the specified pipeline exists, it is replaced. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. :param pipeline: """ if id in SKIP_IN_PATH: diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index 8faacb577..cacf1e9e1 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -36,9 +36,10 @@ async def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about different cluster, node, and index level settings - that use deprecated features that will be removed or changed in the next major - version. + Get deprecation information. Get information about different cluster, node, and + index level settings that use deprecated features that will be removed or changed + in the next major version. TIP: This APIs is designed for indirect use by the + Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. 
``_ @@ -81,7 +82,11 @@ async def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Find out whether system features need to be upgraded or not + Get feature migration information. Version upgrades sometimes require changes + to how features store configuration information and data in system indices. Check + which features need to be migrated and the status of any migrations that are + in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend you use the Upgrade Assistant. ``_ """ @@ -116,7 +121,11 @@ async def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Begin upgrades for system features + Start the feature migration. Version upgrades sometimes require changes to how + features store configuration information and data in system indices. This API + starts the automatic migration process. Some functionality might be temporarily + unavailable during the migration process. TIP: The API is designed for indirect + use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ """ diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index c5c13d77b..2439d73d7 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -42,7 +42,8 @@ async def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Used by the monitoring features to send monitoring data. + Send monitoring data. This API is used by the monitoring features to send monitoring + data. ``_ diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 5ecd9308c..9774cd0ed 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -43,7 +43,20 @@ async def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing rollup job. + Delete a rollup job. A job must be stopped before it can be deleted. If you attempt + to delete a started job, an error occurs. Similarly, if you attempt to delete + a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you + remove only the process that is actively monitoring and rolling up data. The + API does not delete any previously rolled up data. This is by design; a user + may wish to roll up a static data set. Because the data set is static, after + it has been fully rolled up there is no need to keep the indexing rollup job + around (as there will be no new data). Thus the job can be deleted, leaving behind + the rolled up data for analysis. If you wish to also remove the rollup data and + the rollup index contains the data for only a single job, you can delete the + whole rollup index. If the rollup index stores data from several jobs, you must + issue a delete-by-query that targets the rollup job's identifier in the rollup + index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": + { "_rollup.id": "the_rollup_job_id" } } } ``` ``_ @@ -84,7 +97,11 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the configuration, stats, and status of rollup jobs. + Get rollup job information. Get the configuration, stats, and status of rollup + jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + If a job was created, ran for a while, then was deleted, the API does not return + any details about it. 
For details about a historical rollup job, the rollup capabilities + API may be more useful. ``_ @@ -129,8 +146,15 @@ async def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the capabilities of any rollup jobs that have been configured for a specific - index or index pattern. + Get the rollup job capabilities. Get the capabilities of any rollup jobs that + have been configured for a specific index or index pattern. This API is useful + because a rollup job is often configured to rollup only a subset of fields from + the source index. Furthermore, only certain aggregations can be configured for + various fields, leading to a limited subset of functionality depending on that + configuration. This API enables you to inspect an index and determine: 1. Does + this index have associated rollup data somewhere in the cluster? 2. If yes to + the first question, what fields were rolled up, what aggregations can be performed, + and where does the data live? ``_ @@ -175,8 +199,12 @@ async def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (for example, - the index where rollup data is stored). + Get the rollup index capabilities. Get the rollup capabilities of all jobs inside + of a rollup index. A single rollup index may store the data for multiple rollup + jobs and may have a variety of capabilities depending on those jobs. This API + enables you to determine: * What jobs are stored in an index (or indices specified + via a pattern)? * What target indices were rolled up, what fields were used in + those rollups, and what aggregations can be performed on each job? ``_ @@ -239,7 +267,16 @@ async def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a rollup job. + Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with + no rollup usage will fail with a message about the deprecation and planned removal + of rollup features. A cluster needs to contain either a rollup job or a rollup + index in order for this API to be allowed to run. The rollup job configuration + contains all the details about how the job should run, when it indexes documents, + and what future queries will be able to run against the rollup index. There are + three main sections to the job configuration: the logistical details about the + job (for example, the cron schedule), the fields that are used for grouping, + and what metrics to collect for each group. Jobs are created in a `STOPPED` state. + You can start them with the start rollup jobs API. ``_ @@ -356,7 +393,11 @@ async def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard Query DSL. + Search rolled-up data. The rollup search endpoint is needed because, internally, + rolled-up documents utilize a different document structure than the original + data. It rewrites standard Query DSL into a format that matches the rollup documents + then takes the response and rewrites it back to what a client would expect given + the original query. ``_ @@ -420,7 +461,8 @@ async def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an existing, stopped rollup job. + Start rollup jobs. If you try to start a job that does not exist, an exception + occurs. If you try to start a job that is already started, nothing happens. 
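A sketch tying the rollup endpoints in this file together; the job and index names are hypothetical, and per the semantics above, starting an already-started job is a no-op:

```python
from elasticsearch import AsyncElasticsearch


async def query_rollups(es: AsyncElasticsearch) -> None:
    await es.rollup.start_job(id="sensor_rollup_job")  # no-op if already started
    # rollup_search rewrites standard Query DSL against the rollup
    # document format, as described in the docstring above.
    resp = await es.rollup.rollup_search(
        index="sensor_rollup",  # hypothetical rollup index
        size=0,
        aggs={"max_temperature": {"max": {"field": "temperature"}}},
    )
    print(resp["aggregations"])
```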
``_

@@ -463,7 +505,8 @@ async def stop_job(
wait_for_completion: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Stops an existing, started rollup job.
+ Stop rollup jobs. If you try to stop a job that does not exist, an exception
+ occurs. If you try to stop a job that is already stopped, nothing happens.

``_

diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py
index 984200467..bd3928713 100644
--- a/elasticsearch/_async/client/search_application.py
+++ b/elasticsearch/_async/client/search_application.py
@@ -216,7 +216,7 @@ async def list(
size: t.Optional[int] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns the existing search applications.
+ Get search applications. Get information about search applications.

``_

@@ -351,6 +351,70 @@ async def put_behavioral_analytics(
path_parts=__path_parts,
)

+ @_rewrite_parameters(
+ body_fields=("params",),
+ ignore_deprecated_options={"params"},
+ )
+ @_stability_warning(Stability.EXPERIMENTAL)
+ async def render_query(
+ self,
+ *,
+ name: str,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ params: t.Optional[t.Mapping[str, t.Any]] = None,
+ pretty: t.Optional[bool] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ Render a search application query. Generate an Elasticsearch query using the
+ specified query parameters and the search template associated with the search
+ application or a default template if none is specified. If a parameter used in
+ the search template is not specified in `params`, the parameter's default value
+ will be used. The API returns the specific Elasticsearch query that would be
+ generated and run by calling the search application search API. You must have
+ `read` privileges on the backing alias of the search application.
+
+ ``_
+
+ :param name: The name of the search application to render the query for.
+ :param params: + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"name": _quote(name)} + __path = ( + f'/_application/search_application/{__path_parts["name"]}/_render_query' + ) + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if params is not None: + __body["params"] = params + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.render_query", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index 798a32c11..029fba754 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -44,7 +44,8 @@ async def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve node-level cache statistics about searchable snapshots. + Get cache statistics. Get statistics about the shared cache for partially mounted + indices. ``_ @@ -103,7 +104,8 @@ async def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache of searchable snapshots. + Clear the cache. Clear indices and data streams from the shared cache for partially + mounted indices. ``_ @@ -175,7 +177,9 @@ async def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot as a searchable index. + Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + this API for snapshots managed by index lifecycle management (ILM). Manually + mounting ILM-managed snapshots can interfere with ILM processes. ``_ @@ -255,7 +259,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve shard-level statistics about searchable snapshots. + Get searchable snapshot statistics. ``_ diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index 0301435c9..e4117bff8 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -42,8 +42,13 @@ async def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and - ECK. Direct use is not supported. + Cancel node shutdown preparations. Remove a node from the shutdown list so it + can resume normal operations. You must explicitly clear the shutdown request + when a node rejoins the cluster or when a node has permanently left the cluster. + Shutdown requests are never removed automatically by Elasticsearch. NOTE: This + feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, + and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator + privileges feature is enabled, you must be an operator to use this API. 
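A minimal sketch of the node shutdown workflow that the docstrings above and below describe, assuming an existing `AsyncElasticsearch` client; the node id and reason are hypothetical:

```python
from elasticsearch import AsyncElasticsearch


async def restart_node(client: AsyncElasticsearch) -> None:
    # Mark the node for a restart; node id and reason are hypothetical.
    await client.shutdown.put_node(
        node_id="node-1",
        type="restart",
        reason="routine maintenance",
        allocation_delay="10m",
    )
    # Check how far along the shutdown preparations are.
    status = await client.shutdown.get_node(node_id="node-1")
    print(status)
    # Shutdown requests are never removed automatically; clear the request
    # explicitly once the node has rejoined the cluster.
    await client.shutdown.delete_node(node_id="node-1")
```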
``_ @@ -98,8 +103,13 @@ async def get_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve status of a node or nodes that are currently marked as shutting down. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the shutdown status. Get information about nodes that are ready to be shut + down, have shut down preparations still in progress, or have stalled. The API + returns status information for each part of the shut down process. NOTE: This + feature is designed for indirect use by Elasticsearch Service, Elastic Cloud + Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If + the operator privileges feature is enabled, you must be an operator to use this + API. ``_ @@ -166,8 +176,17 @@ async def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct - use is not supported. + Prepare a node to be shut down. NOTE: This feature is designed for indirect use + by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. If the operator privileges feature is enabled, you + must be an operator to use this API. The API migrates ongoing tasks and index + shards to other nodes as needed to prepare a node to be restarted or shut down + and removed from the cluster. This ensures that Elasticsearch can be stopped + safely with minimal disruption to the cluster. You must specify the type of shutdown: + `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, + you can use this API to change the shutdown type. IMPORTANT: This API does NOT + terminate the Elasticsearch process. Monitor the node shutdown status to determine + when it is safe to stop Elasticsearch. ``_ diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 6a73b1d3b..1164a6006 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -36,7 +36,9 @@ async def delete_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing snapshot lifecycle policy. + Delete a policy. Delete a snapshot lifecycle policy definition. This operation + prevents any future snapshots from being taken but does not cancel in-progress + snapshots or remove previously-taken snapshots. ``_ @@ -76,8 +78,10 @@ async def execute_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Immediately creates a snapshot according to the lifecycle policy, without waiting - for the scheduled time. + Run a policy. Immediately create a snapshot according to the snapshot lifecycle + policy without waiting for the scheduled time. The snapshot policy is normally + applied according to its schedule, but you might want to manually run a policy + before performing an upgrade or other maintenance. ``_ @@ -116,7 +120,9 @@ async def execute_retention( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes any snapshots that are expired according to the policy's retention rules. + Run a retention policy. Manually apply the retention policy to force immediate + removal of snapshots that are expired according to the snapshot lifecycle policy + retention rules. The retention policy is normally applied according to its schedule. 
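A minimal sketch of running an SLM policy ahead of schedule, as the docstrings above describe; it assumes an existing `AsyncElasticsearch` client and a hypothetical policy id:

```python
from elasticsearch import AsyncElasticsearch


async def snapshot_now(client: AsyncElasticsearch) -> None:
    # Run the policy immediately instead of waiting for its schedule,
    # for example before an upgrade. The policy id is hypothetical.
    resp = await client.slm.execute_lifecycle(policy_id="nightly-snapshots")
    print("snapshot started:", resp["snapshot_name"])
    # Separately, force retention to prune snapshots that have expired.
    await client.slm.execute_retention()
```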
``_ """ @@ -152,8 +158,8 @@ async def get_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves one or more snapshot lifecycle policy definitions and information about - the latest snapshot attempts. + Get policy information. Get snapshot lifecycle policy definitions and information + about the latest snapshot attempts. ``_ @@ -195,8 +201,8 @@ async def get_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns global and policy-level statistics about actions taken by snapshot lifecycle - management. + Get snapshot lifecycle management statistics. Get global and policy-level statistics + about actions taken by snapshot lifecycle management. ``_ """ @@ -231,7 +237,7 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of snapshot lifecycle management (SLM). + Get the snapshot lifecycle management status. ``_ """ @@ -277,12 +283,14 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a snapshot lifecycle policy. + Create or update a policy. Create or update a snapshot lifecycle policy. If the + policy already exists, this request increments the policy version. Only the latest + version of a policy is stored. ``_ - :param policy_id: ID for the snapshot lifecycle policy you want to create or - update. + :param policy_id: The identifier for the snapshot lifecycle policy you want to + create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -354,7 +362,9 @@ async def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns on snapshot lifecycle management (SLM). + Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts + automatically when a cluster is formed. Manually starting SLM is necessary only + if it has been stopped using the stop SLM API. ``_ """ @@ -389,7 +399,15 @@ async def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns off snapshot lifecycle management (SLM). + Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) + operations and the SLM plugin. This API is useful when you are performing maintenance + on a cluster and need to prevent SLM from performing any actions on your data + streams or indices. Stopping SLM does not stop any snapshots that are in progress. + You can manually trigger snapshots with the run snapshot lifecycle policy API + even if SLM is stopped. The API returns a response as soon as the request is + acknowledged, but the plugin might continue to run until in-progress operations + complete and it can be safely stopped. Use the get snapshot lifecycle management + status API to see if SLM is running. ``_ """ diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index d07ce6479..adf46e3b4 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -44,8 +44,8 @@ async def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Triggers the review of a snapshot repository’s contents and deletes any stale - data not referenced by existing snapshots. + Clean up the snapshot repository. 
Trigger the review of the contents of a snapshot
+ repository and delete any stale data not referenced by existing snapshots.

``_

@@ -99,7 +99,8 @@ async def clone(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Clones indices from one snapshot into another snapshot in the same repository.
+ Clone a snapshot. Clone part or all of a snapshot into another snapshot in the
+ same repository.

``_

@@ -182,7 +183,7 @@ async def create(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Creates a snapshot in a repository.
+ Create a snapshot. Take a snapshot of a cluster or of data streams and indices.

``_

@@ -286,7 +287,11 @@ async def create_repository(
verify: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Creates a repository.
+ Create or update a snapshot repository. IMPORTANT: If you are migrating searchable
+ snapshots, the repository name must be identical in the source and destination
+ clusters. To register a snapshot repository, the cluster's global metadata must
+ be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+ and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.

``_

@@ -346,7 +351,7 @@ async def delete(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes one or more snapshots.
+ Delete snapshots.

``_

@@ -397,7 +402,9 @@ async def delete_repository(
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes a repository.
+ Delete snapshot repositories. When a repository is unregistered, Elasticsearch
+ removes only the reference to the location where the repository is storing the
+ snapshots. The snapshots themselves are left untouched and in place.

``_

@@ -471,7 +478,7 @@ async def get(
verbose: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns information about a snapshot.
+ Get snapshot information.

``_

@@ -583,7 +590,7 @@ async def get_repository(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns information about a repository.
+ Get snapshot repository information.

``_

@@ -642,7 +649,40 @@ async def repository_verify_integrity(
verify_blob_contents: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Verifies the integrity of the contents of a snapshot repository
+ Verify the repository integrity. Verify the integrity of the contents of a snapshot
+ repository. This API enables you to perform a comprehensive check of the contents
+ of a repository, looking for any anomalies in its data or metadata which might
+ prevent you from restoring snapshots from the repository or which might cause
+ future snapshot create or delete operations to fail. If you suspect the integrity
+ of the contents of one of your snapshot repositories, cease all write activity
+ to this repository immediately, set its `read_only` option to `true`, and use
+ this API to verify its integrity. Until you do so: * It may not be possible to
+ restore some snapshots from this repository. * Searchable snapshots may report
+ errors when searched or may have unassigned shards. * Taking snapshots into this
+ repository may fail or may appear to succeed but have created a snapshot which
+ cannot be restored. * Deleting snapshots from this repository may fail or may
+ appear to succeed but leave the underlying data on disk.
* Continuing to write
+ to the repository while it is in an invalid state may cause additional damage
+ to its contents. If the API finds any problems with the integrity of the contents
+ of your repository, Elasticsearch will not be able to repair the damage. The
+ only way to bring the repository back into a fully working state after its contents
+ have been damaged is by restoring its contents from a repository backup which
+ was taken before the damage occurred. You must also identify what caused the
+ damage and take action to prevent it from happening again. If you cannot restore
+ a repository backup, register a new repository and use this for all future snapshot
+ operations. In some cases it may be possible to recover some of the contents
+ of a damaged repository, either by restoring as many of its snapshots as needed
+ and taking new snapshots of the restored data, or by using the reindex API to
+ copy data from any searchable snapshots mounted from the damaged repository.
+ Avoid all operations which write to the repository while the verify repository
+ integrity API is running. If something changes the repository contents while
+ an integrity verification is running then Elasticsearch may incorrectly report
+ having detected some anomalies in its contents due to the concurrent writes.
+ It may also incorrectly fail to report some anomalies that the concurrent writes
+ prevented it from detecting. NOTE: This API is intended for exploratory use by
+ humans. You should expect the request parameters and the response format to vary
+ in future versions. NOTE: This API may not work correctly in a mixed-version
+ cluster.

``_

@@ -739,7 +779,20 @@ async def restore(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Restores a snapshot.
+ Restore a snapshot. Restore a snapshot of a cluster or data streams and indices.
+ You can restore a snapshot only to a running cluster with an elected master node.
+ The snapshot repository must be registered and available to the cluster. The
+ snapshot and cluster versions must be compatible. To restore a snapshot, the
+ cluster's global metadata must be writable. Ensure there aren't any cluster blocks
+ that prevent writes. The restore operation ignores index blocks. Before you restore
+ a data stream, ensure the cluster contains a matching index template with data
+ streams enabled. To check, use the index management feature in Kibana or the
+ get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+ ``` If no such template exists, you can create one or restore a cluster state
+ that contains one. Without a matching index template, a data stream can't roll
+ over or create backing indices. If your snapshot contains data from App Search
+ or Workplace Search, you must restore the Enterprise Search encryption key before
+ you restore the snapshot.

``_

@@ -832,7 +885,18 @@ async def status(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns information about the status of a snapshot.
+ Get the snapshot status. Get a detailed description of the current state for
+ each shard participating in the snapshot. Note that this API should be used only
+ to obtain detailed shard-level information for ongoing snapshots. If this detail
+ is not needed or you want to obtain information about one or more existing snapshots,
+ use the get snapshot API.
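A minimal sketch of the restore workflow described above, assuming an existing `AsyncElasticsearch` client; the repository, snapshot, and index names are hypothetical, and renaming avoids overwriting the live index:

```python
from elasticsearch import AsyncElasticsearch


async def restore_under_new_name(client: AsyncElasticsearch) -> None:
    # Restore one index from a snapshot under a new name so the original
    # index is not overwritten. All names here are hypothetical.
    await client.snapshot.restore(
        repository="my_repository",
        snapshot="snapshot_2025",
        indices="my-index",
        rename_pattern="(.+)",
        rename_replacement="restored-$1",
        include_global_state=False,
        wait_for_completion=True,
    )
```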
WARNING: Using the API to return the status of any + snapshots other than currently running snapshots can be expensive. The API requires + a read from the repository for each shard in each snapshot. For example, if you + have 100 snapshots with 1,000 shards each, an API request that includes all snapshots + will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency + of your storage, such requests can take an extremely long time to return results. + These requests can also tax machine resources and, when using cloud storage, + incur high processing costs. ``_ @@ -891,7 +955,8 @@ async def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies a repository. + Verify a snapshot repository. Check for common misconfigurations in a snapshot + repository. ``_ diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index 8ec12f8fa..b65a15997 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -47,7 +47,17 @@ async def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a task, if it can be cancelled through an API. + Cancel a task. A task may continue to run for some time after it has been cancelled + because it may not be able to safely stop its current activity straight away. + It is also possible that Elasticsearch must complete its work on other tasks + before it can process the cancellation. The get task information API will continue + to list these cancelled tasks until they complete. The cancelled flag in the + response indicates that the cancellation command has been processed and the task + will stop as soon as possible. To troubleshoot why a cancelled task does not + complete promptly, use the get task information API with the `?detailed` parameter + to identify the other tasks the system is running. You can also use the node + hot threads API to obtain detailed information about the work the system is doing + instead of completing the cancelled task. ``_ @@ -107,8 +117,7 @@ async def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Returns information about the tasks currently executing - in the cluster. + Get task information. Get information about a task currently running in the cluster. ``_ @@ -166,15 +175,16 @@ async def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The task management API returns information about tasks currently executing on - one or more nodes in the cluster. + Get all tasks. Get information about the tasks currently running on one or more + nodes in the cluster. ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. :param detailed: If `true`, the response includes detailed information about - shard recoveries. + shard recoveries. This information is useful to distinguish tasks from each + other but is more costly to run. :param group_by: Key used to group tasks in the response. :param master_timeout: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index db4ae5d73..a0d126037 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -25,6 +25,349 @@ class TextStructureClient(NamespacedClient): + @_rewrite_parameters() + async def find_field_structure( + self, + *, + field: str, + index: str, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + documents_to_sample: t.Optional[int] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of a text field. Find the structure of a text field in an + Elasticsearch index. + + ``_ + + :param field: The field that should be analyzed. + :param index: The name of the index that contains the analyzed field. + :param column_names: If `format` is set to `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param documents_to_sample: The number of documents to include in the structural + analysis. The minimum value is 2. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output. The intention + in that situation is that a user who knows the meanings will rename the fields + before using them. + :param explain: If true, the response includes a field named `explanation`, which + is an array of strings that indicate how the structure finder produced its + result. + :param format: The high level structure of the text. By default, the API chooses + the format. 
In this default scenario, all rows must have the same number
+ of fields for a delimited format to be detected. If the format is set to
+ delimited and the delimiter is not set, however, the API tolerates up to
+ 5% of rows that have a different number of columns than the first row.
+ :param grok_pattern: If the format is `semi_structured_text`, you can specify
+ a Grok pattern that is used to extract fields from every message in the text.
+ The name of the timestamp field in the Grok pattern must match what is specified
+ in the `timestamp_field` parameter. If that parameter is not specified, the
+ name of the timestamp field in the Grok pattern must match "timestamp". If
+ `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+ :param quote: If the format is `delimited`, you can specify the character used
+ to quote the values in each row if they contain newlines or the delimiter
+ character. Only a single character is supported. If this parameter is not
+ specified, the default value is a double quote (`"`). If your delimited text
+ format does not use quoting, a workaround is to set this argument to a character
+ that does not appear anywhere in the sample.
+ :param should_trim_fields: If the format is `delimited`, you can specify whether
+ values between delimiters should have whitespace trimmed from them. If this
+ parameter is not specified and the delimiter is pipe (`|`), the default value
+ is true. Otherwise, the default value is false.
+ :param timeout: The maximum amount of time that the structure analysis can take.
+ If the analysis is still running when the timeout expires, it will be stopped.
+ :param timestamp_field: The name of the field that contains the primary timestamp
+ of each record in the text. In particular, if the text was ingested into
+ an index, this is the field that would be used to populate the `@timestamp`
+ field. If the format is `semi_structured_text`, this field must match the
+ name of the appropriate extraction in the `grok_pattern`. Therefore, for
+ semi-structured text, it is best not to specify this parameter unless `grok_pattern`
+ is also specified. For structured text, if you specify this parameter, the
+ field must exist within the text. If this parameter is not specified, the
+ structure finder makes a decision about which field (if any) is the primary
+ timestamp field. For structured text, it is not compulsory to have a timestamp
+ in the text.
+ :param timestamp_format: The Java time format of the timestamp field in the text.
+ Only a subset of Java time format letter groups are supported: * `a` * `d`
+ * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM`
+ * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter
+ groups (fractional seconds) of length one to nine are supported providing
+ they occur after `ss` and are separated from the `ss` by a period (`.`),
+ comma (`,`), or colon (`:`). Spacing and punctuation are also permitted with
+ the exception of a question mark (`?`), newline, and carriage return, together
+ with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS
+ 'in' yyyy` is a valid override format. One valuable use case for this parameter
+ is when the format is semi-structured text, there are multiple timestamp
+ formats in the text, and you know which format corresponds to the primary
+ timestamp, but you do not want to specify the full `grok_pattern`.
Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages. + """ + if field is None: + raise ValueError("Empty value passed for parameter 'field'") + if index is None: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_field_structure" + __query: t.Dict[str, t.Any] = {} + if field is not None: + __query["field"] = field + if index is not None: + __query["index"] = index + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if documents_to_sample is not None: + __query["documents_to_sample"] = documents_to_sample + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="text_structure.find_field_structure", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("messages",), + ) + async def find_message_structure( + self, + *, + messages: t.Optional[t.Sequence[str]] = None, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of text messages. Find the structure of a list of text messages. + The messages must contain data that is suitable to be ingested into Elasticsearch. 
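A minimal usage sketch for the new `find_field_structure` endpoint shown above, assuming an existing `AsyncElasticsearch` client; the index and field names are hypothetical:

```python
from elasticsearch import AsyncElasticsearch


async def analyze_message_field(client: AsyncElasticsearch) -> None:
    # Sample 1,000 documents from a hypothetical logs index and print the
    # Grok pattern the structure finder derives for the `message` field.
    resp = await client.text_structure.find_field_structure(
        index="my-logs",
        field="message",
        documents_to_sample=1000,
        ecs_compatibility="v1",
    )
    print(resp.get("grok_pattern"))
```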
This API provides a starting point for ingesting data into Elasticsearch in a
+ format that is suitable for subsequent use with other Elastic Stack functionality.
+ Use this API rather than the find text structure API if your input text has already
+ been split up into separate messages by some other process. The response from
+ the API contains: * Sample messages. * Statistics that reveal the most common
+ values for all fields detected within the text and basic numeric statistics for
+ numeric fields. * Information about the structure of the text, which is useful
+ when you write ingest configurations to index it or similarly formatted text.
+ * Appropriate mappings for an Elasticsearch index, which you could use to ingest
+ the text. All this information can be calculated by the structure finder with
+ no guidance. However, you can optionally override some of the decisions about
+ the text structure by specifying one or more query parameters.
+
+ ``_
+
+ :param messages: The list of messages you want to analyze.
+ :param column_names: If the format is `delimited`, you can specify the column
+ names in a comma-separated list. If this parameter is not specified, the
+ structure finder uses the column names from the header row of the text. If
+ the text does not have a header row, columns are named "column1", "column2",
+ "column3", for example.
+ :param delimiter: If the format is `delimited`, you can specify the character
+ used to delimit the values in each row. Only a single character is supported;
+ the delimiter cannot have multiple characters. By default, the API considers
+ the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this
+ default scenario, all rows must have the same number of fields for the delimited
+ format to be detected. If you specify a delimiter, up to 10% of the rows
+ can have a different number of columns than the first row.
+ :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns.
+ Use this parameter to specify whether to use ECS Grok patterns instead of
+ legacy ones when the structure finder creates a Grok pattern. This setting
+ primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}`
+ matches the input. If the structure finder identifies a common structure
+ but has no idea of the meaning then generic field names such as `path`, `ipaddress`,
+ `field1`, and `field2` are used in the `grok_pattern` output, with the intention
+ that a user who knows the meanings renames these fields before using them.
+ :param explain: If this parameter is set to true, the response includes a field
+ named `explanation`, which is an array of strings that indicate how the structure
+ finder produced its result.
+ :param format: The high level structure of the text. By default, the API chooses
+ the format. In this default scenario, all rows must have the same number
+ of fields for a delimited format to be detected. If the format is `delimited`
+ and the delimiter is not set, however, the API tolerates up to 5% of rows
+ that have a different number of columns than the first row.
+ :param grok_pattern: If the format is `semi_structured_text`, you can specify
+ a Grok pattern that is used to extract fields from every message in the text.
+ The name of the timestamp field in the Grok pattern must match what is specified
+ in the `timestamp_field` parameter. If that parameter is not specified, the
+ name of the timestamp field in the Grok pattern must match "timestamp".
If
+ `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+ :param quote: If the format is `delimited`, you can specify the character used
+ to quote the values in each row if they contain newlines or the delimiter
+ character. Only a single character is supported. If this parameter is not
+ specified, the default value is a double quote (`"`). If your delimited text
+ format does not use quoting, a workaround is to set this argument to a character
+ that does not appear anywhere in the sample.
+ :param should_trim_fields: If the format is `delimited`, you can specify whether
+ values between delimiters should have whitespace trimmed from them. If this
+ parameter is not specified and the delimiter is pipe (`|`), the default value
+ is true. Otherwise, the default value is false.
+ :param timeout: The maximum amount of time that the structure analysis can take.
+ If the analysis is still running when the timeout expires, it will be stopped.
+ :param timestamp_field: The name of the field that contains the primary timestamp
+ of each record in the text. In particular, if the text was ingested into
+ an index, this is the field that would be used to populate the `@timestamp`
+ field. If the format is `semi_structured_text`, this field must match the
+ name of the appropriate extraction in the `grok_pattern`. Therefore, for
+ semi-structured text, it is best not to specify this parameter unless `grok_pattern`
+ is also specified. For structured text, if you specify this parameter, the
+ field must exist within the text. If this parameter is not specified, the
+ structure finder makes a decision about which field (if any) is the primary
+ timestamp field. For structured text, it is not compulsory to have a timestamp
+ in the text.
+ :param timestamp_format: The Java time format of the timestamp field in the text.
+ Only a subset of Java time format letter groups are supported: * `a` * `d`
+ * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM`
+ * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter
+ groups (fractional seconds) of length one to nine are supported providing
+ they occur after `ss` and are separated from the `ss` by a period (`.`),
+ comma (`,`), or colon (`:`). Spacing and punctuation are also permitted with
+ the exception of a question mark (`?`), newline, and carriage return, together
+ with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS
+ 'in' yyyy` is a valid override format. One valuable use case for this parameter
+ is when the format is semi-structured text, there are multiple timestamp
+ formats in the text, and you know which format corresponds to the primary
+ timestamp, but you do not want to specify the full `grok_pattern`. Another
+ is when the timestamp format is one that the structure finder does not consider
+ by default. If this parameter is not specified, the structure finder chooses
+ the best format from a built-in set. If the special value `null` is specified,
+ the structure finder will not look for a primary timestamp in the text. When
+ the format is semi-structured text, this will result in the structure finder
+ treating the text as single-line messages.
+ """ + if messages is None and body is None: + raise ValueError("Empty value passed for parameter 'messages'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_message_structure" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + if not __body: + if messages is not None: + __body["messages"] = messages + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="text_structure.find_message_structure", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="text_files", ) @@ -50,8 +393,22 @@ async def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Finds the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. + Find the structure of a text file. The text file must contain data that is suitable + to be ingested into Elasticsearch. This API provides a starting point for ingesting + data into Elasticsearch in a format that is suitable for subsequent use with + other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the + data that is posted to this endpoint does not need to be UTF-8 encoded and in + JSON format. It must, however, be text; binary text formats are not currently + supported. The size is limited to the Elasticsearch HTTP receive buffer size, + which defaults to 100 Mb. The response from the API contains: * A couple of messages + from the beginning of the text. * Statistics that reveal the most common values + for all fields detected within the text and basic numeric statistics for numeric + fields. * Information about the structure of the text, which is useful when you + write ingest configurations to index it or similarly formatted text. * Appropriate + mappings for an Elasticsearch index, which you could use to ingest the text. + All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure + by specifying one or more query parameters. ``_ @@ -64,7 +421,7 @@ async def find_structure( column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
If the text does not have a header role, columns are named "column1", "column2", - "column3", etc. + "column3", for example. :param delimiter: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers @@ -76,7 +433,9 @@ async def find_structure( (disabled or v1, default: disabled). :param explain: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure - finder produced its result. + finder produced its result. If the structure finder produces unexpected results + for some text, use this query parameter to help you determine why the returned + structure was chosen. :param format: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields @@ -114,9 +473,9 @@ async def find_structure( whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. - :param timeout: Sets the maximum amount of time that the structure analysis make + :param timeout: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will - be aborted. + be stopped. :param timestamp_field: Optional parameter to specify the timestamp field in the file :param timestamp_format: The Java time format of the timestamp field in the text. @@ -191,7 +550,9 @@ async def test_grok_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Tests a Grok pattern on some text. + Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API + indicates whether the lines match the pattern together with the offsets and lengths + of the matched substrings. ``_ diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 1eb51214a..52d4c886f 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -844,13 +844,20 @@ async def upgrade_transforms( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades all transforms. This API identifies transforms that have a legacy configuration + Upgrade all transforms. Transforms are compatible across minor versions and between + supported major versions. However, over time, the format of transform configuration + information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains - unchanged. + unchanged. If a transform upgrade step fails, the upgrade stops and an error + is returned about the underlying issue. Resolve the issue then re-run the process + again. A summary is returned when the upgrade is finished. 
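A minimal sketch of the upgrade flow described here, assuming an existing `AsyncElasticsearch` client; trying a dry run first is a suggested pattern, not a requirement:

```python
from elasticsearch import AsyncElasticsearch


async def upgrade_all_transforms(client: AsyncElasticsearch) -> None:
    # A dry run reports how many transforms need updating without changing them.
    preview = await client.transform.upgrade_transforms(dry_run=True)
    print(preview)
    # Then perform the real upgrade, allowing up to five minutes.
    await client.transform.upgrade_transforms(dry_run=False, timeout="5m")
```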
To ensure continuous
+ transforms remain running during a major version upgrade of the cluster – for
+ example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading
+ the cluster. You may want to perform a recent cluster backup prior to the upgrade.

``_

diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py
index e83b8328c..be0da66f6 100644
--- a/elasticsearch/_async/client/watcher.py
+++ b/elasticsearch/_async/client/watcher.py
@@ -37,7 +37,11 @@ async def ack_watch(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Acknowledges a watch, manually throttling the execution of the watch's actions.
+ Acknowledge a watch. Acknowledging a watch enables you to manually throttle the
+ execution of the watch's actions. The acknowledgement state of an action is stored
+ in the `status.actions.<id>.ack.state` structure. IMPORTANT: If the specified
+ watch is currently being executed, this API will return an error. The reason for
+ this behavior is to prevent overwriting the watch status from a watch execution.

``_

@@ -88,7 +92,7 @@ async def activate_watch(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Activates a currently inactive watch.
+ Activate a watch. A watch can be either active or inactive.

``_

@@ -128,7 +132,7 @@ async def deactivate_watch(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deactivates a currently active watch.
+ Deactivate a watch. A watch can be either active or inactive.

``_

@@ -168,7 +172,13 @@ async def delete_watch(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Removes a watch from Watcher.
+ Delete a watch. When the watch is removed, the document representing the watch
+ in the `.watches` index is gone and it will never be run again. Deleting a watch
+ does not delete any watch execution records related to this watch from the watch
+ history. IMPORTANT: Deleting a watch must be done by using only this API. Do
+ not delete the watch directly from the `.watches` index using the Elasticsearch
+ delete document API. When Elasticsearch security features are enabled, make sure
+ no write privileges are granted to anyone for the `.watches` index.

``_

@@ -237,13 +247,15 @@ async def execute_watch(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- This API can be used to force execution of the watch outside of its triggering
- logic or to simulate the watch execution for debugging purposes. For testing
- and debugging purposes, you also have fine-grained control on how the watch runs.
- You can execute the watch without executing all of its actions or alternatively
+ Run a watch. This API can be used to force execution of the watch outside of
+ its triggering logic or to simulate the watch execution for debugging purposes.
+ For testing and debugging purposes, you also have fine-grained control on how
+ the watch runs. You can run the watch without running all of its actions or alternatively
by simulating them. You can also force execution by ignoring the watch condition
and control whether a watch record would be written to the watch history after
- execution.
+ it runs. You can use the run watch API to run watches that are not yet registered
+ by specifying the watch definition inline. This serves as a great tool for testing
+ and debugging your watches prior to adding them to Watcher.
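A minimal sketch of forcing a registered watch to run for debugging, as described above, assuming an existing `AsyncElasticsearch` client and a hypothetical watch id:

```python
from elasticsearch import AsyncElasticsearch


async def debug_watch(client: AsyncElasticsearch) -> None:
    # Force the watch to run, ignoring its condition, without writing the
    # run to the watch history. The watch id is hypothetical.
    resp = await client.watcher.execute_watch(
        id="my_watch",
        ignore_condition=True,
        record_execution=False,
    )
    print(resp["watch_record"]["state"])
```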
``_ @@ -326,7 +338,7 @@ async def get_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a watch by its ID. + Get a watch. ``_ @@ -388,7 +400,17 @@ async def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new watch, or updates an existing one. + Create or update a watch. When a watch is registered, a new document that represents + the watch is added to the `.watches` index and its trigger is immediately registered + with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler + is the trigger engine. IMPORTANT: You must use Kibana or this API to create a + watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + write privileges on the `.watches` index. When you add a watch you can also define + its initial active state by setting the *active* parameter. When Elasticsearch + security features are enabled, your watch can index or search only on indices + for which the user that stored the watch has privileges. If the user is able + to read index `a`, but not index `b`, the same will apply when the watch runs. ``_ @@ -485,7 +507,8 @@ async def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves stored watches. + Query watches. Get all registered watches in a paginated manner and optionally + filter watches by a query. ``_ @@ -555,7 +578,7 @@ async def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts Watcher if it is not already running. + Start the watch service. Start the Watcher service if it is not already running. ``_ """ @@ -612,7 +635,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current Watcher metrics. + Get Watcher statistics. ``_ @@ -658,7 +681,7 @@ async def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops Watcher if it is running. + Stop the watch service. Stop the Watcher service if it is running. ``_ """ diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index 9f3c6a775..f449b4cc4 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -43,7 +43,10 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides general information about the installed X-Pack features. + Get information. The information provided by the API includes: * Build information + including the build number and timestamp. * License information about the currently + installed license. * Feature information for the features that are currently + enabled and available under the current license. ``_ @@ -87,8 +90,9 @@ async def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API provides information about which features are currently enabled and - available under the current license and some usage statistics. + Get usage information. Get information about the features that are currently + enabled and available under the current license. The API also provides some usage + statistics. 
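A minimal sketch of the two X-Pack introspection calls documented above, assuming an existing `AsyncElasticsearch` client:

```python
from elasticsearch import AsyncElasticsearch


async def show_license_features(client: AsyncElasticsearch) -> None:
    # Restrict the info response to the feature list, then fetch usage stats.
    info = await client.xpack.info(categories=["features"])
    usage = await client.xpack.usage()
    print(sorted(info["features"]))
    print(sorted(usage))
```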
``_ diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 184146e51..15b0ac240 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -308,8 +308,6 @@ def count( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -332,11 +330,6 @@ def count( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -362,10 +355,6 @@ def count( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -396,8 +385,6 @@ def fielddata( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -418,11 +405,6 @@ def fielddata( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
@@ -450,10 +432,6 @@ def fielddata( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -480,8 +458,6 @@ def health( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ @@ -510,11 +486,6 @@ def health( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -537,10 +508,6 @@ def health( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -562,66 +529,15 @@ def health( ) @_rewrite_parameters() - def help( - self, - *, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - help: t.Optional[bool] = None, - human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - pretty: t.Optional[bool] = None, - s: t.Optional[t.Union[str, t.Sequence[str]]] = None, - v: t.Optional[bool] = None, - ) -> TextApiResponse: + def help(self) -> TextApiResponse: """ Get CAT help. Returns help for the CAT APIs. ``_ - - :param format: Specifies the format to return the columnar data in, can be set - to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. - :param help: When set to `true` will output available columns. This option can't - be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if format is not None: - __query["format"] = format - if h is not None: - __query["h"] = h - if help is not None: - __query["help"] = help - if human is not None: - __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if pretty is not None: - __query["pretty"] = pretty - if s is not None: - __query["s"] = s - if v is not None: - __query["v"] = v __headers = {"accept": "text/plain"} return self.perform_request( # type: ignore[return-value] "GET", @@ -656,7 +572,6 @@ def indices( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, pri: t.Optional[bool] = None, @@ -694,10 +609,6 @@ def indices( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param pri: If true, the response only includes information from primary shards. :param s: List of columns that determine how the table should be sorted. Sorting @@ -734,8 +645,6 @@ def indices( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -894,8 +803,6 @@ def ml_data_frame_analytics( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -945,7 +852,9 @@ def ml_data_frame_analytics( ], ] ] = None, - time: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -966,11 +875,6 @@ def ml_data_frame_analytics( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: Unit used to display time values. 
@@ -1000,10 +904,6 @@ def ml_data_frame_analytics( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1073,8 +973,6 @@ def ml_datafeeds( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1145,11 +1043,6 @@ def ml_datafeeds( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. @@ -1177,10 +1070,6 @@ def ml_datafeeds( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1349,8 +1238,6 @@ def ml_jobs( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1518,11 +1405,6 @@ def ml_jobs( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. 
@@ -1552,10 +1434,6 @@ def ml_jobs( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1635,8 +1513,6 @@ def ml_trained_models( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1683,6 +1559,9 @@ def ml_trained_models( ] ] = None, size: t.Optional[int] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1708,14 +1587,10 @@ def ml_trained_models( :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of transforms to display. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -1744,16 +1619,14 @@ def ml_trained_models( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1855,10 +1728,12 @@ def nodes( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1879,14 +1754,11 @@ def nodes( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. 
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1910,14 +1782,14 @@ def nodes( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1944,6 +1816,9 @@ def pending_tasks( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1967,6 +1842,7 @@ def pending_tasks( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1992,6 +1868,8 @@ def pending_tasks( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2014,6 +1892,7 @@ def plugins( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -2033,6 +1912,7 @@ def plugins( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. 
In both cases the coordinating @@ -2058,6 +1938,8 @@ def plugins( __query["help"] = help if human is not None: __query["human"] = human + if include_bootstrap is not None: + __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: @@ -2094,10 +1976,11 @@ def recovery( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2124,14 +2007,10 @@ def recovery( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2160,14 +2039,12 @@ def recovery( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2358,10 +2235,12 @@ def shards( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2381,14 +2260,11 @@ def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. 
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2413,14 +2289,14 @@ def shards( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2445,10 +2321,12 @@ def snapshots( help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2470,14 +2348,11 @@ def snapshots( be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information from unavailable snapshots. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2502,14 +2377,14 @@ def snapshots( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2535,13 +2410,16 @@ def tasks( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, + wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Returns information about tasks currently executing in the cluster. 
IMPORTANT: @@ -2559,18 +2437,18 @@ def tasks( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param node_id: Unique node identifiers, which are used to limit the response. + :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. + :param wait_for_completion: If `true`, the request blocks until the task has + completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" @@ -2591,20 +2469,22 @@ def tasks( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time + if timeout is not None: + __query["timeout"] = timeout if v is not None: __query["v"] = v + if wait_for_completion is not None: + __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -2883,8 +2763,6 @@ def transforms( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -2998,11 +2876,6 @@ def transforms( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. 
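The `cat.tasks` rework above renames `node_id` to `nodes` and adds `time`, `timeout`, and `wait_for_completion`. A minimal sketch, with a hypothetical node name and the same local-cluster assumption:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local, unsecured cluster

print(
    es.cat.tasks(
        nodes=["node-1"],           # was node_id= before this change; name is hypothetical
        time="ms",                  # display unit for time columns
        timeout="30s",              # fail if no response arrives in time
        wait_for_completion=False,  # do not block until the tasks finish
        format="json",
    )
)
```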
@@ -3033,10 +2906,6 @@ def transforms( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index 313ca16e2..6a1ee9752 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -589,6 +589,125 @@ def sync_job_cancel( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_check_in( + self, + *, + connector_sync_job_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Check in a connector sync job. Check in a connector sync job and set the `last_seen` + field to the current time before updating it in the internal index. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job + to be checked in. + """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = ( + f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.sync_job_check_in", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("worker_hostname", "sync_cursor"), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_claim( + self, + *, + connector_sync_job_id: str, + worker_hostname: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + sync_cursor: t.Optional[t.Any] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Claim a connector sync job. This action updates the job status to `in_progress` + and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, + it can set the `sync_cursor` property for the sync job. This API is not intended + for direct connector management by users. It supports the implementation of services + that utilize the connector protocol to communicate with Elasticsearch. To sync + data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. 
+ :param worker_hostname: The host name of the current system that will run the + job. + :param sync_cursor: The cursor object from the last incremental sync job. This + should reference the `sync_cursor` field in the connector state for which + the job runs. + """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if worker_hostname is None and body is None: + raise ValueError("Empty value passed for parameter 'worker_hostname'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if worker_hostname is not None: + __body["worker_hostname"] = worker_hostname + if sync_cursor is not None: + __body["sync_cursor"] = sync_cursor + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_claim", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_delete( @@ -634,6 +753,64 @@ def sync_job_delete( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("error",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_error( + self, + *, + connector_sync_job_id: str, + error: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set a connector sync job error. Set the `error` field for a connector sync job + and set its `status` to `error`. To sync data using self-managed connectors, + you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier for the connector sync job. + :param error: The error for the connector sync job error field. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if error is None and body is None: + raise ValueError("Empty value passed for parameter 'error'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if error is not None: + __body["error"] = error + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_error", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_get( @@ -1032,6 +1209,66 @@ def update_error( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("features",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def update_features( + self, + *, + connector_id: str, + features: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update the connector features. Update the connector features in the connector + document. This API can be used to control the following aspects of a connector: + * document-level security * incremental syncs * advanced sync rules * basic sync + rules Normally, the running connector service automatically manages these features. + However, you can use this API to override the default behavior. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_id: The unique identifier of the connector to be updated. 
+ :param features: + """ + if connector_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_id'") + if features is None and body is None: + raise ValueError("Empty value passed for parameter 'features'") + __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} + __path = f'/_connector/{__path_parts["connector_id"]}/_features' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if features is not None: + __body["features"] = features + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.update_features", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 051977c8d..2cb5707f1 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -255,7 +255,21 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint + Create an inference endpoint. When you create an inference endpoint, the associated + machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before + using it. To verify the deployment status, use the get trained model statistics + API. Look for `"state": "fully_allocated"` in the response and ensure that the + `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating + multiple endpoints for the same model unless required, as each endpoint consumes + significant resources. IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google + Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + uploaded through Eland, the inference APIs offer an alternative way to use and + manage trained models. However, if you do not plan to use the inference APIs + to use these models or if you want to use non-NLP models, use the machine learning + trained model APIs. ``_ diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index 49032c8f1..ae785811d 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -77,6 +77,57 @@ def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Deletes an IP location database configuration. 
+ + ``_ + + :param id: A comma-separated list of IP location database configurations to delete + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.delete_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def delete_pipeline( self, @@ -217,6 +268,57 @@ def get_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def get_ip_location_database( + self, + *, + id: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Returns information about one or more IP location database configurations. + + ``_ + + :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard + (`*`) expressions are supported. To get all database configurations, omit + this parameter or use `*`. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
+ """ + __path_parts: t.Dict[str, str] + if id not in SKIP_IN_PATH: + __path_parts = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + else: + __path_parts = {} + __path = "/_ingest/ip_location/database" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.get_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def get_pipeline( self, @@ -384,6 +486,70 @@ def put_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="configuration", + ) + def put_ip_location_database( + self, + *, + id: str, + configuration: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Returns information about one or more IP location database configurations. + + ``_ + + :param id: ID of the database configuration to create or update. + :param configuration: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if configuration is None and body is None: + raise ValueError( + "Empty value passed for parameters 'configuration' and 'body', one of them should be set." 
) + elif configuration is not None and body is not None: + raise ValueError("Cannot set both 'configuration' and 'body'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = configuration if configuration is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="ingest.put_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "deprecated", diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index f36492625..6e0878941 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -36,11 +36,12 @@ def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline used for Logstash Central Management. + Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central + Management. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -76,11 +77,11 @@ def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves pipelines used for Logstash Central Management. + Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. ``_ - :param id: Comma-separated list of pipeline identifiers. + :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -123,11 +124,12 @@ def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline used for Logstash Central Management. + Create or update a Logstash pipeline. Create a pipeline that is used for Logstash + Central Management. If the specified pipeline exists, it is replaced. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. :param pipeline: """ if id in SKIP_IN_PATH: diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index 6c81b32cd..74cffaaa9 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -36,9 +36,10 @@ def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about different cluster, node, and index level settings - that use deprecated features that will be removed or changed in the next major - version. + Get deprecation information. Get information about different cluster, node, and + index level settings that use deprecated features that will be removed or changed + in the next major version. TIP: This API is designed for indirect use by the + Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
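A minimal sketch of checking deprecations ahead of an upgrade, assuming the same local cluster; the response keys shown are the usual deprecation categories and are an assumption, not part of this diff:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local, unsecured cluster

resp = es.migration.deprecations()
# Typical top-level keys include "cluster_settings", "node_settings",
# "index_settings", and "ml_settings" (assumed here).
print(resp["cluster_settings"])
```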
``_ @@ -81,7 +82,11 @@ def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Find out whether system features need to be upgraded or not + Get feature migration information. Version upgrades sometimes require changes + to how features store configuration information and data in system indices. Check + which features need to be migrated and the status of any migrations that are + in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend you use the Upgrade Assistant. ``_ """ @@ -116,7 +121,11 @@ def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Begin upgrades for system features + Start the feature migration. Version upgrades sometimes require changes to how + features store configuration information and data in system indices. This API + starts the automatic migration process. Some functionality might be temporarily + unavailable during the migration process. TIP: The API is designed for indirect + use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ """ diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index c9a91934e..455a78304 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -42,7 +42,8 @@ def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Used by the monitoring features to send monitoring data. + Send monitoring data. This API is used by the monitoring features to send monitoring + data. ``_ diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 3916fe05d..fa4d54c0c 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -43,7 +43,20 @@ def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing rollup job. + Delete a rollup job. A job must be stopped before it can be deleted. If you attempt + to delete a started job, an error occurs. Similarly, if you attempt to delete + a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you + remove only the process that is actively monitoring and rolling up data. The + API does not delete any previously rolled up data. This is by design; a user + may wish to roll up a static data set. Because the data set is static, after + it has been fully rolled up there is no need to keep the indexing rollup job + around (as there will be no new data). Thus the job can be deleted, leaving behind + the rolled up data for analysis. If you wish to also remove the rollup data and + the rollup index contains the data for only a single job, you can delete the + whole rollup index. If the rollup index stores data from several jobs, you must + issue a delete-by-query that targets the rollup job's identifier in the rollup + index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": + { "_rollup.id": "the_rollup_job_id" } } } ``` ``_ @@ -84,7 +97,11 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the configuration, stats, and status of rollup jobs. + Get rollup job information. Get the configuration, stats, and status of rollup + jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + If a job was created, ran for a while, then was deleted, the API does not return + any details about it. 
For details about a historical rollup job, the rollup capabilities + API may be more useful. ``_ @@ -129,8 +146,15 @@ def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the capabilities of any rollup jobs that have been configured for a specific - index or index pattern. + Get the rollup job capabilities. Get the capabilities of any rollup jobs that + have been configured for a specific index or index pattern. This API is useful + because a rollup job is often configured to roll up only a subset of fields from + the source index. Furthermore, only certain aggregations can be configured for + various fields, leading to a limited subset of functionality depending on that + configuration. This API enables you to inspect an index and determine: 1. Does + this index have associated rollup data somewhere in the cluster? 2. If yes to + the first question, what fields were rolled up, what aggregations can be performed, + and where does the data live? ``_ @@ -175,8 +199,12 @@ def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (for example, - the index where rollup data is stored). + Get the rollup index capabilities. Get the rollup capabilities of all jobs inside + of a rollup index. A single rollup index may store the data for multiple rollup + jobs and may have a variety of capabilities depending on those jobs. This API + enables you to determine: * What jobs are stored in an index (or indices specified + via a pattern)? * What target indices were rolled up, what fields were used in + those rollups, and what aggregations can be performed on each job? ``_ @@ -239,7 +267,16 @@ def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a rollup job. + Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with + no rollup usage will fail with a message about the deprecation and planned removal + of rollup features. A cluster needs to contain either a rollup job or a rollup + index in order for this API to be allowed to run. The rollup job configuration + contains all the details about how the job should run, when it indexes documents, + and what future queries will be able to run against the rollup index. There are + three main sections to the job configuration: the logistical details about the + job (for example, the cron schedule), the fields that are used for grouping, + and what metrics to collect for each group. Jobs are created in a `STOPPED` state. + You can start them with the start rollup jobs API. ``_ @@ -356,7 +393,11 @@ def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard Query DSL. + Search rolled-up data. The rollup search endpoint is needed because, internally, + rolled-up documents utilize a different document structure than the original + data. It rewrites standard Query DSL into a format that matches the rollup documents, + then takes the response and rewrites it back to what a client would expect given + the original query. ``_ @@ -420,7 +461,8 @@ def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an existing, stopped rollup job. + Start rollup jobs. If you try to start a job that does not exist, an exception + occurs. If you try to start a job that is already started, nothing happens.
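A minimal sketch of the start/stop semantics described above, with a hypothetical job id and the same local-cluster assumption:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local, unsecured cluster

# Starting an already started job is a no-op; starting a missing one raises.
es.rollup.start_job(id="sensor_rollup")  # "sensor_rollup" is hypothetical

# Stopping is symmetric; optionally block until the job has fully stopped.
es.rollup.stop_job(id="sensor_rollup", wait_for_completion=True, timeout="30s")
```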
``_ @@ -463,7 +505,8 @@ def stop_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops an existing, started rollup job. + Stop rollup jobs. If you try to stop a job that does not exist, an exception + occurs. If you try to stop a job that is already stopped, nothing happens. ``_ diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index 72dd67ba3..97b36b582 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -216,7 +216,7 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing search applications. + Get search applications. Get information about search applications. ``_ @@ -351,6 +351,70 @@ def put_behavioral_analytics( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("params",), + ignore_deprecated_options={"params"}, + ) + @_stability_warning(Stability.EXPERIMENTAL) + def render_query( + self, + *, + name: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + params: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Render a search application query. Generate an Elasticsearch query using the + specified query parameters and the search template associated with the search + application or a default template if none is specified. If a parameter used in + the search template is not specified in `params`, the parameter's default value + will be used. The API returns the specific Elasticsearch query that would be + generated and run by calling the search application search API. You must have + `read` privileges on the backing alias of the search application. + + ``_ + + :param name: The name of the search application to render the query for.
+ :param params: + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"name": _quote(name)} + __path = ( + f'/_application/search_application/{__path_parts["name"]}/_render_query' + ) + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if params is not None: + __body["params"] = params + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.render_query", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 4c316b546..7793f48a1 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -44,7 +44,8 @@ def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve node-level cache statistics about searchable snapshots. + Get cache statistics. Get statistics about the shared cache for partially mounted + indices. ``_ @@ -103,7 +104,8 @@ def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache of searchable snapshots. + Clear the cache. Clear indices and data streams from the shared cache for partially + mounted indices. ``_ @@ -175,7 +177,9 @@ def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot as a searchable index. + Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + this API for snapshots managed by index lifecycle management (ILM). Manually + mounting ILM-managed snapshots can interfere with ILM processes. ``_ @@ -255,7 +259,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve shard-level statistics about searchable snapshots. + Get searchable snapshot statistics. ``_ diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index e08eb469a..bfa561089 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -42,8 +42,13 @@ def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and - ECK. Direct use is not supported. + Cancel node shutdown preparations. Remove a node from the shutdown list so it + can resume normal operations. You must explicitly clear the shutdown request + when a node rejoins the cluster or when a node has permanently left the cluster. + Shutdown requests are never removed automatically by Elasticsearch. NOTE: This + feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, + and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator + privileges feature is enabled, you must be an operator to use this API. 
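A minimal sketch of clearing a shutdown request once a node has rejoined or permanently left the cluster, with a hypothetical node id and the same local-cluster assumption; operator privileges may be required:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local, unsecured cluster

# Shutdown requests are never removed automatically, so clear them explicitly.
es.shutdown.delete_node(node_id="node-1")  # "node-1" is hypothetical
```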
``_ @@ -98,8 +103,13 @@ def get_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve status of a node or nodes that are currently marked as shutting down. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the shutdown status. Get information about nodes that are ready to be shut + down, have shut down preparations still in progress, or have stalled. The API + returns status information for each part of the shut down process. NOTE: This + feature is designed for indirect use by Elasticsearch Service, Elastic Cloud + Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If + the operator privileges feature is enabled, you must be an operator to use this + API. ``_ @@ -166,8 +176,17 @@ def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct - use is not supported. + Prepare a node to be shut down. NOTE: This feature is designed for indirect use + by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. If the operator privileges feature is enabled, you + must be an operator to use this API. The API migrates ongoing tasks and index + shards to other nodes as needed to prepare a node to be restarted or shut down + and removed from the cluster. This ensures that Elasticsearch can be stopped + safely with minimal disruption to the cluster. You must specify the type of shutdown: + `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, + you can use this API to change the shutdown type. IMPORTANT: This API does NOT + terminate the Elasticsearch process. Monitor the node shutdown status to determine + when it is safe to stop Elasticsearch. ``_ diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index e780bcc9b..92bc47fe9 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -36,7 +36,9 @@ def delete_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing snapshot lifecycle policy. + Delete a policy. Delete a snapshot lifecycle policy definition. This operation + prevents any future snapshots from being taken but does not cancel in-progress + snapshots or remove previously-taken snapshots. ``_ @@ -76,8 +78,10 @@ def execute_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Immediately creates a snapshot according to the lifecycle policy, without waiting - for the scheduled time. + Run a policy. Immediately create a snapshot according to the snapshot lifecycle + policy without waiting for the scheduled time. The snapshot policy is normally + applied according to its schedule, but you might want to manually run a policy + before performing an upgrade or other maintenance. ``_ @@ -116,7 +120,9 @@ def execute_retention( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes any snapshots that are expired according to the policy's retention rules. + Run a retention policy. Manually apply the retention policy to force immediate + removal of snapshots that are expired according to the snapshot lifecycle policy + retention rules. The retention policy is normally applied according to its schedule. 
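A minimal sketch of running a policy and its retention rules on demand rather than on schedule, with a hypothetical policy id and the same local-cluster assumption:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local, unsecured cluster

# Take a snapshot now, for example right before maintenance or an upgrade...
es.slm.execute_lifecycle(policy_id="daily-snapshots")  # hypothetical policy id

# ...and prune any snapshots that are already expired under the policy rules.
es.slm.execute_retention()
```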
``_ """ @@ -152,8 +158,8 @@ def get_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves one or more snapshot lifecycle policy definitions and information about - the latest snapshot attempts. + Get policy information. Get snapshot lifecycle policy definitions and information + about the latest snapshot attempts. ``_ @@ -195,8 +201,8 @@ def get_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns global and policy-level statistics about actions taken by snapshot lifecycle - management. + Get snapshot lifecycle management statistics. Get global and policy-level statistics + about actions taken by snapshot lifecycle management. ``_ """ @@ -231,7 +237,7 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of snapshot lifecycle management (SLM). + Get the snapshot lifecycle management status. ``_ """ @@ -277,12 +283,14 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a snapshot lifecycle policy. + Create or update a policy. Create or update a snapshot lifecycle policy. If the + policy already exists, this request increments the policy version. Only the latest + version of a policy is stored. ``_ - :param policy_id: ID for the snapshot lifecycle policy you want to create or - update. + :param policy_id: The identifier for the snapshot lifecycle policy you want to + create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -354,7 +362,9 @@ def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns on snapshot lifecycle management (SLM). + Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts + automatically when a cluster is formed. Manually starting SLM is necessary only + if it has been stopped using the stop SLM API. ``_ """ @@ -389,7 +399,15 @@ def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns off snapshot lifecycle management (SLM). + Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) + operations and the SLM plugin. This API is useful when you are performing maintenance + on a cluster and need to prevent SLM from performing any actions on your data + streams or indices. Stopping SLM does not stop any snapshots that are in progress. + You can manually trigger snapshots with the run snapshot lifecycle policy API + even if SLM is stopped. The API returns a response as soon as the request is + acknowledged, but the plugin might continue to run until in-progress operations + complete and it can be safely stopped. Use the get snapshot lifecycle management + status API to see if SLM is running. ``_ """ diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index 69e2336ae..b628ad6a8 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -44,8 +44,8 @@ def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Triggers the review of a snapshot repository’s contents and deletes any stale - data not referenced by existing snapshots. + Clean up the snapshot repository. 
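A minimal sketch of the SLM calls described above; the policy ID, schedule, and repository name are hypothetical, and the repository must already be registered:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical

# Create or update a policy; re-sending the request increments the version.
client.slm.put_lifecycle(
    policy_id="nightly-snapshots",  # hypothetical policy ID
    schedule="0 30 1 * * ?",        # daily at 01:30
    name="<nightly-{now/d}>",
    repository="my_repository",     # hypothetical, pre-registered
    config={"indices": ["*"], "include_global_state": True},
    retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
)

# Run the policy immediately instead of waiting for the schedule.
client.slm.execute_lifecycle(policy_id="nightly-snapshots")
```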
Trigger the review of the contents of a snapshot
+        repository and delete any stale data not referenced by existing snapshots.

         ``_

@@ -99,7 +99,8 @@ def clone(
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Clones indices from one snapshot into another snapshot in the same repository.
+        Clone a snapshot. Clone part or all of a snapshot into another snapshot in the
+        same repository.

         ``_

@@ -182,7 +183,7 @@ def create(
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Creates a snapshot in a repository.
+        Create a snapshot. Take a snapshot of a cluster or of data streams and indices.

         ``_

@@ -286,7 +287,11 @@ def create_repository(
         verify: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Creates a repository.
+        Create or update a snapshot repository. IMPORTANT: If you are migrating searchable
+        snapshots, the repository name must be identical in the source and destination
+        clusters. To register a snapshot repository, the cluster's global metadata must
+        be writable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+        and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.

         ``_

@@ -346,7 +351,7 @@ def delete(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Deletes one or more snapshots.
+        Delete snapshots.

         ``_

@@ -397,7 +402,9 @@ def delete_repository(
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Deletes a repository.
+        Delete snapshot repositories. When a repository is unregistered, Elasticsearch
+        removes only the reference to the location where the repository is storing the
+        snapshots. The snapshots themselves are left untouched and in place.

         ``_

@@ -471,7 +478,7 @@ def get(
         verbose: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Returns information about a snapshot.
+        Get snapshot information.

         ``_

@@ -583,7 +590,7 @@ def get_repository(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Returns information about a repository.
+        Get snapshot repository information.

         ``_

@@ -642,7 +649,40 @@ def repository_verify_integrity(
         verify_blob_contents: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Verifies the integrity of the contents of a snapshot repository
+        Verify the repository integrity. Verify the integrity of the contents of a snapshot
+        repository. This API enables you to perform a comprehensive check of the contents
+        of a repository, looking for any anomalies in its data or metadata which might
+        prevent you from restoring snapshots from the repository or which might cause
+        future snapshot create or delete operations to fail. If you suspect the integrity
+        of the contents of one of your snapshot repositories, cease all write activity
+        to this repository immediately, set its `read_only` option to `true`, and use
+        this API to verify its integrity. Until you do so: * It may not be possible to
+        restore some snapshots from this repository. * Searchable snapshots may report
+        errors when searched or may have unassigned shards. * Taking snapshots into this
+        repository may fail or may appear to succeed but have created a snapshot which
+        cannot be restored. * Deleting snapshots from this repository may fail or may
+        appear to succeed but leave the underlying data on disk. * Continuing to write
+        to the repository while it is in an invalid state may cause additional damage
+        to its contents.
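Tying the repository and snapshot operations above together, a minimal sketch; the repository type, location, and names are hypothetical (an `fs` repository also requires the location to be listed in `path.repo` on every node):

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical

# Register (or update) a shared filesystem repository.
client.snapshot.create_repository(
    name="my_repository",
    repository={"type": "fs", "settings": {"location": "/mnt/backups"}},
)

# Take a snapshot and block until it completes.
client.snapshot.create(
    repository="my_repository",
    snapshot="snapshot-1",
    wait_for_completion=True,
)
```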
If the API finds any problems with the integrity of the contents
+        of your repository, Elasticsearch will not be able to repair the damage. The
+        only way to bring the repository back into a fully working state after its contents
+        have been damaged is by restoring its contents from a repository backup which
+        was taken before the damage occurred. You must also identify what caused the
+        damage and take action to prevent it from happening again. If you cannot restore
+        a repository backup, register a new repository and use this for all future snapshot
+        operations. In some cases it may be possible to recover some of the contents
+        of a damaged repository, either by restoring as many of its snapshots as needed
+        and taking new snapshots of the restored data, or by using the reindex API to
+        copy data from any searchable snapshots mounted from the damaged repository.
+        Avoid all operations which write to the repository while the verify repository
+        integrity API is running. If something changes the repository contents while
+        an integrity verification is running then Elasticsearch may incorrectly report
+        having detected some anomalies in its contents due to the concurrent writes.
+        It may also incorrectly fail to report some anomalies that the concurrent writes
+        prevented it from detecting. NOTE: This API is intended for exploratory use by
+        humans. You should expect the request parameters and the response format to vary
+        in future versions. NOTE: This API may not work correctly in a mixed-version
+        cluster.

         ``_

@@ -739,7 +779,20 @@ def restore(
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Restores a snapshot.
+        Restore a snapshot. Restore a snapshot of a cluster or data streams and indices.
+        You can restore a snapshot only to a running cluster with an elected master node.
+        The snapshot repository must be registered and available to the cluster. The
+        snapshot and cluster versions must be compatible. To restore a snapshot, the
+        cluster's global metadata must be writable. Ensure there aren't any cluster blocks
+        that prevent writes. The restore operation ignores index blocks. Before you restore
+        a data stream, ensure the cluster contains a matching index template with data
+        streams enabled. To check, use the index management feature in Kibana or the
+        get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+        ``` If no such template exists, you can create one or restore a cluster state
+        that contains one. Without a matching index template, a data stream can't roll
+        over or create backing indices. If your snapshot contains data from App Search
+        or Workplace Search, you must restore the Enterprise Search encryption key before
+        you restore the snapshot.

         ``_

@@ -832,7 +885,18 @@ def status(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Returns information about the status of a snapshot.
+        Get the snapshot status. Get a detailed description of the current state for
+        each shard participating in the snapshot. Note that this API should be used only
+        to obtain detailed shard-level information for ongoing snapshots. If this detail
+        is not needed or you want to obtain information about one or more existing snapshots,
+        use the get snapshot API. WARNING: Using the API to return the status of any
+        snapshots other than currently running snapshots can be expensive.
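A minimal sketch of the restore workflow described above, renaming the restored indices so they do not collide with live ones; all names are hypothetical:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical

# Restore one index from a snapshot under a new name.
client.snapshot.restore(
    repository="my_repository",  # hypothetical
    snapshot="snapshot-1",
    indices="my-index",
    rename_pattern="(.+)",
    rename_replacement="restored-$1",
    wait_for_completion=True,
)
```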
The API requires + a read from the repository for each shard in each snapshot. For example, if you + have 100 snapshots with 1,000 shards each, an API request that includes all snapshots + will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency + of your storage, such requests can take an extremely long time to return results. + These requests can also tax machine resources and, when using cloud storage, + incur high processing costs. ``_ @@ -891,7 +955,8 @@ def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies a repository. + Verify a snapshot repository. Check for common misconfigurations in a snapshot + repository. ``_ diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index bbdb5c507..22fd53d80 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -47,7 +47,17 @@ def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a task, if it can be cancelled through an API. + Cancel a task. A task may continue to run for some time after it has been cancelled + because it may not be able to safely stop its current activity straight away. + It is also possible that Elasticsearch must complete its work on other tasks + before it can process the cancellation. The get task information API will continue + to list these cancelled tasks until they complete. The cancelled flag in the + response indicates that the cancellation command has been processed and the task + will stop as soon as possible. To troubleshoot why a cancelled task does not + complete promptly, use the get task information API with the `?detailed` parameter + to identify the other tasks the system is running. You can also use the node + hot threads API to obtain detailed information about the work the system is doing + instead of completing the cancelled task. ``_ @@ -107,8 +117,7 @@ def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Returns information about the tasks currently executing - in the cluster. + Get task information. Get information about a task currently running in the cluster. ``_ @@ -166,15 +175,16 @@ def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The task management API returns information about tasks currently executing on - one or more nodes in the cluster. + Get all tasks. Get information about the tasks currently running on one or more + nodes in the cluster. ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. :param detailed: If `true`, the response includes detailed information about - shard recoveries. + shard recoveries. This information is useful to distinguish tasks from each + other but is more costly to run. :param group_by: Key used to group tasks in the response. :param master_timeout: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index a8b8d3c13..32b536a0b 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -25,6 +25,349 @@ class TextStructureClient(NamespacedClient): + @_rewrite_parameters() + def find_field_structure( + self, + *, + field: str, + index: str, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + documents_to_sample: t.Optional[int] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of a text field. Find the structure of a text field in an + Elasticsearch index. + + ``_ + + :param field: The field that should be analyzed. + :param index: The name of the index that contains the analyzed field. + :param column_names: If `format` is set to `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param documents_to_sample: The number of documents to include in the structural + analysis. The minimum value is 2. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output. The intention + in that situation is that a user who knows the meanings will rename the fields + before using them. + :param explain: If true, the response includes a field named `explanation`, which + is an array of strings that indicate how the structure finder produced its + result. + :param format: The high level structure of the text. By default, the API chooses + the format. 
In this default scenario, all rows must have the same number
+            of fields for a delimited format to be detected. If the format is set to
+            delimited and the delimiter is not set, however, the API tolerates up to
+            5% of rows that have a different number of columns than the first row.
+        :param grok_pattern: If the format is `semi_structured_text`, you can specify
+            a Grok pattern that is used to extract fields from every message in the text.
+            The name of the timestamp field in the Grok pattern must match what is specified
+            in the `timestamp_field` parameter. If that parameter is not specified, the
+            name of the timestamp field in the Grok pattern must match "timestamp". If
+            `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+        :param quote: If the format is `delimited`, you can specify the character used
+            to quote the values in each row if they contain newlines or the delimiter
+            character. Only a single character is supported. If this parameter is not
+            specified, the default value is a double quote (`"`). If your delimited text
+            format does not use quoting, a workaround is to set this argument to a character
+            that does not appear anywhere in the sample.
+        :param should_trim_fields: If the format is `delimited`, you can specify whether
+            values between delimiters should have whitespace trimmed from them. If this
+            parameter is not specified and the delimiter is pipe (`|`), the default value
+            is true. Otherwise, the default value is false.
+        :param timeout: The maximum amount of time that the structure analysis can take.
+            If the analysis is still running when the timeout expires, it will be stopped.
+        :param timestamp_field: The name of the field that contains the primary timestamp
+            of each record in the text. In particular, if the text was ingested into
+            an index, this is the field that would be used to populate the `@timestamp`
+            field. If the format is `semi_structured_text`, this field must match the
+            name of the appropriate extraction in the `grok_pattern`. Therefore, for
+            semi-structured text, it is best not to specify this parameter unless `grok_pattern`
+            is also specified. For structured text, if you specify this parameter, the
+            field must exist within the text. If this parameter is not specified, the
+            structure finder makes a decision about which field (if any) is the primary
+            timestamp field. For structured text, it is not compulsory to have a timestamp
+            in the text.
+        :param timestamp_format: The Java time format of the timestamp field in the text.
+            Only a subset of Java time format letter groups are supported: * `a` * `d`
+            * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM`
+            * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter
+            groups (fractional seconds) of length one to nine are supported providing
+            they occur after `ss` and are separated from the `ss` by a period (`.`),
+            comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with
+            the exception of a question mark (`?`), newline, and carriage return, together
+            with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS
+            'in' yyyy` is a valid override format. One valuable use case for this parameter
+            is when the format is semi-structured text, there are multiple timestamp
+            formats in the text, and you know which format corresponds to the primary
+            timestamp, but you do not want to specify the full `grok_pattern`.
Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages. + """ + if field is None: + raise ValueError("Empty value passed for parameter 'field'") + if index is None: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_field_structure" + __query: t.Dict[str, t.Any] = {} + if field is not None: + __query["field"] = field + if index is not None: + __query["index"] = index + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if documents_to_sample is not None: + __query["documents_to_sample"] = documents_to_sample + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="text_structure.find_field_structure", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("messages",), + ) + def find_message_structure( + self, + *, + messages: t.Optional[t.Sequence[str]] = None, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of text messages. Find the structure of a list of text messages. + The messages must contain data that is suitable to be ingested into Elasticsearch. + This API provides a starting point for ingesting data into Elasticsearch in a + format that is suitable for subsequent use with other Elastic Stack functionality. 
+        Use this API rather than the find text structure API if your input text has already
+        been split up into separate messages by some other process. The response from
+        the API contains: * Sample messages. * Statistics that reveal the most common
+        values for all fields detected within the text and basic numeric statistics for
+        numeric fields. * Information about the structure of the text, which is useful
+        when you write ingest configurations to index it or similarly formatted text.
+        * Appropriate mappings for an Elasticsearch index, which you could use to ingest
+        the text. All this information can be calculated by the structure finder with
+        no guidance. However, you can optionally override some of the decisions about
+        the text structure by specifying one or more query parameters.
+
+        ``_
+
+        :param messages: The list of messages you want to analyze.
+        :param column_names: If the format is `delimited`, you can specify the column
+            names in a comma-separated list. If this parameter is not specified, the
+            structure finder uses the column names from the header row of the text. If
+            the text does not have a header row, columns are named "column1", "column2",
+            "column3", for example.
+        :param delimiter: If the format is `delimited`, you can specify the character
+            used to delimit the values in each row. Only a single character is supported;
+            the delimiter cannot have multiple characters. By default, the API considers
+            the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this
+            default scenario, all rows must have the same number of fields for the delimited
+            format to be detected. If you specify a delimiter, up to 10% of the rows
+            can have a different number of columns than the first row.
+        :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns.
+            Use this parameter to specify whether to use ECS Grok patterns instead of
+            legacy ones when the structure finder creates a Grok pattern. This setting
+            primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}`
+            matches the input. If the structure finder identifies a common structure
+            but has no idea of the meaning, then generic field names such as `path`, `ipaddress`,
+            `field1`, and `field2` are used in the `grok_pattern` output, with the intention
+            that a user who knows the meanings will rename these fields before using them.
+        :param explain: If this parameter is set to true, the response includes a field
+            named `explanation`, which is an array of strings that indicate how the structure
+            finder produced its result.
+        :param format: The high level structure of the text. By default, the API chooses
+            the format. In this default scenario, all rows must have the same number
+            of fields for a delimited format to be detected. If the format is `delimited`
+            and the delimiter is not set, however, the API tolerates up to 5% of rows
+            that have a different number of columns than the first row.
+        :param grok_pattern: If the format is `semi_structured_text`, you can specify
+            a Grok pattern that is used to extract fields from every message in the text.
+            The name of the timestamp field in the Grok pattern must match what is specified
+            in the `timestamp_field` parameter. If that parameter is not specified, the
+            name of the timestamp field in the Grok pattern must match "timestamp". If
+            `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+        :param quote: If the format is `delimited`, you can specify the character used
+            to quote the values in each row if they contain newlines or the delimiter
+            character. Only a single character is supported. If this parameter is not
+            specified, the default value is a double quote (`"`). If your delimited text
+            format does not use quoting, a workaround is to set this argument to a character
+            that does not appear anywhere in the sample.
+        :param should_trim_fields: If the format is `delimited`, you can specify whether
+            values between delimiters should have whitespace trimmed from them. If this
+            parameter is not specified and the delimiter is pipe (`|`), the default value
+            is true. Otherwise, the default value is false.
+        :param timeout: The maximum amount of time that the structure analysis can take.
+            If the analysis is still running when the timeout expires, it will be stopped.
+        :param timestamp_field: The name of the field that contains the primary timestamp
+            of each record in the text. In particular, if the text was ingested into
+            an index, this is the field that would be used to populate the `@timestamp`
+            field. If the format is `semi_structured_text`, this field must match the
+            name of the appropriate extraction in the `grok_pattern`. Therefore, for
+            semi-structured text, it is best not to specify this parameter unless `grok_pattern`
+            is also specified. For structured text, if you specify this parameter, the
+            field must exist within the text. If this parameter is not specified, the
+            structure finder makes a decision about which field (if any) is the primary
+            timestamp field. For structured text, it is not compulsory to have a timestamp
+            in the text.
+        :param timestamp_format: The Java time format of the timestamp field in the text.
+            Only a subset of Java time format letter groups are supported: * `a` * `d`
+            * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM`
+            * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter
+            groups (fractional seconds) of length one to nine are supported providing
+            they occur after `ss` and are separated from the `ss` by a period (`.`),
+            comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with
+            the exception of a question mark (`?`), newline, and carriage return, together
+            with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS
+            'in' yyyy` is a valid override format. One valuable use case for this parameter
+            is when the format is semi-structured text, there are multiple timestamp
+            formats in the text, and you know which format corresponds to the primary
+            timestamp, but you do not want to specify the full `grok_pattern`. Another
+            is when the timestamp format is one that the structure finder does not consider
+            by default. If this parameter is not specified, the structure finder chooses
+            the best format from a built-in set. If the special value `null` is specified,
+            the structure finder will not look for a primary timestamp in the text. When
+            the format is semi-structured text, this will result in the structure finder
+            treating the text as single-line messages.
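A minimal sketch of the message structure finder described above; the sample log lines are hypothetical:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical

log_lines = [
    "[2024-03-01T10:15:02] INFO starting service",
    "[2024-03-01T10:15:04] WARN disk usage at 85%",
]

# Analyze messages that have already been split by another process. The
# response typically includes the detected format and suggested mappings;
# for semi-structured text it also includes a generated Grok pattern.
result = client.text_structure.find_message_structure(messages=log_lines)
print(result["format"])
```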
+ """ + if messages is None and body is None: + raise ValueError("Empty value passed for parameter 'messages'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_message_structure" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + if not __body: + if messages is not None: + __body["messages"] = messages + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="text_structure.find_message_structure", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="text_files", ) @@ -50,8 +393,22 @@ def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Finds the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. + Find the structure of a text file. The text file must contain data that is suitable + to be ingested into Elasticsearch. This API provides a starting point for ingesting + data into Elasticsearch in a format that is suitable for subsequent use with + other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the + data that is posted to this endpoint does not need to be UTF-8 encoded and in + JSON format. It must, however, be text; binary text formats are not currently + supported. The size is limited to the Elasticsearch HTTP receive buffer size, + which defaults to 100 Mb. The response from the API contains: * A couple of messages + from the beginning of the text. * Statistics that reveal the most common values + for all fields detected within the text and basic numeric statistics for numeric + fields. * Information about the structure of the text, which is useful when you + write ingest configurations to index it or similarly formatted text. * Appropriate + mappings for an Elasticsearch index, which you could use to ingest the text. + All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure + by specifying one or more query parameters. ``_ @@ -64,7 +421,7 @@ def find_structure( column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
If the text does not have a header row, columns are named "column1", "column2",
-            "column3", etc.
+            "column3", for example.
         :param delimiter: If you have set format to delimited, you can specify the character
             used to delimit the values in each row. Only a single character is supported;
             the delimiter cannot have multiple characters. By default, the API considers
@@ -76,7 +433,9 @@ def find_structure(
             (disabled or v1, default: disabled).
         :param explain: If this parameter is set to true, the response includes a field
             named explanation, which is an array of strings that indicate how the structure
-            finder produced its result.
+            finder produced its result. If the structure finder produces unexpected results
+            for some text, use this query parameter to help you determine why the returned
+            structure was chosen.
         :param format: The high level structure of the text. Valid values are ndjson,
             xml, delimited, and semi_structured_text. By default, the API chooses the
             format. In this default scenario, all rows must have the same number of fields
@@ -114,9 +473,9 @@ def find_structure(
             whether values between delimiters should have whitespace trimmed from them.
             If this parameter is not specified and the delimiter is pipe (|), the default
             value is true. Otherwise, the default value is false.
-        :param timeout: Sets the maximum amount of time that the structure analysis make
+        :param timeout: Sets the maximum amount of time that the structure analysis can
             take. If the analysis is still running when the timeout expires then it will
-            be aborted.
+            be stopped.
         :param timestamp_field: Optional parameter to specify the timestamp field in
             the file
         :param timestamp_format: The Java time format of the timestamp field in the text.
@@ -191,7 +550,9 @@ def test_grok_pattern(
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Tests a Grok pattern on some text.
+        Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API
+        indicates whether the lines match the pattern together with the offsets and lengths
+        of the matched substrings.

         ``_

diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py
index 6e7edc4ce..d44e65432 100644
--- a/elasticsearch/_sync/client/transform.py
+++ b/elasticsearch/_sync/client/transform.py
@@ -844,13 +844,20 @@ def upgrade_transforms(
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Upgrades all transforms. This API identifies transforms that have a legacy configuration
+        Upgrade all transforms. Transforms are compatible across minor versions and between
+        supported major versions. However, over time, the format of transform configuration
+        information may change. This API identifies transforms that have a legacy configuration
         format and upgrades them to the latest version. It also cleans up the internal
         data structures that store the transform state and checkpoints. The upgrade does
         not affect the source and destination indices. The upgrade also does not affect
         the roles that transforms use when Elasticsearch security features are enabled;
         the role used to read source data and write to the destination index remains
-        unchanged.
+        unchanged. If a transform upgrade step fails, the upgrade stops and an error
+        is returned about the underlying issue. Resolve the issue, then re-run the
+        process. A summary is returned when the upgrade is finished.
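A minimal sketch of the upgrade flow described above; checking with `dry_run` first is optional:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical

# First list which transforms need updating without changing anything...
print(client.transform.upgrade_transforms(dry_run=True))

# ...then perform the actual upgrade.
client.transform.upgrade_transforms(dry_run=False)
```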
To ensure continuous
+        transforms remain running during a major version upgrade of the cluster (for
+        example, from 7.16 to 8.0), it is recommended to upgrade transforms before upgrading
+        the cluster. You may want to take a cluster backup prior to the upgrade.

         ``_

diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py
index bf4439f4d..89fb4b9b0 100644
--- a/elasticsearch/_sync/client/watcher.py
+++ b/elasticsearch/_sync/client/watcher.py
@@ -37,7 +37,11 @@ def ack_watch(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Acknowledges a watch, manually throttling the execution of the watch's actions.
+        Acknowledge a watch. Acknowledging a watch enables you to manually throttle the
+        execution of the watch's actions. The acknowledgement state of an action is stored
+        in the `status.actions.<id>.ack.state` structure. IMPORTANT: If the specified
+        watch is currently being executed, this API will return an error. The reason for
+        this behavior is to prevent overwriting the watch status from a watch execution.

         ``_

@@ -88,7 +92,7 @@ def activate_watch(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Activates a currently inactive watch.
+        Activate a watch. A watch can be either active or inactive.

         ``_

@@ -128,7 +132,7 @@ def deactivate_watch(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Deactivates a currently active watch.
+        Deactivate a watch. A watch can be either active or inactive.

         ``_

@@ -168,7 +172,13 @@ def delete_watch(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Removes a watch from Watcher.
+        Delete a watch. When the watch is removed, the document representing the watch
+        in the `.watches` index is gone and it will never be run again. Deleting a watch
+        does not delete any watch execution records related to this watch from the watch
+        history. IMPORTANT: Deleting a watch must be done by using only this API. Do
+        not delete the watch directly from the `.watches` index using the Elasticsearch
+        delete document API. When Elasticsearch security features are enabled, make sure
+        no write privileges are granted to anyone for the `.watches` index.

         ``_

@@ -237,13 +247,15 @@ def execute_watch(
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        This API can be used to force execution of the watch outside of its triggering
-        logic or to simulate the watch execution for debugging purposes. For testing
-        and debugging purposes, you also have fine-grained control on how the watch runs.
-        You can execute the watch without executing all of its actions or alternatively
+        Run a watch. This API can be used to force execution of the watch outside of
+        its triggering logic or to simulate the watch execution for debugging purposes.
+        For testing and debugging purposes, you also have fine-grained control on how
+        the watch runs. You can run the watch without running all of its actions or alternatively
         by simulating them. You can also force execution by ignoring the watch condition
         and control whether a watch record would be written to the watch history after
-        execution.
+        it runs. You can use the run watch API to run watches that are not yet registered
+        by specifying the watch definition inline. This serves as a great tool for testing
+        and debugging your watches prior to adding them to Watcher.

         ``_

@@ -326,7 +338,7 @@ def get_watch(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Retrieves a watch by its ID.
+        Get a watch.
``_ @@ -388,7 +400,17 @@ def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new watch, or updates an existing one. + Create or update a watch. When a watch is registered, a new document that represents + the watch is added to the `.watches` index and its trigger is immediately registered + with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler + is the trigger engine. IMPORTANT: You must use Kibana or this API to create a + watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + write privileges on the `.watches` index. When you add a watch you can also define + its initial active state by setting the *active* parameter. When Elasticsearch + security features are enabled, your watch can index or search only on indices + for which the user that stored the watch has privileges. If the user is able + to read index `a`, but not index `b`, the same will apply when the watch runs. ``_ @@ -485,7 +507,8 @@ def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves stored watches. + Query watches. Get all registered watches in a paginated manner and optionally + filter watches by a query. ``_ @@ -555,7 +578,7 @@ def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts Watcher if it is not already running. + Start the watch service. Start the Watcher service if it is not already running. ``_ """ @@ -612,7 +635,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current Watcher metrics. + Get Watcher statistics. ``_ @@ -658,7 +681,7 @@ def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops Watcher if it is running. + Stop the watch service. Stop the Watcher service if it is running. ``_ """ diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index 5bf07a226..4d0ea7bda 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -43,7 +43,10 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides general information about the installed X-Pack features. + Get information. The information provided by the API includes: * Build information + including the build number and timestamp. * License information about the currently + installed license. * Feature information for the features that are currently + enabled and available under the current license. ``_ @@ -87,8 +90,9 @@ def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API provides information about which features are currently enabled and - available under the current license and some usage statistics. + Get usage information. Get information about the features that are currently + enabled and available under the current license. The API also provides some usage + statistics. ``_
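Finally, a minimal sketch of the two X-Pack endpoints above; the response keys shown in the comments are typical but depend on the cluster's license and enabled features:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical

# Build, license, and feature information.
info = client.xpack.info()
print(info["license"]["type"])

# Per-feature enablement and usage statistics.
usage = client.xpack.usage()
print(sorted(usage.keys()))
```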