From 1f8c0faf62cc6e2ff20d02ec5a71e430c91ed23a Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 6 Jan 2025 11:15:12 +0000 Subject: [PATCH] Auto-generated API code --- elasticsearch/_async/client/__init__.py | 10 + elasticsearch/_async/client/cat.py | 161 ++++---- elasticsearch/_async/client/ccr.py | 90 ++++- elasticsearch/_async/client/connector.py | 237 +++++++++++ elasticsearch/_async/client/eql.py | 12 + elasticsearch/_async/client/features.py | 28 +- elasticsearch/_async/client/ilm.py | 75 +++- elasticsearch/_async/client/indices.py | 266 +++++++++++-- elasticsearch/_async/client/inference.py | 16 +- elasticsearch/_async/client/ingest.py | 177 ++++++++- elasticsearch/_async/client/license.py | 46 ++- elasticsearch/_async/client/logstash.py | 14 +- elasticsearch/_async/client/migration.py | 19 +- elasticsearch/_async/client/ml.py | 56 ++- elasticsearch/_async/client/monitoring.py | 3 +- elasticsearch/_async/client/rollup.py | 63 ++- .../_async/client/search_application.py | 131 +++++- .../_async/client/searchable_snapshots.py | 12 +- elasticsearch/_async/client/security.py | 224 +++++++++++ elasticsearch/_async/client/shutdown.py | 31 +- elasticsearch/_async/client/slm.py | 46 ++- elasticsearch/_async/client/snapshot.py | 91 ++++- elasticsearch/_async/client/tasks.py | 22 +- elasticsearch/_async/client/text_structure.py | 375 +++++++++++++++++- elasticsearch/_async/client/transform.py | 11 +- elasticsearch/_async/client/watcher.py | 53 ++- elasticsearch/_async/client/xpack.py | 10 +- elasticsearch/_sync/client/__init__.py | 10 + elasticsearch/_sync/client/cat.py | 161 ++++---- elasticsearch/_sync/client/ccr.py | 90 ++++- elasticsearch/_sync/client/connector.py | 237 +++++++++++ elasticsearch/_sync/client/eql.py | 12 + elasticsearch/_sync/client/features.py | 28 +- elasticsearch/_sync/client/ilm.py | 75 +++- elasticsearch/_sync/client/indices.py | 266 +++++++++++-- elasticsearch/_sync/client/inference.py | 16 +- elasticsearch/_sync/client/ingest.py | 177 ++++++++- elasticsearch/_sync/client/license.py | 46 ++- elasticsearch/_sync/client/logstash.py | 14 +- elasticsearch/_sync/client/migration.py | 19 +- elasticsearch/_sync/client/ml.py | 56 ++- elasticsearch/_sync/client/monitoring.py | 3 +- elasticsearch/_sync/client/rollup.py | 63 ++- .../_sync/client/search_application.py | 131 +++++- .../_sync/client/searchable_snapshots.py | 12 +- elasticsearch/_sync/client/security.py | 224 +++++++++++ elasticsearch/_sync/client/shutdown.py | 31 +- elasticsearch/_sync/client/slm.py | 46 ++- elasticsearch/_sync/client/snapshot.py | 91 ++++- elasticsearch/_sync/client/tasks.py | 22 +- elasticsearch/_sync/client/text_structure.py | 375 +++++++++++++++++- elasticsearch/_sync/client/transform.py | 11 +- elasticsearch/_sync/client/watcher.py | 53 ++- elasticsearch/_sync/client/xpack.py | 10 +- 54 files changed, 4002 insertions(+), 556 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 3ad11a9b8..5675987f9 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -626,12 +626,14 @@ async def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + 
require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -651,6 +653,8 @@ async def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -661,6 +665,8 @@ async def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. @@ -694,6 +700,8 @@ async def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -702,6 +710,8 @@ async def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 44c4a7929..54c1c2b0d 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -302,7 +302,6 @@ async def count( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -325,7 +324,6 @@ async def count( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
@@ -351,8 +349,6 @@ async def count( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -383,7 +379,6 @@ async def fielddata( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -405,7 +400,6 @@ async def fielddata( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -433,8 +427,6 @@ async def fielddata( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -461,7 +453,6 @@ async def health( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ @@ -490,7 +481,6 @@ async def health( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -513,8 +503,6 @@ async def health( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -536,59 +524,15 @@ async def health( ) @_rewrite_parameters() - async def help( - self, - *, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - help: t.Optional[bool] = None, - human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - pretty: t.Optional[bool] = None, - s: t.Optional[t.Union[str, t.Sequence[str]]] = None, - v: t.Optional[bool] = None, - ) -> TextApiResponse: + async def help(self) -> TextApiResponse: """ Get CAT help. Returns help for the CAT APIs. ``_ - - :param format: Specifies the format to return the columnar data in, can be set - to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. - :param help: When set to `true` will output available columns. This option can't - be combined with any other query string option. 
- :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if format is not None: - __query["format"] = format - if h is not None: - __query["h"] = h - if help is not None: - __query["help"] = help - if human is not None: - __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if pretty is not None: - __query["pretty"] = pretty - if s is not None: - __query["s"] = s - if v is not None: - __query["v"] = v __headers = {"accept": "text/plain"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -854,7 +798,6 @@ async def ml_data_frame_analytics( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -904,7 +847,9 @@ async def ml_data_frame_analytics( ], ] ] = None, - time: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -925,7 +870,6 @@ async def ml_data_frame_analytics( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: Unit used to display time values. @@ -955,8 +899,6 @@ async def ml_data_frame_analytics( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1026,7 +968,6 @@ async def ml_datafeeds( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1097,7 +1038,6 @@ async def ml_datafeeds( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. 
@@ -1125,8 +1065,6 @@ async def ml_datafeeds( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1295,7 +1233,6 @@ async def ml_jobs( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1463,7 +1400,6 @@ async def ml_jobs( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. @@ -1493,8 +1429,6 @@ async def ml_jobs( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1574,7 +1508,6 @@ async def ml_trained_models( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1621,6 +1554,9 @@ async def ml_trained_models( ] ] = None, size: t.Optional[int] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1646,10 +1582,10 @@ async def ml_trained_models( :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of transforms to display. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -1678,14 +1614,14 @@ async def ml_trained_models( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1790,6 +1726,9 @@ async def nodes( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1814,6 +1753,7 @@ async def nodes( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. 
:param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1843,6 +1783,8 @@ async def nodes( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1869,6 +1811,9 @@ async def pending_tasks( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1892,6 +1837,7 @@ async def pending_tasks( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1917,6 +1863,8 @@ async def pending_tasks( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1939,6 +1887,7 @@ async def plugins( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -1958,6 +1907,7 @@ async def plugins( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -1983,6 +1933,8 @@ async def plugins( __query["help"] = help if human is not None: __query["human"] = human + if include_bootstrap is not None: + __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: @@ -2019,9 +1971,11 @@ async def recovery( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2048,10 +2002,10 @@ async def recovery( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. 
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2080,12 +2034,12 @@ async def recovery( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2108,6 +2062,7 @@ async def repositories( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2126,6 +2081,10 @@ async def repositories( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param local: If `true`, the request computes the list of selected nodes from + the local cluster state. If `false` the list of selected nodes are computed + from the cluster state of the master node. In both cases the coordinating + node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as @@ -2147,6 +2106,8 @@ async def repositories( __query["help"] = help if human is not None: __query["human"] = human + if local is not None: + __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -2272,6 +2233,9 @@ async def shards( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2295,6 +2259,7 @@ async def shards( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -2325,6 +2290,8 @@ async def shards( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2352,6 +2319,9 @@ async def snapshots( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2377,6 +2347,7 @@ async def snapshots( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2407,6 +2378,8 @@ async def snapshots( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2432,12 +2405,16 @@ async def tasks( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, + wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Get task information. Get information about tasks currently running in the cluster. @@ -2455,14 +2432,18 @@ async def tasks( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. - :param node_id: Unique node identifiers, which are used to limit the response. + :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. + :param wait_for_completion: If `true`, the request blocks until the task has + completed. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" @@ -2483,18 +2464,22 @@ async def tasks( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time + if timeout is not None: + __query["timeout"] = timeout if v is not None: __query["v"] = v + if wait_for_completion is not None: + __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -2773,7 +2758,6 @@ async def transforms( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -2887,7 +2871,6 @@ async def transforms( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. @@ -2918,8 +2901,6 @@ async def transforms( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index 66b794ce1..a86dca8f7 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -36,7 +36,8 @@ async def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes auto-follow patterns. + Delete auto-follow patterns. Delete a collection of cross-cluster replication + auto-follow patterns. ``_ @@ -111,7 +112,10 @@ async def follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new follower index configured to follow the referenced leader index. + Create a follower. Create a cross-cluster replication follower index that follows + a specific leader index. When the API returns, the follower index exists and + cross-cluster replication starts replicating operations from the leader index + to the follower index. ``_ @@ -231,8 +235,10 @@ async def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about all follower indices, including parameters and status - for each follower index + Get follower information. Get information about all cross-cluster replication + follower indices. For example, the results include follower index names, leader + index names, replication options, and whether the follower indices are active + or paused. ``_ @@ -273,8 +279,9 @@ async def follow_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves follower stats. return shard-level stats about the following tasks - associated with each shard for the specified indices. + Get follower stats. 
Get cross-cluster replication follower stats. The API returns + shard-level stats about the "following tasks" associated with each shard for + the specified indices. ``_ @@ -327,7 +334,23 @@ async def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the follower retention leases from the leader. + Forget a follower. Remove the cross-cluster replication follower retention leases + from the leader. A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader + index retain the history of operations that the shards of the following index + need to run replication. When a follower index is converted to a regular index + by the unfollow API (either by directly calling the API or by index lifecycle + management tasks), these leases are removed. However, removal of the leases can + fail, for example when the remote cluster containing the leader index is unavailable. + While the leases will eventually expire on their own, their extended existence + can cause the leader index to hold more history than necessary and prevent index + lifecycle management from performing some operations on the leader index. This + API exists to enable manually removing the leases when the unfollow API is unable + to do so. NOTE: This API does not stop replication by a following index. If you + use this API with a follower index that is still actively following, the following + index will add back retention leases on the leader. The only purpose of this + API is to handle the case of failure to remove the following retention leases + after the unfollow API is invoked. ``_ @@ -383,8 +406,7 @@ async def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets configured auto-follow patterns. Returns the specified auto-follow pattern - collection. + Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. ``_ @@ -428,7 +450,14 @@ async def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses an auto-follow pattern + Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. New indices that are + created on the remote cluster and match the auto-follow patterns are ignored. + You can resume auto-following with the resume auto-follow pattern API. When it + resumes, the auto-follow pattern is active again and automatically configures + follower indices for newly created indices on the remote cluster that match its + patterns. Remote indices that were created while the pattern was paused will + also be followed, unless they have been deleted or closed in the interim. ``_ @@ -469,8 +498,10 @@ async def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses a follower index. The follower index will not fetch any additional operations - from the leader index. + Pause a follower. Pause a cross-cluster replication follower index. The follower + index will not fetch any additional operations from the leader index. You can + resume following with the resume follower API. You can pause and resume a follower + index to change the configuration of the following task. 
``_ @@ -545,9 +576,14 @@ async def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new named collection of auto-follow patterns against a specified remote - cluster. Newly created indices on the remote cluster matching any of the specified - patterns will be automatically configured as follower indices. + Create or update auto-follow patterns. Create a collection of cross-cluster replication + auto-follow patterns for a remote cluster. Newly created indices on the remote + cluster that match any of the patterns are automatically configured as follower + indices. Indices on the remote cluster that were created before the auto-follow + pattern was created will not be auto-followed even if they match the pattern. + This API can also be used to update auto-follow patterns. NOTE: Follower indices + that were configured automatically before updating an auto-follow pattern will + remain unchanged even if they do not match against the new patterns. ``_ @@ -671,7 +707,11 @@ async def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes an auto-follow pattern that has been paused + Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + pattern that was paused. The auto-follow pattern will resume configuring following + indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless + they have been deleted or closed in the interim. ``_ @@ -736,7 +776,11 @@ async def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes a follower index that has been paused + Resume a follower. Resume a cross-cluster replication follower index that was + paused. The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due + to failures during following tasks. When this API returns, the follower index + will resume fetching operations from the leader index. ``_ @@ -818,7 +862,8 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets all stats related to cross-cluster replication. + Get cross-cluster replication stats. This API returns stats about auto-following + and the same shard-level stats as the get follower stats API. ``_ """ @@ -854,8 +899,13 @@ async def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops the following task associated with a follower index and removes index metadata - and settings associated with cross-cluster replication. + Unfollow an index. Convert a cross-cluster replication follower index to a regular + index. The API stops the following task associated with a follower index and + removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API. + NOTE: Currently cross-cluster replication does not support converting an existing + regular index to a follower index. Converting a follower index to a regular index + is an irreversible operation. 
``_ diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index 86dc55b96..d65ef97f8 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -589,6 +589,125 @@ async def sync_job_cancel( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_check_in( + self, + *, + connector_sync_job_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Check in a connector sync job. Check in a connector sync job and set the `last_seen` + field to the current time before updating it in the internal index. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job + to be checked in. + """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = ( + f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.sync_job_check_in", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("worker_hostname", "sync_cursor"), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_claim( + self, + *, + connector_sync_job_id: str, + worker_hostname: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + sync_cursor: t.Optional[t.Any] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Claim a connector sync job. This action updates the job status to `in_progress` + and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, + it can set the `sync_cursor` property for the sync job. This API is not intended + for direct connector management by users. It supports the implementation of services + that utilize the connector protocol to communicate with Elasticsearch. To sync + data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. + :param worker_hostname: The host name of the current system that will run the + job. + :param sync_cursor: The cursor object from the last incremental sync job. This + should reference the `sync_cursor` field in the connector state for which + the job runs. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if worker_hostname is None and body is None: + raise ValueError("Empty value passed for parameter 'worker_hostname'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if worker_hostname is not None: + __body["worker_hostname"] = worker_hostname + if sync_cursor is not None: + __body["sync_cursor"] = sync_cursor + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_claim", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_delete( @@ -634,6 +753,64 @@ async def sync_job_delete( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("error",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_error( + self, + *, + connector_sync_job_id: str, + error: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set a connector sync job error. Set the `error` field for a connector sync job + and set its `status` to `error`. To sync data using self-managed connectors, + you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier for the connector sync job. + :param error: The error for the connector sync job error field. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if error is None and body is None: + raise ValueError("Empty value passed for parameter 'error'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if error is not None: + __body["error"] = error + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_error", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_get( @@ -1032,6 +1209,66 @@ async def update_error( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("features",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def update_features( + self, + *, + connector_id: str, + features: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update the connector features. Update the connector features in the connector + document. This API can be used to control the following aspects of a connector: + * document-level security * incremental syncs * advanced sync rules * basic sync + rules Normally, the running connector service automatically manages these features. + However, you can use this API to override the default behavior. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_id: The unique identifier of the connector to be updated. 
+ :param features: + """ + if connector_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_id'") + if features is None and body is None: + raise ValueError("Empty value passed for parameter 'features'") + __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} + __path = f'/_connector/{__path_parts["connector_id"]}/_features' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if features is not None: + __body["features"] = features + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.update_features", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 705a799f6..f0395ef2d 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -167,6 +167,8 @@ async def get_status( @_rewrite_parameters( body_fields=( "query", + "allow_partial_search_results", + "allow_partial_sequence_results", "case_sensitive", "event_category_field", "fetch_size", @@ -189,6 +191,8 @@ async def search( index: t.Union[str, t.Sequence[str]], query: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, + allow_partial_search_results: t.Optional[bool] = None, + allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, @@ -234,6 +238,8 @@ async def search( :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: + :param allow_partial_search_results: + :param allow_partial_sequence_results: :param case_sensitive: :param event_category_field: Field containing the event classification, such as process, file, or network. @@ -287,6 +293,12 @@ async def search( if not __body: if query is not None: __body["query"] = query + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results + if allow_partial_sequence_results is not None: + __body["allow_partial_sequence_results"] = ( + allow_partial_sequence_results + ) if case_sensitive is not None: __body["case_sensitive"] = case_sensitive if event_category_field is not None: diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index afa814cfd..e3fc9f8a1 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -35,8 +35,17 @@ async def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets a list of features which can be included in snapshots using the feature_states - field when creating a snapshot + Get the features. Get a list of features that can be included in snapshots using + the `feature_states` field when creating a snapshot. You can use this API to + determine which feature states to include when taking a snapshot. 
By default, + all feature states are included in a snapshot if that snapshot includes the global + state, or none if it does not. A feature state includes one or more system indices + necessary for a given feature to function. In order to ensure data integrity, + all system indices that comprise a feature state are snapshotted and restored + together. The features listed by this API are a combination of built-in features + and features defined by plugins. In order for a feature state to be listed in + this API and recognized as a valid feature state by the create snapshot API, + the plugin that defines that feature must be installed on the master node. ``_ """ @@ -72,7 +81,20 @@ async def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets the internal state of features, usually by deleting system indices + Reset the features. Clear all of the state information stored in system indices + by Elasticsearch features, including the security and machine learning indices. + WARNING: Intended for development and testing use only. Do not reset features + on a production cluster. Return a cluster to the same state as a new installation + by resetting the feature state for all Elasticsearch features. This deletes all + state information stored in system indices. The response code is HTTP 200 if + the state is successfully reset for all features. It is HTTP 500 if the reset + operation failed for any feature. Note that select features might provide a way + to reset particular system indices. Using this API resets all features, both + those that are built-in and implemented as plugins. To list the features that + will be affected, use the get features API. IMPORTANT: The features installed + on the node you submit this request to are the features that will be reset. Run + on the master node if you have any doubts about which plugins are installed on + individual nodes. ``_ """ diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 9422f6be4..912b6ea2a 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -38,9 +38,9 @@ async def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. ``_ @@ -93,9 +93,11 @@ async def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and information + about any failures. 
``_ @@ -157,7 +159,7 @@ async def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. ``_ """ @@ -249,10 +251,18 @@ async def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + composable, and component templates from using custom node attributes and attribute-based + allocation filters to using data tiers. Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers. + Migrating away from custom node attributes routing can be manually performed. + This API provides an automated way of performing three out of the four manual + steps listed in the migration guide: 1. Stop setting the custom hot attribute + on new indices. 1. Remove custom allocation settings from existing ILM policies. + 1. Replace custom allocation settings from existing indices with the corresponding + tier preference. ILM must be stopped before performing the migration. Use the + stop ILM and get ILM status APIs to wait until the reported operation mode is + `STOPPED`. ``_ @@ -312,7 +322,21 @@ async def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Manually moves an index into the specified step and executes that step. + Move to a lifecycle step. Manually move an index into a specific step in the + lifecycle policy and run that step. WARNING: This operation can result in the + loss of data. Manually moving an index into a specific step runs that step even + if it has already been performed. This is a potentially destructive action and + this should be considered an expert level API. You must specify both the current + step and the step to be executed in the body of the request. The request will + fail if the current step does not match the step currently running for the index + This is to prevent the index from being moved from an unexpected step into the + next step. When specifying the target (`next_step`) to which the index will be + moved, either the name or both the action and name fields are optional. If only + the phase is specified, the index will move to the first step of the first action + in the target phase. If the phase and action are specified, the index will move + to the first step of the specified action in the specified phase. Only actions + specified in the ILM policy are considered valid. An index cannot move to a step + that is not part of its policy. ``_ @@ -375,8 +399,9 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy. If the specified policy exists, the policy is replaced - and the policy version is incremented. + Create or update a lifecycle policy. 
If the specified policy exists, it is replaced + and the policy version is incremented. NOTE: Only the latest version of the policy + is stored, you cannot revert to previous versions. ``_ @@ -435,7 +460,8 @@ async def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the assigned lifecycle policy and stops managing the specified index + Remove policies from an index. Remove the assigned lifecycle policies from an + index or a data stream's backing indices. It also stops managing the indices. ``_ @@ -475,7 +501,10 @@ async def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retries executing the policy for an index that is in the ERROR step. + Retry a policy. Retry running the lifecycle policy for an index that is in the + ERROR step. The API sets the policy back to the step where the error occurred + and runs the step. Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ async def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ async def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. + The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index a284f40a8..788558041 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -245,8 +245,8 @@ async def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the caches of one or more indices. For data streams, the API clears the - caches of the stream’s backing indices. + Clear the cache. Clear the cache of one or more indices. For data streams, the + API clears the caches of the stream's backing indices. ``_ @@ -331,7 +331,26 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an existing index. + Clone an index. Clone an existing index into a new index. Each original primary + shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch + does not apply index templates to the resulting index. The API also does not + copy index metadata from the original index. Index metadata includes aliases, + index lifecycle management phase definitions, and cross-cluster replication (CCR) + follower information. For example, if you clone a CCR follower index, the resulting + clone will not be a follower index. 
The clone API copies most index settings + from the source index to the resulting index, with the exception of `index.number_of_replicas` + and `index.auto_expand_replicas`. To set the number of replicas in the resulting + index, configure these settings in the clone request. Cloning works as follows: + * First, it creates a new target index with the same definition as the source + index. * Then it hard-links segments from the source index into the target index. + If the file system does not support hard-linking, all segments are copied into + the new index, which is a much more time consuming process. * Finally, it recovers + the target index as though it were a closed index which had just been re-opened. + IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The target index must not exist. * The source index must have the same number + of primary shards as the target index. * The node handling the clone process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -419,7 +438,24 @@ async def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes an index. + Close an index. A closed index is blocked for read or write operations and does + not allow all operations that opened indices allow. It is not possible to index + documents or to search for documents in a closed index. Closed indices do not + have to maintain internal data structures for indexing or searching documents, + which results in a smaller overhead on the cluster. When opening or closing an + index, the master node is responsible for restarting the index shards to reflect + the new state of the index. The shards will then go through the normal recovery + process. The data of opened and closed indices is automatically replicated by + the cluster to ensure that enough shard copies are safely kept around at all + times. You can open and close multiple indices. An error is thrown if the request + explicitly refers to a missing index. This behaviour can be turned off using + the `ignore_unavailable=true` parameter. By default, you must explicitly name + the indices you are opening or closing. To open or close indices with `_all`, + `*`, or other wildcard expressions, change the` action.destructive_requires_name` + setting to `false`. This setting can also be changed with the cluster update + settings API. Closed indices consume a significant amount of disk-space which + can cause problems in managed environments. Closing indices can be turned off + with the cluster settings API by setting `cluster.indices.close.enable` to `false`. ``_ @@ -1061,7 +1097,10 @@ async def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream. + Analyze the index disk usage. Analyze the disk usage of each field of an index + or data stream. This API might not support indices created in previous Elasticsearch + versions. The result of a small index can be inaccurate as some parts of an index + might not be analyzed by the API. ``_ @@ -1135,9 +1174,14 @@ async def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Aggregates a time series (TSDS) index and stores pre-computed statistical summaries - (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped - by a configured time interval. + Downsample an index. 
Aggregate a time series (TSDS) index and store pre-computed + statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each + metric field grouped by a configured time interval. For example, a TSDS index + that contains metrics sampled every 10 seconds can be downsampled to an hourly + index. All documents within an hour interval are summarized and stored as a single + document in the downsample index. NOTE: Only indices in a time series data stream + are supported. Neither field nor document level security can be defined on the + source index. The source index must be read only (`index.blocks.write: true`). ``_ @@ -1457,8 +1501,8 @@ async def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream’s current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. @@ -1524,7 +1568,10 @@ async def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns field usage information for each shard and field of an index. + Get field usage stats. Get field usage information for each shard and field of + an index. Field usage statistics are automatically captured when queries are + running on a cluster. A shard-level search request that accesses a given field, + even if multiple times during that request, is counted as a single use. ``_ @@ -1612,7 +1659,22 @@ async def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flushes one or more data streams or indices. + Flush data streams or indices. Flushing a data stream or index is the process + of making sure that any data that is currently only stored in the transaction + log is also permanently stored in the Lucene index. When restarting, Elasticsearch + replays any unflushed operations from the transaction log into the Lucene index + to bring it back into the state that it was in before the restart. Elasticsearch + automatically triggers flushes as needed, using heuristics that trade off the + size of the unflushed transaction log against the cost of performing each flush. + After each operation has been flushed it is permanently stored in the Lucene + index. This may mean that there is no need to maintain an additional copy of + it in the transaction log. The transaction log is made up of multiple files, + called generations, and Elasticsearch will delete any generation files when they + are no longer needed, freeing up disk space. It is also possible to trigger a + flush on one or more indices using the flush API, although it is rare for users + to need to call this API directly. If you call the flush API after indexing some + documents then a successful response indicates that Elasticsearch has flushed + all the documents that were indexed before the flush API was called. ``_ @@ -1695,7 +1757,21 @@ async def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the force merge operation on one or more indices. + Force a merge. Perform the force merge operation on the shards of one or more + indices. For data streams, the API forces a merge on the shards of the stream's + backing indices. 
Merging reduces the number of segments in each shard by merging + some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger + a merge manually. WARNING: We recommend force merging only a read-only index + (meaning the index is no longer receiving writes). When documents are updated + or deleted, the old version is not immediately removed but instead soft-deleted + and marked with a "tombstone". These soft-deleted documents are automatically + cleaned up during regular segment merges. But force merge can cause very large + (greater than 5 GB) segments to be produced, which are not eligible for regular + merges. So the number of soft-deleted documents can then grow rapidly, resulting + in higher disk usage and worse search performance. If you regularly force merge + an index receiving writes, this can also make snapshots more expensive, since + the new documents can't be backed up incrementally. ``_ @@ -2681,8 +2757,18 @@ async def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promotes a data stream from a replicated data stream managed by CCR to a regular - data stream + Promote a data stream. Promote a data stream from a replicated data stream managed + by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, + a data stream from a remote cluster can be replicated to the local cluster. These + data streams can't be rolled over in the local cluster. These replicated data + streams roll over only if the upstream data stream rolls over. In the event that + the remote cluster is no longer available, the data stream in the local cluster + can be promoted to a regular data stream, which allows these data streams to + be rolled over in the local cluster. NOTE: When promoting a data stream, ensure + the local cluster has a data stream enabled index template that matches the data + stream. If this is missing, the data stream will not be able to roll over until + a matching index template is created. This will affect the lifecycle management + of the data stream and interfere with the data stream size and retention. ``_ @@ -3335,7 +3421,16 @@ async def put_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on an index pattern that matches the index name. + IMPORTANT: This documentation is about legacy index templates, which are deprecated + and will be replaced by the composable templates introduced in Elasticsearch + 7.8. Composable templates always take precedence over legacy templates. If no + composable template matches a new index, matching legacy templates are applied + according to their order. Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. Settings and mappings + specified in create index API requests override any settings or mappings specified + in an index template. ``_ @@ -3415,9 +3510,25 @@ async def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing and completed shard recoveries for one or more - indices. For data streams, the API returns information for the stream’s backing - indices. + Get index recovery information. 
Get information about ongoing and completed shard + recoveries for one or more indices. For data streams, the API returns information + for the stream's backing indices. Shard recovery is the process of initializing + a shard copy, such as restoring a primary shard from a snapshot or creating a + replica shard from a primary shard. When a shard recovery completes, the recovered + shard is available for search and indexing. Recovery automatically occurs during + the following processes: * When creating an index for the first time. * When + a node rejoins the cluster and starts up any missing primary shard copies using + the data that it holds in its data path. * Creation of new replica shard copies + from the primary. * Relocation of a shard copy to a different node in the same + cluster. * A snapshot restore operation. * A clone, shrink, or split operation. + You can determine the cause of a shard recovery using the recovery or cat recovery + APIs. The index recovery API reports information about completed recoveries only + for shard copies that currently exist in the cluster. It only reports the last + recovery for each shard copy and does not report historical information about + earlier recoveries, nor does it report information about the recoveries of shard + copies that no longer exist. This means that if a shard copy completes a recovery + and then Elasticsearch relocates it onto a different node then the information + about the original recovery will not be shown in the recovery API. ``_ @@ -3551,7 +3662,21 @@ async def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads an index's search analyzers and their resources. + Reload search analyzers. Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's + backing indices. IMPORTANT: After reloading the search analyzers you should clear + the request cache to make sure it doesn't contain responses derived from the + previous versions of the analyzer. You can use the reload search analyzers API + to pick up changes to synonym files used in the `synonym_graph` or `synonym` + token filter of a search analyzer. To be eligible, the token filter must have + an `updateable` flag of `true` and only be used in search analyzers. NOTE: This + API does not perform a reload for each shard of an index. Instead, it performs + a reload for each node containing index shards. As a result, the total shard + count returned by the API can differ from the number of index shards. Because + reloading affects every node with an index shard, it is important to update the + synonym file on every data node in the cluster--including nodes that don't contain + a shard replica--before using this API. This ensures the synonym file is updated + everywhere in the cluster in case shards are relocated in the future. ``_ @@ -3615,9 +3740,20 @@ async def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. Multiple patterns and remote clusters - are supported. + Resolve the cluster. Resolve the specified index expressions to return information + about each cluster, including the local cluster, if included. Multiple patterns + and remote clusters are supported. 
This endpoint is useful before doing a cross-cluster + search in order to determine which remote clusters should be included in a search. + You use the same index expression with this endpoint as you would for cross-cluster + search. Index and cluster exclusions are also supported with this endpoint. For + each cluster in the index expression, information is returned about: * Whether + the querying ("local") cluster is currently connected to each remote cluster + in the index expression scope. * Whether each remote cluster is configured with + `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, + or data streams on that cluster that match the index expression. * Whether the + search is likely to have errors returned when you do the cross-cluster search + (including any authorization errors if you do not have permission to query the + index). * Cluster version information, including the Elasticsearch server version. ``_ @@ -3868,8 +4004,9 @@ async def segments( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the stream’s backing indices. + Get index segments. Get low-level information about the Lucene segments in index + shards. For data streams, the API returns information about the stream's backing + indices. ``_ @@ -3945,8 +4082,14 @@ async def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves store information about replica shards in one or more indices. For - data streams, the API retrieves store information for the stream’s backing indices. + Get index shard stores. Get store information about replica shards in one or + more indices. For data streams, the API retrieves store information for the stream's + backing indices. The index shard stores API returns the following information: + * The node on which each replica shard exists. * The allocation ID for each replica + shard. * A unique ID for each replica shard. * Any errors encountered while opening + the shard index or from an earlier failure. By default, the API returns store + information only for primary shards that are unassigned or have one or more unassigned + replica shards. ``_ @@ -4017,7 +4160,39 @@ async def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrinks an existing index into a new index with fewer primary shards. + Shrink an index. Shrink an index into a new index with fewer primary shards. + Before you can shrink an index: * The index must be read-only. * A copy of every + shard in the index must reside on the same node. * The index must have a green + health status. To make shard allocation easier, we recommend you also remove + the index's replica shards. You can later re-add replica shards as part of the + shrink operation. The requested number of primary shards in the target index + must be a factor of the number of shards in the source index. For example an + index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an + index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards + in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must + be present on the same node. The current write index on a data stream cannot + be shrunk. 
In order to shrink the current write index, the data stream must first + be rolled over so that a new write index is created and then the previous write + index can be shrunk. A shrink operation: * Creates a new target index with the + same definition as the source index, but with a smaller number of primary shards. + * Hard-links segments from the source index into the target index. If the file + system does not support hard-linking, then all segments are copied into the new + index, which is a much more time consuming process. Also if using multiple data + paths, shards on different data paths require a full copy of segment files if + they are not on the same disk since hardlinks do not work across disks. * Recovers + the target index as though it were a closed index which had just been re-opened. + Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + * The target index must not exist. * The source index must have more primary + shards than the target index. * The number of primary shards in the target index + must be a factor of the number of primary shards in the source index. The source + index must have more primary shards than the target index. * The index must not + contain more than 2,147,483,519 documents in total across all shards that will + be shrunk into a single shard on the target index as this is the maximum number + of docs that can fit into a single shard. * The node handling the shrink process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4302,7 +4477,27 @@ async def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Splits an existing index into a new index with more primary shards. + Split an index. Split an index into a new index with more primary shards. * Before + you can split an index: * The index must be read-only. * The cluster health status + must be green. The number of times the index can be split (and the number of + shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` + setting. The number of routing shards specifies the hashing space that is used + internally to distribute documents across shards with consistent hashing. For + instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x + 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target + index with the same definition as the source index, but with a larger number + of primary shards. * Hard-links segments from the source index into the target + index. If the file system doesn't support hard-linking, all segments are copied + into the new index, which is a much more time consuming process. * Hashes all + documents again, after low level files are created, to delete documents that + belong to a different shard. * Recovers the target index as though it were a + closed index which had just been re-opened. IMPORTANT: Indices can only be split + if they satisfy the following requirements: * The target index must not exist. + * The source index must have fewer primary shards than the target index. * The + number of primary shards in the target index must be a multiple of the number + of primary shards in the source index. * The node handling the split process + must have sufficient free disk space to accommodate a second copy of the existing + index. 
``_ @@ -4394,8 +4589,14 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistics for one or more indices. For data streams, the API retrieves - statistics for the stream’s backing indices. + Get index statistics. For data streams, the API retrieves statistics for the + stream's backing indices. By default, the returned statistics are index-level + with `primaries` and `total` aggregations. `primaries` are the values for only + the primary shards. `total` are the accumulated values for both primary and replica + shards. To get shard-level statistics, set the `level` parameter to `shards`. + NOTE: When moving to another node, the shard-level statistics for a shard are + cleared. Although the shard is no longer part of the node, that node retains + any node-level statistics to which the shard contributed. ``_ @@ -4498,7 +4699,8 @@ async def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. + Unfreeze an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. ``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index b7fd1b7a3..1e248fc0c 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -255,7 +255,21 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint + Create an inference endpoint. When you create an inference endpoint, the associated + machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before + using it. To verify the deployment status, use the get trained model statistics + API. Look for `"state": "fully_allocated"` in the response and ensure that the + `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating + multiple endpoints for the same model unless required, as each endpoint consumes + significant resources. IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google + Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + uploaded through Eland, the inference APIs offer an alternative way to use and + manage trained models. However, if you do not plan to use the inference APIs + to use these models or if you want to use non-NLP models, use the machine learning + trained model APIs. ``_ diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 8fbb6876b..7c7d870e8 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -77,6 +77,59 @@ async def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + async def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Delete IP geolocation database configurations. 
+ + ``_ + + :param id: A comma-separated list of IP location database configurations. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. A value + of `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.delete_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def delete_pipeline( self, @@ -217,6 +270,58 @@ async def get_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + async def get_ip_location_database( + self, + *, + id: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get IP geolocation database configurations. + + ``_ + + :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard + (`*`) expressions are supported. To get all database configurations, omit + this parameter or use `*`. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. 
+ """ + __path_parts: t.Dict[str, str] + if id not in SKIP_IN_PATH: + __path_parts = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + else: + __path_parts = {} + __path = "/_ingest/ip_location/database" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.get_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def get_pipeline( self, @@ -328,8 +433,8 @@ async def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update GeoIP database configurations. Create or update IP geolocation - database configurations. + Create or update a GeoIP database configuration. Refer to the create or update + IP geolocation database configuration API. ``_ @@ -384,6 +489,74 @@ async def put_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="configuration", + ) + async def put_ip_location_database( + self, + *, + id: str, + configuration: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create or update an IP geolocation database configuration. + + ``_ + + :param id: The database configuration identifier. + :param configuration: + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response from all relevant nodes in + the cluster after updating the cluster metadata. If no response is received + before the timeout expires, the cluster metadata update still applies but + the response indicates that it was not completely acknowledged. A value of + `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if configuration is None and body is None: + raise ValueError( + "Empty value passed for parameters 'configuration' and 'body', one of them should be set." 
+ ) + elif configuration is not None and body is not None: + raise ValueError("Cannot set both 'configuration' and 'body'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = configuration if configuration is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="ingest.put_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "deprecated", diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index a43d7064d..014bc3e8f 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -35,7 +35,9 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes licensing information for the cluster + Delete the license. When the license expires, your subscription level reverts + to Basic. If the operator privileges feature is enabled, only operator users + can use this API. ``_ """ @@ -72,9 +74,11 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Returns information about your Elastic license, including - its type, its status, when it was issued, and when it expires. For more information - about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). + Get license information. Get information about your Elastic license including + its type, its status, when it was issued, and when it expires. NOTE: If the master + node is generating a new cluster state, the get license API may return a `404 + Not Found` response. If you receive an unexpected 404 response after cluster + startup, wait a short period and retry the request. ``_ @@ -120,7 +124,7 @@ async def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the basic license. + Get the basic license status. ``_ """ @@ -155,7 +159,7 @@ async def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the trial license. + Get the trial status. ``_ """ @@ -196,7 +200,14 @@ async def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the license for the cluster. + Update the license. You can update your license at runtime without shutting down + your nodes. License updates take effect immediately. If the license you are installing + does not support all of the features that were available with your previous license, + however, you are notified in the response. You must then re-submit the API request + with the acknowledge parameter set to true. 
NOTE: If Elasticsearch security features + are enabled and you are installing a gold or higher license, you must enable + TLS on the transport networking layer before you install the license. If the + operator privileges feature is enabled, only operator users can use this API. ``_ @@ -250,12 +261,13 @@ async def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The start basic API enables you to initiate an indefinite basic license, which - gives access to all the basic features. If the basic license does not support - all of the features that are available with your current license, however, you - are notified in the response. You must then re-submit the API request with the - acknowledge parameter set to true. To check the status of your basic license, - use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + Start a basic license. Start an indefinite basic license, which gives access + to all the basic features. NOTE: In order to start a basic license, you must + not currently have a basic license. If the basic license does not support all + of the features that are available with your current license, however, you are + notified in the response. You must then re-submit the API request with the `acknowledge` + parameter set to `true`. To check the status of your basic license, use the get + basic license API. ``_ @@ -297,8 +309,12 @@ async def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - The start trial API enables you to start a 30-day trial, which gives access to - all subscription features. + Start a trial. Start a 30-day trial, which gives access to all subscription features. + NOTE: You are allowed to start a trial only if your cluster has not already activated + a trial for the current major product version. For example, if you have already + activated a trial for v8.0, you cannot start a new trial until v9.0. You can, + however, request an extended trial at https://www.elastic.co/trialextension. + To check the status of your trial, use the get trial status API. ``_ diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index 882a1f633..25e9f82be 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -36,11 +36,12 @@ async def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline used for Logstash Central Management. + Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central + Management. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -76,11 +77,11 @@ async def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves pipelines used for Logstash Central Management. + Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. ``_ - :param id: Comma-separated list of pipeline identifiers. + :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -123,11 +124,12 @@ async def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline used for Logstash Central Management. + Create or update a Logstash pipeline. 
Create a pipeline that is used for Logstash + Central Management. If the specified pipeline exists, it is replaced. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. :param pipeline: """ if id in SKIP_IN_PATH: diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index cbfc2c7d2..c43441ec3 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -36,9 +36,10 @@ async def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about different cluster, node, and index level settings - that use deprecated features that will be removed or changed in the next major - version. + Get deprecation information. Get information about different cluster, node, and + index level settings that use deprecated features that will be removed or changed + in the next major version. TIP: This APIs is designed for indirect use by the + Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ @@ -81,7 +82,11 @@ async def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Find out whether system features need to be upgraded or not + Get feature migration information. Version upgrades sometimes require changes + to how features store configuration information and data in system indices. Check + which features need to be migrated and the status of any migrations that are + in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend you use the Upgrade Assistant. ``_ """ @@ -116,7 +121,11 @@ async def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Begin upgrades for system features + Start the feature migration. Version upgrades sometimes require changes to how + features store configuration information and data in system indices. This API + starts the automatic migration process. Some functionality might be temporarily + unavailable during the migration process. TIP: The API is designed for indirect + use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ """ diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index be77a4c69..76e0eb18c 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -2488,6 +2488,7 @@ async def get_trained_models( ], ] ] = None, + include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2514,6 +2515,8 @@ async def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. + :param include_model_definition: parameter is deprecated! Use [include=definition] + instead :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. 
When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ async def get_trained_models( __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -2697,7 +2702,7 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Return ML defaults and limits. Returns defaults and limits used by machine learning. + Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out @@ -3169,9 +3174,11 @@ async def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) async def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ async def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ async def put_data_frame_analytics( Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ async def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ async def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ async def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ async def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
@@ -3473,6 +3488,8 @@ async def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ async def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ async def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ async def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ async def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. 
If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ async def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: @@ -5469,7 +5521,7 @@ async def validate_detector( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Validates an anomaly detection detector. + Validate an anomaly detection job. ``_ diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index aa8dc41fc..416042fb1 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -42,7 +42,8 @@ async def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Used by the monitoring features to send monitoring data. + Send monitoring data. This API is used by the monitoring features to send monitoring + data. ``_ diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 6a3a4a057..e68df368d 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -43,7 +43,20 @@ async def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing rollup job. + Delete a rollup job. A job must be stopped before it can be deleted. If you attempt + to delete a started job, an error occurs. Similarly, if you attempt to delete + a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you + remove only the process that is actively monitoring and rolling up data. The + API does not delete any previously rolled up data. This is by design; a user + may wish to roll up a static data set. Because the data set is static, after + it has been fully rolled up there is no need to keep the indexing rollup job + around (as there will be no new data). Thus the job can be deleted, leaving behind + the rolled up data for analysis. If you wish to also remove the rollup data and + the rollup index contains the data for only a single job, you can delete the + whole rollup index. If the rollup index stores data from several jobs, you must + issue a delete-by-query that targets the rollup job's identifier in the rollup + index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": + { "_rollup.id": "the_rollup_job_id" } } } ``` ``_ @@ -84,7 +97,11 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the configuration, stats, and status of rollup jobs. + Get rollup job information. Get the configuration, stats, and status of rollup + jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + If a job was created, ran for a while, then was deleted, the API does not return + any details about it. For details about a historical rollup job, the rollup capabilities + API may be more useful. 
``_ @@ -129,8 +146,15 @@ async def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the capabilities of any rollup jobs that have been configured for a specific - index or index pattern. + Get the rollup job capabilities. Get the capabilities of any rollup jobs that + have been configured for a specific index or index pattern. This API is useful + because a rollup job is often configured to rollup only a subset of fields from + the source index. Furthermore, only certain aggregations can be configured for + various fields, leading to a limited subset of functionality depending on that + configuration. This API enables you to inspect an index and determine: 1. Does + this index have associated rollup data somewhere in the cluster? 2. If yes to + the first question, what fields were rolled up, what aggregations can be performed, + and where does the data live? ``_ @@ -175,8 +199,12 @@ async def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (for example, - the index where rollup data is stored). + Get the rollup index capabilities. Get the rollup capabilities of all jobs inside + of a rollup index. A single rollup index may store the data for multiple rollup + jobs and may have a variety of capabilities depending on those jobs. This API + enables you to determine: * What jobs are stored in an index (or indices specified + via a pattern)? * What target indices were rolled up, what fields were used in + those rollups, and what aggregations can be performed on each job? ``_ @@ -239,7 +267,16 @@ async def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a rollup job. + Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with + no rollup usage will fail with a message about the deprecation and planned removal + of rollup features. A cluster needs to contain either a rollup job or a rollup + index in order for this API to be allowed to run. The rollup job configuration + contains all the details about how the job should run, when it indexes documents, + and what future queries will be able to run against the rollup index. There are + three main sections to the job configuration: the logistical details about the + job (for example, the cron schedule), the fields that are used for grouping, + and what metrics to collect for each group. Jobs are created in a `STOPPED` state. + You can start them with the start rollup jobs API. ``_ @@ -356,7 +393,11 @@ async def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard Query DSL. + Search rolled-up data. The rollup search endpoint is needed because, internally, + rolled-up documents utilize a different document structure than the original + data. It rewrites standard Query DSL into a format that matches the rollup documents + then takes the response and rewrites it back to what a client would expect given + the original query. ``_ @@ -420,7 +461,8 @@ async def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an existing, stopped rollup job. + Start rollup jobs. If you try to start a job that does not exist, an exception + occurs. If you try to start a job that is already started, nothing happens. 
``_ @@ -463,7 +505,8 @@ async def stop_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops an existing, started rollup job. + Stop rollup jobs. If you try to stop a job that does not exist, an exception + occurs. If you try to stop a job that is already stopped, nothing happens. ``_ diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index d5a3d150b..d4772b248 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -216,7 +216,7 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing search applications. + Get search applications. Get information about search applications. ``_ @@ -251,6 +251,71 @@ async def list( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="payload", + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def post_behavioral_analytics_event( + self, + *, + collection_name: str, + event_type: t.Union[str, t.Literal["page_view", "search", "search_click"]], + payload: t.Optional[t.Any] = None, + body: t.Optional[t.Any] = None, + debug: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a behavioral analytics collection event. + + ``_ + + :param collection_name: The name of the behavioral analytics collection. + :param event_type: The analytics event type. + :param payload: + :param debug: Whether the response type has to include more details + """ + if collection_name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'collection_name'") + if event_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'event_type'") + if payload is None and body is None: + raise ValueError( + "Empty value passed for parameters 'payload' and 'body', one of them should be set." 
+ ) + elif payload is not None and body is not None: + raise ValueError("Cannot set both 'payload' and 'body'") + __path_parts: t.Dict[str, str] = { + "collection_name": _quote(collection_name), + "event_type": _quote(event_type), + } + __path = f'/_application/analytics/{__path_parts["collection_name"]}/event/{__path_parts["event_type"]}' + __query: t.Dict[str, t.Any] = {} + if debug is not None: + __query["debug"] = debug + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = payload if payload is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.post_behavioral_analytics_event", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="search_application", ) @@ -351,6 +416,70 @@ async def put_behavioral_analytics( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("params",), + ignore_deprecated_options={"params"}, + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def render_query( + self, + *, + name: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + params: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Render a search application query. Generate an Elasticsearch query using the + specified query parameters and the search template associated with the search + application or a default template if none is specified. If a parameter used in + the search template is not specified in `params`, the parameter's default value + will be used. The API returns the specific Elasticsearch query that would be + generated and run by calling the search application search API. You must have + `read` privileges on the backing alias of the search application. + + ``_ + + :param name: The name of the search application to render teh query for. 
+ :param params: + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"name": _quote(name)} + __path = ( + f'/_application/search_application/{__path_parts["name"]}/_render_query' + ) + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if params is not None: + __body["params"] = params + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.render_query", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index bb7f01b39..645d60d89 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -44,7 +44,8 @@ async def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve node-level cache statistics about searchable snapshots. + Get cache statistics. Get statistics about the shared cache for partially mounted + indices. ``_ @@ -103,7 +104,8 @@ async def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache of searchable snapshots. + Clear the cache. Clear indices and data streams from the shared cache for partially + mounted indices. ``_ @@ -175,7 +177,9 @@ async def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot as a searchable index. + Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + this API for snapshots managed by index lifecycle management (ILM). Manually + mounting ILM-managed snapshots can interfere with ILM processes. ``_ @@ -255,7 +259,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve shard-level statistics about searchable snapshots. + Get searchable snapshot statistics. ``_ diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index bf6d8b809..1902de328 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -2324,6 +2324,230 @@ async def invalidate_token( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("nonce", "redirect_uri", "state", "realm"), + ) + async def oidc_authenticate( + self, + *, + nonce: t.Optional[str] = None, + redirect_uri: t.Optional[str] = None, + state: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Authenticate OpenID Connect. 
Exchange an OpenID Connect authentication response + message for an Elasticsearch internal access token and refresh token that can + be subsequently used for authentication. Elasticsearch exposes all the necessary + OpenID Connect related functionality with the OpenID Connect APIs. These APIs + are used internally by Kibana in order to provide OpenID Connect based authentication, + but can also be used by other, custom web applications or other clients. + + ``_ + + :param nonce: Associate a client session with an ID token and mitigate replay + attacks. This value needs to be the same as the one that was provided to + the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + and included in the response to that call. + :param redirect_uri: The URL to which the OpenID Connect Provider redirected + the User Agent in response to an authentication request after a successful + authentication. This URL must be provided as-is (URL encoded), taken from + the body of the response or as the value of a location header in the response + from the OpenID Connect Provider. + :param state: Maintain state between the authentication request and the response. + This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` + API or the one that was generated by Elasticsearch and included in the response + to that call. + :param realm: The name of the OpenID Connect realm. This property is useful in + cases where multiple realms are defined. + """ + if nonce is None and body is None: + raise ValueError("Empty value passed for parameter 'nonce'") + if redirect_uri is None and body is None: + raise ValueError("Empty value passed for parameter 'redirect_uri'") + if state is None and body is None: + raise ValueError("Empty value passed for parameter 'state'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/authenticate" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if nonce is not None: + __body["nonce"] = nonce + if redirect_uri is not None: + __body["redirect_uri"] = redirect_uri + if state is not None: + __body["state"] = state + if realm is not None: + __body["realm"] = realm + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_authenticate", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("access_token", "refresh_token"), + ) + async def oidc_logout( + self, + *, + access_token: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + refresh_token: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Logout of OpenID Connect. Invalidate an access token and a refresh token that + were generated as a response to the `/_security/oidc/authenticate` API. 
If the + OpenID Connect authentication realm in Elasticsearch is accordingly configured, + the response to this call will contain a URI pointing to the end session endpoint + of the OpenID Connect Provider in order to perform single logout. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param access_token: The access token to be invalidated. + :param refresh_token: The refresh token to be invalidated. + """ + if access_token is None and body is None: + raise ValueError("Empty value passed for parameter 'access_token'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/logout" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access_token is not None: + __body["access_token"] = access_token + if refresh_token is not None: + __body["refresh_token"] = refresh_token + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_logout", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("iss", "login_hint", "nonce", "realm", "state"), + ) + async def oidc_prepare_authentication( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + iss: t.Optional[str] = None, + login_hint: t.Optional[str] = None, + nonce: t.Optional[str] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + state: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request + as a URL string based on the configuration of the OpenID Connect authentication + realm in Elasticsearch. The response of this API is a URL pointing to the Authorization + Endpoint of the configured OpenID Connect Provider, which can be used to redirect + the browser of the user in order to continue the authentication process. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param iss: In the case of a third party initiated single sign on, this is the + issuer identifier for the OP that the RP is to send the authentication request + to. It cannot be specified when *realm* is specified. One of *realm* or *iss* + is required. + :param login_hint: In the case of a third party initiated single sign on, it + is a string value that is included in the authentication request as the *login_hint* + parameter. This parameter is not valid when *realm* is specified. + :param nonce: The value used to associate a client session with an ID token and + to mitigate replay attacks. 
If the caller of the API does not provide a value, + Elasticsearch will generate one with sufficient entropy and return it in + the response. + :param realm: The name of the OpenID Connect realm in Elasticsearch the configuration + of which should be used in order to generate the authentication request. + It cannot be specified when *iss* is specified. One of *realm* or *iss* is + required. + :param state: The value used to maintain state between the authentication request + and the response, typically used as a Cross-Site Request Forgery mitigation. + If the caller of the API does not provide a value, Elasticsearch will generate + one with sufficient entropy and return it in the response. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/prepare" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if iss is not None: + __body["iss"] = iss + if login_hint is not None: + __body["login_hint"] = login_hint + if nonce is not None: + __body["nonce"] = nonce + if realm is not None: + __body["realm"] = realm + if state is not None: + __body["state"] = state + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_prepare_authentication", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="privileges", ) diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index 0301435c9..e4117bff8 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -42,8 +42,13 @@ async def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and - ECK. Direct use is not supported. + Cancel node shutdown preparations. Remove a node from the shutdown list so it + can resume normal operations. You must explicitly clear the shutdown request + when a node rejoins the cluster or when a node has permanently left the cluster. + Shutdown requests are never removed automatically by Elasticsearch. NOTE: This + feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, + and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator + privileges feature is enabled, you must be an operator to use this API. ``_ @@ -98,8 +103,13 @@ async def get_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve status of a node or nodes that are currently marked as shutting down. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the shutdown status. Get information about nodes that are ready to be shut + down, have shut down preparations still in progress, or have stalled. The API + returns status information for each part of the shut down process. NOTE: This + feature is designed for indirect use by Elasticsearch Service, Elastic Cloud + Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If + the operator privileges feature is enabled, you must be an operator to use this + API. 
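As a rough, unofficial sketch of the node shutdown APIs described above (it assumes an `AsyncElasticsearch` client reachable at `http://localhost:9200`, operator privileges, and a placeholder node id `instance-0001`):

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    # List the nodes that are currently being prepared for shutdown and their progress.
    status = await client.shutdown.get_node()
    for node in status["nodes"]:
        print(node["node_id"], node["status"])
    # Once a node has rejoined or permanently left the cluster, explicitly clear its
    # shutdown request; it is never removed automatically.
    await client.shutdown.delete_node(node_id="instance-0001")
    await client.close()


asyncio.run(main())
```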
``_ @@ -166,8 +176,17 @@ async def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct - use is not supported. + Prepare a node to be shut down. NOTE: This feature is designed for indirect use + by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. If the operator privileges feature is enabled, you + must be an operator to use this API. The API migrates ongoing tasks and index + shards to other nodes as needed to prepare a node to be restarted or shut down + and removed from the cluster. This ensures that Elasticsearch can be stopped + safely with minimal disruption to the cluster. You must specify the type of shutdown: + `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, + you can use this API to change the shutdown type. IMPORTANT: This API does NOT + terminate the Elasticsearch process. Monitor the node shutdown status to determine + when it is safe to stop Elasticsearch. ``_ diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 831774cc2..6738eed0f 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -36,7 +36,9 @@ async def delete_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing snapshot lifecycle policy. + Delete a policy. Delete a snapshot lifecycle policy definition. This operation + prevents any future snapshots from being taken but does not cancel in-progress + snapshots or remove previously-taken snapshots. ``_ @@ -76,8 +78,10 @@ async def execute_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Immediately creates a snapshot according to the lifecycle policy, without waiting - for the scheduled time. + Run a policy. Immediately create a snapshot according to the snapshot lifecycle + policy without waiting for the scheduled time. The snapshot policy is normally + applied according to its schedule, but you might want to manually run a policy + before performing an upgrade or other maintenance. ``_ @@ -116,7 +120,9 @@ async def execute_retention( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes any snapshots that are expired according to the policy's retention rules. + Run a retention policy. Manually apply the retention policy to force immediate + removal of snapshots that are expired according to the snapshot lifecycle policy + retention rules. The retention policy is normally applied according to its schedule. ``_ """ @@ -152,8 +158,8 @@ async def get_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves one or more snapshot lifecycle policy definitions and information about - the latest snapshot attempts. + Get policy information. Get snapshot lifecycle policy definitions and information + about the latest snapshot attempts. ``_ @@ -195,8 +201,8 @@ async def get_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns global and policy-level statistics about actions taken by snapshot lifecycle - management. + Get snapshot lifecycle management statistics. Get global and policy-level statistics + about actions taken by snapshot lifecycle management. 
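A minimal sketch of driving these SLM APIs from the async client, assuming a policy named `daily-snapshots` already exists and the cluster runs at `http://localhost:9200` (both placeholders):

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    # Take a snapshot now instead of waiting for the policy's next scheduled run.
    run = await client.slm.execute_lifecycle(policy_id="daily-snapshots")
    print(run)
    # Global and per-policy counts of snapshots taken, deleted, and failed.
    stats = await client.slm.get_stats()
    print(stats)
    await client.close()


asyncio.run(main())
```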
``_ """ @@ -231,7 +237,7 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of snapshot lifecycle management (SLM). + Get the snapshot lifecycle management status. ``_ """ @@ -277,12 +283,14 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a snapshot lifecycle policy. + Create or update a policy. Create or update a snapshot lifecycle policy. If the + policy already exists, this request increments the policy version. Only the latest + version of a policy is stored. ``_ - :param policy_id: ID for the snapshot lifecycle policy you want to create or - update. + :param policy_id: The identifier for the snapshot lifecycle policy you want to + create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -354,7 +362,9 @@ async def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns on snapshot lifecycle management (SLM). + Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts + automatically when a cluster is formed. Manually starting SLM is necessary only + if it has been stopped using the stop SLM API. ``_ """ @@ -389,7 +399,15 @@ async def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns off snapshot lifecycle management (SLM). + Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) + operations and the SLM plugin. This API is useful when you are performing maintenance + on a cluster and need to prevent SLM from performing any actions on your data + streams or indices. Stopping SLM does not stop any snapshots that are in progress. + You can manually trigger snapshots with the run snapshot lifecycle policy API + even if SLM is stopped. The API returns a response as soon as the request is + acknowledged, but the plugin might continue to run until in-progress operations + complete and it can be safely stopped. Use the get snapshot lifecycle management + status API to see if SLM is running. ``_ """ diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 35bc41ca1..cbaf16a8b 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -44,8 +44,8 @@ async def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Triggers the review of a snapshot repository’s contents and deletes any stale - data not referenced by existing snapshots. + Clean up the snapshot repository. Trigger the review of the contents of a snapshot + repository and delete any stale data not referenced by existing snapshots. ``_ @@ -99,7 +99,8 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones indices from one snapshot into another snapshot in the same repository. + Clone a snapshot. Clone part of all of a snapshot into another snapshot in the + same repository. ``_ @@ -182,7 +183,7 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a snapshot in a repository. + Create a snapshot. Take a snapshot of a cluster or of data streams and indices. 
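As a hedged usage sketch of the create snapshot API, assuming a registered repository named `my_repository` and placeholder snapshot and index names:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    resp = await client.snapshot.create(
        repository="my_repository",   # must already be registered
        snapshot="snapshot_1",
        indices="my-index-*",         # optional: limit the snapshot to matching indices
        wait_for_completion=True,     # block until the snapshot finishes
    )
    print(resp["snapshot"]["state"])
    await client.close()


asyncio.run(main())
```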
``_
@@ -286,7 +287,11 @@ async def create_repository(
 verify: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Creates a repository.
+ Create or update a snapshot repository. IMPORTANT: If you are migrating searchable
+ snapshots, the repository name must be identical in the source and destination
+ clusters. To register a snapshot repository, the cluster's global metadata must
+ be writable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+ and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.
``_
@@ -346,7 +351,7 @@ async def delete(
 pretty: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Deletes one or more snapshots.
+ Delete snapshots.
``_
@@ -397,7 +402,9 @@ async def delete_repository(
 timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Deletes a repository.
+ Delete snapshot repositories. When a repository is unregistered, Elasticsearch
+ removes only the reference to the location where the repository is storing the
+ snapshots. The snapshots themselves are left untouched and in place.
``_
@@ -471,7 +478,7 @@ async def get(
 verbose: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Returns information about a snapshot.
+ Get snapshot information.
``_
@@ -583,7 +590,7 @@ async def get_repository(
 pretty: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Returns information about a repository.
+ Get snapshot repository information.
``_
@@ -642,7 +649,40 @@ async def repository_verify_integrity(
 verify_blob_contents: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Verifies the integrity of the contents of a snapshot repository
+ Verify the repository integrity. Verify the integrity of the contents of a snapshot
+ repository. This API enables you to perform a comprehensive check of the contents
+ of a repository, looking for any anomalies in its data or metadata which might
+ prevent you from restoring snapshots from the repository or which might cause
+ future snapshot create or delete operations to fail. If you suspect the integrity
+ of the contents of one of your snapshot repositories, cease all write activity
+ to this repository immediately, set its `read_only` option to `true`, and use
+ this API to verify its integrity. Until you do so: * It may not be possible to
+ restore some snapshots from this repository. * Searchable snapshots may report
+ errors when searched or may have unassigned shards. * Taking snapshots into this
+ repository may fail or may appear to succeed but have created a snapshot which
+ cannot be restored. * Deleting snapshots from this repository may fail or may
+ appear to succeed but leave the underlying data on disk. * Continuing to write
+ to the repository while it is in an invalid state may cause additional damage
+ to its contents. If the API finds any problems with the integrity of the contents
+ of your repository, Elasticsearch will not be able to repair the damage. The
+ only way to bring the repository back into a fully working state after its contents
+ have been damaged is by restoring its contents from a repository backup which
+ was taken before the damage occurred. You must also identify what caused the
+ damage and take action to prevent it from happening again. If you cannot restore
+ a repository backup, register a new repository and use this for all future snapshot
+ operations.
In some cases it may be possible to recover some of the contents
+ of a damaged repository, either by restoring as many of its snapshots as needed
+ and taking new snapshots of the restored data, or by using the reindex API to
+ copy data from any searchable snapshots mounted from the damaged repository.
+ Avoid all operations which write to the repository while the verify repository
+ integrity API is running. If something changes the repository contents while
+ an integrity verification is running then Elasticsearch may incorrectly report
+ having detected some anomalies in its contents due to the concurrent writes.
+ It may also incorrectly fail to report some anomalies that the concurrent writes
+ prevented it from detecting. NOTE: This API is intended for exploratory use by
+ humans. You should expect the request parameters and the response format to vary
+ in future versions. NOTE: This API may not work correctly in a mixed-version
+ cluster.
``_
@@ -739,7 +779,20 @@ async def restore(
 body: t.Optional[t.Dict[str, t.Any]] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Restores a snapshot.
+ Restore a snapshot. Restore a snapshot of a cluster or data streams and indices.
+ You can restore a snapshot only to a running cluster with an elected master node.
+ The snapshot repository must be registered and available to the cluster. The
+ snapshot and cluster versions must be compatible. To restore a snapshot, the
+ cluster's global metadata must be writable. Ensure there aren't any cluster blocks
+ that prevent writes. The restore operation ignores index blocks. Before you restore
+ a data stream, ensure the cluster contains a matching index template with data
+ streams enabled. To check, use the index management feature in Kibana or the
+ get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+ ``` If no such template exists, you can create one or restore a cluster state
+ that contains one. Without a matching index template, a data stream can't roll
+ over or create backing indices. If your snapshot contains data from App Search
+ or Workplace Search, you must restore the Enterprise Search encryption key before
+ you restore the snapshot.
``_
@@ -832,7 +885,18 @@ async def status(
 pretty: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Returns information about the status of a snapshot.
+ Get the snapshot status. Get a detailed description of the current state for
+ each shard participating in the snapshot. Note that this API should be used only
+ to obtain detailed shard-level information for ongoing snapshots. If this detail
+ is not needed or you want to obtain information about one or more existing snapshots,
+ use the get snapshot API. WARNING: Using the API to return the status of any
+ snapshots other than currently running snapshots can be expensive. The API requires
+ a read from the repository for each shard in each snapshot. For example, if you
+ have 100 snapshots with 1,000 shards each, an API request that includes all snapshots
+ will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency
+ of your storage, such requests can take an extremely long time to return results.
+ These requests can also tax machine resources and, when using cloud storage,
+ incur high processing costs.
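A small illustrative sketch of checking the status of a snapshot that is currently running, keeping the request scoped to a single repository and snapshot as advised above; the names are placeholders:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    # Scope the request to one named snapshot to avoid an expensive repository-wide read.
    resp = await client.snapshot.status(repository="my_repository", snapshot="snapshot_1")
    for snap in resp["snapshots"]:
        print(snap["snapshot"], snap["state"])
    await client.close()


asyncio.run(main())
```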
``_ @@ -891,7 +955,8 @@ async def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies a repository. + Verify a snapshot repository. Check for common misconfigurations in a snapshot + repository. ``_ diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index d8c8b4168..27fd7e4c4 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -47,7 +47,17 @@ async def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a task, if it can be cancelled through an API. + Cancel a task. A task may continue to run for some time after it has been cancelled + because it may not be able to safely stop its current activity straight away. + It is also possible that Elasticsearch must complete its work on other tasks + before it can process the cancellation. The get task information API will continue + to list these cancelled tasks until they complete. The cancelled flag in the + response indicates that the cancellation command has been processed and the task + will stop as soon as possible. To troubleshoot why a cancelled task does not + complete promptly, use the get task information API with the `?detailed` parameter + to identify the other tasks the system is running. You can also use the node + hot threads API to obtain detailed information about the work the system is doing + instead of completing the cancelled task. ``_ @@ -107,8 +117,7 @@ async def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Returns information about the tasks currently executing - in the cluster. + Get task information. Get information about a task currently running in the cluster. ``_ @@ -166,15 +175,16 @@ async def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The task management API returns information about tasks currently executing on - one or more nodes in the cluster. + Get all tasks. Get information about the tasks currently running on one or more + nodes in the cluster. ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. :param detailed: If `true`, the response includes detailed information about - shard recoveries. + shard recoveries. This information is useful to distinguish tasks from each + other but is more costly to run. :param group_by: Key used to group tasks in the response. :param master_timeout: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index 8da71dafb..a573d8238 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -25,6 +25,349 @@ class TextStructureClient(NamespacedClient): + @_rewrite_parameters() + async def find_field_structure( + self, + *, + field: str, + index: str, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + documents_to_sample: t.Optional[int] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of a text field. Find the structure of a text field in an + Elasticsearch index. + + ``_ + + :param field: The field that should be analyzed. + :param index: The name of the index that contains the analyzed field. + :param column_names: If `format` is set to `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param documents_to_sample: The number of documents to include in the structural + analysis. The minimum value is 2. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output. The intention + in that situation is that a user who knows the meanings will rename the fields + before using them. + :param explain: If true, the response includes a field named `explanation`, which + is an array of strings that indicate how the structure finder produced its + result. + :param format: The high level structure of the text. By default, the API chooses + the format. 
In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is set to + delimited and the delimiter is not set, however, the API tolerates up to + 5% of rows that have a different number of columns than the first row. + :param grok_pattern: If the format is `semi_structured_text`, you can specify + a Grok pattern that is used to extract fields from every message in the text. + The name of the timestamp field in the Grok pattern must match what is specified + in the `timestamp_field` parameter. If that parameter is not specified, the + name of the timestamp field in the Grok pattern must match "timestamp". If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample. + :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with + the exception a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. 
Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages. + """ + if field is None: + raise ValueError("Empty value passed for parameter 'field'") + if index is None: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_field_structure" + __query: t.Dict[str, t.Any] = {} + if field is not None: + __query["field"] = field + if index is not None: + __query["index"] = index + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if documents_to_sample is not None: + __query["documents_to_sample"] = documents_to_sample + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="text_structure.find_field_structure", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("messages",), + ) + async def find_message_structure( + self, + *, + messages: t.Optional[t.Sequence[str]] = None, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of text messages. Find the structure of a list of text messages. + The messages must contain data that is suitable to be ingested into Elasticsearch. 
+ This API provides a starting point for ingesting data into Elasticsearch in a
+ format that is suitable for subsequent use with other Elastic Stack functionality.
+ Use this API rather than the find text structure API if your input text has already
+ been split up into separate messages by some other process. The response from
+ the API contains: * Sample messages. * Statistics that reveal the most common
+ values for all fields detected within the text and basic numeric statistics for
+ numeric fields. * Information about the structure of the text, which is useful
+ when you write ingest configurations to index it or similarly formatted text.
+ * Appropriate mappings for an Elasticsearch index, which you could use to ingest
+ the text. All this information can be calculated by the structure finder with
+ no guidance. However, you can optionally override some of the decisions about
+ the text structure by specifying one or more query parameters.
+
+ ``_
+
+ :param messages: The list of messages you want to analyze.
+ :param column_names: If the format is `delimited`, you can specify the column
+ names in a comma-separated list. If this parameter is not specified, the
+ structure finder uses the column names from the header row of the text. If
+ the text does not have a header row, columns are named "column1", "column2",
+ "column3", for example.
+ :param delimiter: If the format is `delimited`, you can specify the character
+ used to delimit the values in each row. Only a single character is supported;
+ the delimiter cannot have multiple characters. By default, the API considers
+ the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this
+ default scenario, all rows must have the same number of fields for the delimited
+ format to be detected. If you specify a delimiter, up to 10% of the rows
+ can have a different number of columns than the first row.
+ :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns.
+ Use this parameter to specify whether to use ECS Grok patterns instead of
+ legacy ones when the structure finder creates a Grok pattern. This setting
+ primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}`
+ matches the input. If the structure finder identifies a common structure
+ but has no idea of the meaning then generic field names such as `path`, `ipaddress`,
+ `field1`, and `field2` are used in the `grok_pattern` output, with the intention
+ that a user who knows the meanings will rename these fields before using them.
+ :param explain: If this parameter is set to true, the response includes a field
+ named `explanation`, which is an array of strings that indicate how the structure
+ finder produced its result.
+ :param format: The high level structure of the text. By default, the API chooses
+ the format. In this default scenario, all rows must have the same number
+ of fields for a delimited format to be detected. If the format is `delimited`
+ and the delimiter is not set, however, the API tolerates up to 5% of rows
+ that have a different number of columns than the first row.
+ :param grok_pattern: If the format is `semi_structured_text`, you can specify
+ a Grok pattern that is used to extract fields from every message in the text.
+ The name of the timestamp field in the Grok pattern must match what is specified
+ in the `timestamp_field` parameter. If that parameter is not specified, the
+ name of the timestamp field in the Grok pattern must match "timestamp".
If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample. + :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with + the exception a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages. 
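A minimal usage sketch for this new API, assuming a local cluster; the two log lines are invented sample messages, and the response fields accessed are the usual structure-finder outputs:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    resp = await client.text_structure.find_message_structure(
        messages=[
            "[2024-03-05T10:52:36,256][INFO ][o.e.n.Node] starting ...",
            "[2024-03-05T10:52:41,038][INFO ][o.e.n.Node] started",
        ],
    )
    body = resp.body
    # The response describes the detected format and, for semi-structured text,
    # the generated grok pattern and suggested index mappings.
    print(body["format"])
    print(body.get("grok_pattern"))
    await client.close()


asyncio.run(main())
```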
+ """ + if messages is None and body is None: + raise ValueError("Empty value passed for parameter 'messages'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_message_structure" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + if not __body: + if messages is not None: + __body["messages"] = messages + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="text_structure.find_message_structure", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="text_files", ) @@ -50,8 +393,22 @@ async def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Finds the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. + Find the structure of a text file. The text file must contain data that is suitable + to be ingested into Elasticsearch. This API provides a starting point for ingesting + data into Elasticsearch in a format that is suitable for subsequent use with + other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the + data that is posted to this endpoint does not need to be UTF-8 encoded and in + JSON format. It must, however, be text; binary text formats are not currently + supported. The size is limited to the Elasticsearch HTTP receive buffer size, + which defaults to 100 Mb. The response from the API contains: * A couple of messages + from the beginning of the text. * Statistics that reveal the most common values + for all fields detected within the text and basic numeric statistics for numeric + fields. * Information about the structure of the text, which is useful when you + write ingest configurations to index it or similarly formatted text. * Appropriate + mappings for an Elasticsearch index, which you could use to ingest the text. + All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure + by specifying one or more query parameters. ``_ @@ -64,7 +421,7 @@ async def find_structure( column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
If the text does not have a header role, columns are named "column1", "column2", - "column3", etc. + "column3", for example. :param delimiter: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers @@ -76,7 +433,9 @@ async def find_structure( (disabled or v1, default: disabled). :param explain: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure - finder produced its result. + finder produced its result. If the structure finder produces unexpected results + for some text, use this query parameter to help you determine why the returned + structure was chosen. :param format: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields @@ -114,9 +473,9 @@ async def find_structure( whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. - :param timeout: Sets the maximum amount of time that the structure analysis make + :param timeout: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will - be aborted. + be stopped. :param timestamp_field: Optional parameter to specify the timestamp field in the file :param timestamp_format: The Java time format of the timestamp field in the text. @@ -191,7 +550,9 @@ async def test_grok_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Tests a Grok pattern on some text. + Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API + indicates whether the lines match the pattern together with the offsets and lengths + of the matched substrings. ``_ diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index e1fa776c7..320b66b8c 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -844,13 +844,20 @@ async def upgrade_transforms( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades all transforms. This API identifies transforms that have a legacy configuration + Upgrade all transforms. Transforms are compatible across minor versions and between + supported major versions. However, over time, the format of transform configuration + information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains - unchanged. + unchanged. If a transform upgrade step fails, the upgrade stops and an error + is returned about the underlying issue. Resolve the issue then re-run the process + again. A summary is returned when the upgrade is finished. 
To ensure continuous
+ transforms remain running during a major version upgrade of the cluster – for
+ example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading
+ the cluster. You may want to perform a recent cluster backup prior to the upgrade.
``_
diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py
index 387c90355..f19212a90 100644
--- a/elasticsearch/_async/client/watcher.py
+++ b/elasticsearch/_async/client/watcher.py
@@ -37,7 +37,11 @@ async def ack_watch(
 pretty: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Acknowledges a watch, manually throttling the execution of the watch's actions.
+ Acknowledge a watch. Acknowledging a watch enables you to manually throttle the
+ execution of the watch's actions. The acknowledgement state of an action is stored
+ in the `status.actions..ack.state` structure. IMPORTANT: If the specified
+ watch is currently being executed, this API will return an error. The reason for
+ this behavior is to prevent overwriting the watch status from a watch execution.
``_
@@ -88,7 +92,7 @@ async def activate_watch(
 pretty: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Activates a currently inactive watch.
+ Activate a watch. A watch can be either active or inactive.
``_
@@ -128,7 +132,7 @@ async def deactivate_watch(
 pretty: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Deactivates a currently active watch.
+ Deactivate a watch. A watch can be either active or inactive.
``_
@@ -168,7 +172,13 @@ async def delete_watch(
 pretty: t.Optional[bool] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- Removes a watch from Watcher.
+ Delete a watch. When the watch is removed, the document representing the watch
+ in the `.watches` index is gone and it will never be run again. Deleting a watch
+ does not delete any watch execution records related to this watch from the watch
+ history. IMPORTANT: Deleting a watch must be done by using only this API. Do
+ not delete the watch directly from the `.watches` index using the Elasticsearch
+ delete document API. When Elasticsearch security features are enabled, make sure
+ no write privileges are granted to anyone for the `.watches` index.
``_
@@ -237,13 +247,15 @@ async def execute_watch(
 body: t.Optional[t.Dict[str, t.Any]] = None,
 ) -> ObjectApiResponse[t.Any]:
 """
- This API can be used to force execution of the watch outside of its triggering
- logic or to simulate the watch execution for debugging purposes. For testing
- and debugging purposes, you also have fine-grained control on how the watch runs.
- You can execute the watch without executing all of its actions or alternatively
+ Run a watch. This API can be used to force execution of the watch outside of
+ its triggering logic or to simulate the watch execution for debugging purposes.
+ For testing and debugging purposes, you also have fine-grained control on how
+ the watch runs. You can run the watch without running all of its actions or alternatively
 by simulating them. You can also force execution by ignoring the watch condition
 and control whether a watch record would be written to the watch history after
- execution.
+ it runs. You can use the run watch API to run watches that are not yet registered
+ by specifying the watch definition inline. This serves as a great tool for testing
+ and debugging your watches prior to adding them to Watcher.
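An illustrative sketch of force-running a watch for debugging, assuming a registered watch with the placeholder id `my_watch`; the `record_execution` flag shown is the standard execute watch option for writing a record to the watch history:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    # Force the watch to run now, outside of its trigger schedule, and keep a
    # record of this run in the watch history.
    resp = await client.watcher.execute_watch(id="my_watch", record_execution=True)
    print(resp["watch_record"]["state"])
    await client.close()


asyncio.run(main())
```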
``_ @@ -326,7 +338,7 @@ async def get_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a watch by its ID. + Get a watch. ``_ @@ -388,7 +400,17 @@ async def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new watch, or updates an existing one. + Create or update a watch. When a watch is registered, a new document that represents + the watch is added to the `.watches` index and its trigger is immediately registered + with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler + is the trigger engine. IMPORTANT: You must use Kibana or this API to create a + watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + write privileges on the `.watches` index. When you add a watch you can also define + its initial active state by setting the *active* parameter. When Elasticsearch + security features are enabled, your watch can index or search only on indices + for which the user that stored the watch has privileges. If the user is able + to read index `a`, but not index `b`, the same will apply when the watch runs. ``_ @@ -485,7 +507,8 @@ async def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves stored watches. + Query watches. Get all registered watches in a paginated manner and optionally + filter watches by a query. ``_ @@ -555,7 +578,7 @@ async def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts Watcher if it is not already running. + Start the watch service. Start the Watcher service if it is not already running. ``_ """ @@ -612,7 +635,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current Watcher metrics. + Get Watcher statistics. ``_ @@ -658,7 +681,7 @@ async def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops Watcher if it is running. + Stop the watch service. Stop the Watcher service if it is running. ``_ """ diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index ab1396e58..27cc16a64 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -43,7 +43,10 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides general information about the installed X-Pack features. + Get information. The information provided by the API includes: * Build information + including the build number and timestamp. * License information about the currently + installed license. * Feature information for the features that are currently + enabled and available under the current license. ``_ @@ -87,8 +90,9 @@ async def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API provides information about which features are currently enabled and - available under the current license and some usage statistics. + Get usage information. Get information about the features that are currently + enabled and available under the current license. The API also provides some usage + statistics. 
``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 8a20bda38..faf63cf77 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -624,12 +624,14 @@ def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -649,6 +651,8 @@ def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -659,6 +663,8 @@ def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. @@ -692,6 +698,8 @@ def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -700,6 +708,8 @@ def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index e7028252d..bb66e693e 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -302,7 +302,6 @@ def count( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -325,7 +324,6 @@ def count( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
@@ -351,8 +349,6 @@ def count( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -383,7 +379,6 @@ def fielddata( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -405,7 +400,6 @@ def fielddata( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -433,8 +427,6 @@ def fielddata( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -461,7 +453,6 @@ def health( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ @@ -490,7 +481,6 @@ def health( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -513,8 +503,6 @@ def health( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -536,59 +524,15 @@ def health( ) @_rewrite_parameters() - def help( - self, - *, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - help: t.Optional[bool] = None, - human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - pretty: t.Optional[bool] = None, - s: t.Optional[t.Union[str, t.Sequence[str]]] = None, - v: t.Optional[bool] = None, - ) -> TextApiResponse: + def help(self) -> TextApiResponse: """ Get CAT help. Returns help for the CAT APIs. ``_ - - :param format: Specifies the format to return the columnar data in, can be set - to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. - :param help: When set to `true` will output available columns. This option can't - be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. 
- :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if format is not None: - __query["format"] = format - if h is not None: - __query["h"] = h - if help is not None: - __query["help"] = help - if human is not None: - __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if pretty is not None: - __query["pretty"] = pretty - if s is not None: - __query["s"] = s - if v is not None: - __query["v"] = v __headers = {"accept": "text/plain"} return self.perform_request( # type: ignore[return-value] "GET", @@ -854,7 +798,6 @@ def ml_data_frame_analytics( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -904,7 +847,9 @@ def ml_data_frame_analytics( ], ] ] = None, - time: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -925,7 +870,6 @@ def ml_data_frame_analytics( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: Unit used to display time values. @@ -955,8 +899,6 @@ def ml_data_frame_analytics( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1026,7 +968,6 @@ def ml_datafeeds( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1097,7 +1038,6 @@ def ml_datafeeds( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. 
@@ -1125,8 +1065,6 @@ def ml_datafeeds( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1295,7 +1233,6 @@ def ml_jobs( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1463,7 +1400,6 @@ def ml_jobs( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. @@ -1493,8 +1429,6 @@ def ml_jobs( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1574,7 +1508,6 @@ def ml_trained_models( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1621,6 +1554,9 @@ def ml_trained_models( ] ] = None, size: t.Optional[int] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1646,10 +1582,10 @@ def ml_trained_models( :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of transforms to display. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -1678,14 +1614,14 @@ def ml_trained_models( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1790,6 +1726,9 @@ def nodes( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1814,6 +1753,7 @@ def nodes( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] = {} @@ -1843,6 +1783,8 @@ def nodes( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1869,6 +1811,9 @@ def pending_tasks( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1892,6 +1837,7 @@ def pending_tasks( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1917,6 +1863,8 @@ def pending_tasks( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1939,6 +1887,7 @@ def plugins( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -1958,6 +1907,7 @@ def plugins( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -1983,6 +1933,8 @@ def plugins( __query["help"] = help if human is not None: __query["human"] = human + if include_bootstrap is not None: + __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: @@ -2019,9 +1971,11 @@ def recovery( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2048,10 +2002,10 @@ def recovery( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -2080,12 +2034,12 @@ def recovery( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2108,6 +2062,7 @@ def repositories( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2126,6 +2081,10 @@ def repositories( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param local: If `true`, the request computes the list of selected nodes from + the local cluster state. If `false` the list of selected nodes are computed + from the cluster state of the master node. In both cases the coordinating + node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as @@ -2147,6 +2106,8 @@ def repositories( __query["help"] = help if human is not None: __query["human"] = human + if local is not None: + __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -2272,6 +2233,9 @@ def shards( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2295,6 +2259,7 @@ def shards( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2325,6 +2290,8 @@ def shards( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2352,6 +2319,9 @@ def snapshots( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2377,6 +2347,7 @@ def snapshots( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -2407,6 +2378,8 @@ def snapshots( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2432,12 +2405,16 @@ def tasks( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, + wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Get task information. Get information about tasks currently running in the cluster. @@ -2455,14 +2432,18 @@ def tasks( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. - :param node_id: Unique node identifiers, which are used to limit the response. + :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. + :param wait_for_completion: If `true`, the request blocks until the task has + completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" @@ -2483,18 +2464,22 @@ def tasks( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time + if timeout is not None: + __query["timeout"] = timeout if v is not None: __query["v"] = v + if wait_for_completion is not None: + __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -2773,7 +2758,6 @@ def transforms( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -2887,7 +2871,6 @@ def transforms( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. 
- :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. @@ -2918,8 +2901,6 @@ def transforms( __query["help"] = help if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index b7b2675b2..489f88ac0 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -36,7 +36,8 @@ def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes auto-follow patterns. + Delete auto-follow patterns. Delete a collection of cross-cluster replication + auto-follow patterns. ``_ @@ -111,7 +112,10 @@ def follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new follower index configured to follow the referenced leader index. + Create a follower. Create a cross-cluster replication follower index that follows + a specific leader index. When the API returns, the follower index exists and + cross-cluster replication starts replicating operations from the leader index + to the follower index. ``_ @@ -231,8 +235,10 @@ def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about all follower indices, including parameters and status - for each follower index + Get follower information. Get information about all cross-cluster replication + follower indices. For example, the results include follower index names, leader + index names, replication options, and whether the follower indices are active + or paused. ``_ @@ -273,8 +279,9 @@ def follow_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves follower stats. return shard-level stats about the following tasks - associated with each shard for the specified indices. + Get follower stats. Get cross-cluster replication follower stats. The API returns + shard-level stats about the "following tasks" associated with each shard for + the specified indices. ``_ @@ -327,7 +334,23 @@ def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the follower retention leases from the leader. + Forget a follower. Remove the cross-cluster replication follower retention leases + from the leader. A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader + index retain the history of operations that the shards of the following index + need to run replication. When a follower index is converted to a regular index + by the unfollow API (either by directly calling the API or by index lifecycle + management tasks), these leases are removed. However, removal of the leases can + fail, for example when the remote cluster containing the leader index is unavailable. + While the leases will eventually expire on their own, their extended existence + can cause the leader index to hold more history than necessary and prevent index + lifecycle management from performing some operations on the leader index. This + API exists to enable manually removing the leases when the unfollow API is unable + to do so. NOTE: This API does not stop replication by a following index. 
If you + use this API with a follower index that is still actively following, the following + index will add back retention leases on the leader. The only purpose of this + API is to handle the case of failure to remove the following retention leases + after the unfollow API is invoked. ``_ @@ -383,8 +406,7 @@ def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets configured auto-follow patterns. Returns the specified auto-follow pattern - collection. + Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. ``_ @@ -428,7 +450,14 @@ def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses an auto-follow pattern + Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. New indices that are + created on the remote cluster and match the auto-follow patterns are ignored. + You can resume auto-following with the resume auto-follow pattern API. When it + resumes, the auto-follow pattern is active again and automatically configures + follower indices for newly created indices on the remote cluster that match its + patterns. Remote indices that were created while the pattern was paused will + also be followed, unless they have been deleted or closed in the interim. ``_ @@ -469,8 +498,10 @@ def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses a follower index. The follower index will not fetch any additional operations - from the leader index. + Pause a follower. Pause a cross-cluster replication follower index. The follower + index will not fetch any additional operations from the leader index. You can + resume following with the resume follower API. You can pause and resume a follower + index to change the configuration of the following task. ``_ @@ -545,9 +576,14 @@ def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new named collection of auto-follow patterns against a specified remote - cluster. Newly created indices on the remote cluster matching any of the specified - patterns will be automatically configured as follower indices. + Create or update auto-follow patterns. Create a collection of cross-cluster replication + auto-follow patterns for a remote cluster. Newly created indices on the remote + cluster that match any of the patterns are automatically configured as follower + indices. Indices on the remote cluster that were created before the auto-follow + pattern was created will not be auto-followed even if they match the pattern. + This API can also be used to update auto-follow patterns. NOTE: Follower indices + that were configured automatically before updating an auto-follow pattern will + remain unchanged even if they do not match against the new patterns. ``_ @@ -671,7 +707,11 @@ def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes an auto-follow pattern that has been paused + Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + pattern that was paused. The auto-follow pattern will resume configuring following + indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless + they have been deleted or closed in the interim. 
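A short sketch of pausing and resuming a cross-cluster replication auto-follow pattern, assuming a client instance named `client` and a hypothetical pattern called "logs-pattern":

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed cluster endpoint

# Temporarily stop auto-following newly created remote indices, then resume afterwards.
client.ccr.pause_auto_follow_pattern(name="logs-pattern")
# ... remote-cluster maintenance happens here ...
client.ccr.resume_auto_follow_pattern(name="logs-pattern")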
``_ @@ -736,7 +776,11 @@ def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes a follower index that has been paused + Resume a follower. Resume a cross-cluster replication follower index that was + paused. The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due + to failures during following tasks. When this API returns, the follower index + will resume fetching operations from the leader index. ``_ @@ -818,7 +862,8 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets all stats related to cross-cluster replication. + Get cross-cluster replication stats. This API returns stats about auto-following + and the same shard-level stats as the get follower stats API. ``_ """ @@ -854,8 +899,13 @@ def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops the following task associated with a follower index and removes index metadata - and settings associated with cross-cluster replication. + Unfollow an index. Convert a cross-cluster replication follower index to a regular + index. The API stops the following task associated with a follower index and + removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API. + NOTE: Currently cross-cluster replication does not support converting an existing + regular index to a follower index. Converting a follower index to a regular index + is an irreversible operation. ``_ diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index 55b87e090..773438374 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -589,6 +589,125 @@ def sync_job_cancel( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_check_in( + self, + *, + connector_sync_job_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Check in a connector sync job. Check in a connector sync job and set the `last_seen` + field to the current time before updating it in the internal index. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job + to be checked in. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = ( + f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.sync_job_check_in", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("worker_hostname", "sync_cursor"), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_claim( + self, + *, + connector_sync_job_id: str, + worker_hostname: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + sync_cursor: t.Optional[t.Any] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Claim a connector sync job. This action updates the job status to `in_progress` + and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, + it can set the `sync_cursor` property for the sync job. This API is not intended + for direct connector management by users. It supports the implementation of services + that utilize the connector protocol to communicate with Elasticsearch. To sync + data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. + :param worker_hostname: The host name of the current system that will run the + job. + :param sync_cursor: The cursor object from the last incremental sync job. This + should reference the `sync_cursor` field in the connector state for which + the job runs. 
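A minimal call sketch for the experimental sync job claim endpoint added above; the sync job identifier and worker hostname are placeholders:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed cluster endpoint

# Mark a pending sync job as in_progress and record which worker picked it up.
client.connector.sync_job_claim(
    connector_sync_job_id="my-connector-sync-job-id",
    worker_hostname="connector-worker-01",
)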
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if worker_hostname is None and body is None: + raise ValueError("Empty value passed for parameter 'worker_hostname'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if worker_hostname is not None: + __body["worker_hostname"] = worker_hostname + if sync_cursor is not None: + __body["sync_cursor"] = sync_cursor + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_claim", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_delete( @@ -634,6 +753,64 @@ def sync_job_delete( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("error",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_error( + self, + *, + connector_sync_job_id: str, + error: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set a connector sync job error. Set the `error` field for a connector sync job + and set its `status` to `error`. To sync data using self-managed connectors, + you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier for the connector sync job. + :param error: The error for the connector sync job error field. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if error is None and body is None: + raise ValueError("Empty value passed for parameter 'error'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if error is not None: + __body["error"] = error + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_error", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_get( @@ -1032,6 +1209,66 @@ def update_error( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("features",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def update_features( + self, + *, + connector_id: str, + features: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update the connector features. Update the connector features in the connector + document. This API can be used to control the following aspects of a connector: + * document-level security * incremental syncs * advanced sync rules * basic sync + rules Normally, the running connector service automatically manages these features. + However, you can use this API to override the default behavior. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_id: The unique identifier of the connector to be updated. 
+ :param features: + """ + if connector_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_id'") + if features is None and body is None: + raise ValueError("Empty value passed for parameter 'features'") + __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} + __path = f'/_connector/{__path_parts["connector_id"]}/_features' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if features is not None: + __body["features"] = features + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.update_features", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 55d9a6d62..28b161f5e 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -167,6 +167,8 @@ def get_status( @_rewrite_parameters( body_fields=( "query", + "allow_partial_search_results", + "allow_partial_sequence_results", "case_sensitive", "event_category_field", "fetch_size", @@ -189,6 +191,8 @@ def search( index: t.Union[str, t.Sequence[str]], query: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, + allow_partial_search_results: t.Optional[bool] = None, + allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, @@ -234,6 +238,8 @@ def search( :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: + :param allow_partial_search_results: + :param allow_partial_sequence_results: :param case_sensitive: :param event_category_field: Field containing the event classification, such as process, file, or network. @@ -287,6 +293,12 @@ def search( if not __body: if query is not None: __body["query"] = query + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results + if allow_partial_sequence_results is not None: + __body["allow_partial_sequence_results"] = ( + allow_partial_sequence_results + ) if case_sensitive is not None: __body["case_sensitive"] = case_sensitive if event_category_field is not None: diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 4386b4533..d207b697a 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -35,8 +35,17 @@ def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets a list of features which can be included in snapshots using the feature_states - field when creating a snapshot + Get the features. Get a list of features that can be included in snapshots using + the `feature_states` field when creating a snapshot. You can use this API to + determine which feature states to include when taking a snapshot. 
By default, + all feature states are included in a snapshot if that snapshot includes the global + state, or none if it does not. A feature state includes one or more system indices + necessary for a given feature to function. In order to ensure data integrity, + all system indices that comprise a feature state are snapshotted and restored + together. The features listed by this API are a combination of built-in features + and features defined by plugins. In order for a feature state to be listed in + this API and recognized as a valid feature state by the create snapshot API, + the plugin that defines that feature must be installed on the master node. ``_ """ @@ -72,7 +81,20 @@ def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets the internal state of features, usually by deleting system indices + Reset the features. Clear all of the state information stored in system indices + by Elasticsearch features, including the security and machine learning indices. + WARNING: Intended for development and testing use only. Do not reset features + on a production cluster. Return a cluster to the same state as a new installation + by resetting the feature state for all Elasticsearch features. This deletes all + state information stored in system indices. The response code is HTTP 200 if + the state is successfully reset for all features. It is HTTP 500 if the reset + operation failed for any feature. Note that select features might provide a way + to reset particular system indices. Using this API resets all features, both + those that are built-in and implemented as plugins. To list the features that + will be affected, use the get features API. IMPORTANT: The features installed + on the node you submit this request to are the features that will be reset. Run + on the master node if you have any doubts about which plugins are installed on + individual nodes. ``_ """ diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index 674b47f39..17f6f6e42 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -38,9 +38,9 @@ def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. ``_ @@ -93,9 +93,11 @@ def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and information + about any failures. 
``_ @@ -157,7 +159,7 @@ def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. ``_ """ @@ -249,10 +251,18 @@ def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + composable, and component templates from using custom node attributes and attribute-based + allocation filters to using data tiers. Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers. + Migrating away from custom node attributes routing can be manually performed. + This API provides an automated way of performing three out of the four manual + steps listed in the migration guide: 1. Stop setting the custom hot attribute + on new indices. 1. Remove custom allocation settings from existing ILM policies. + 1. Replace custom allocation settings from existing indices with the corresponding + tier preference. ILM must be stopped before performing the migration. Use the + stop ILM and get ILM status APIs to wait until the reported operation mode is + `STOPPED`. ``_ @@ -312,7 +322,21 @@ def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Manually moves an index into the specified step and executes that step. + Move to a lifecycle step. Manually move an index into a specific step in the + lifecycle policy and run that step. WARNING: This operation can result in the + loss of data. Manually moving an index into a specific step runs that step even + if it has already been performed. This is a potentially destructive action and + this should be considered an expert level API. You must specify both the current + step and the step to be executed in the body of the request. The request will + fail if the current step does not match the step currently running for the index + This is to prevent the index from being moved from an unexpected step into the + next step. When specifying the target (`next_step`) to which the index will be + moved, either the name or both the action and name fields are optional. If only + the phase is specified, the index will move to the first step of the first action + in the target phase. If the phase and action are specified, the index will move + to the first step of the specified action in the specified phase. Only actions + specified in the ILM policy are considered valid. An index cannot move to a step + that is not part of its policy. ``_ @@ -375,8 +399,9 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy. If the specified policy exists, the policy is replaced - and the policy version is incremented. + Create or update a lifecycle policy. 
If the specified policy exists, it is replaced + and the policy version is incremented. NOTE: Only the latest version of the policy + is stored, you cannot revert to previous versions. ``_ @@ -435,7 +460,8 @@ def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the assigned lifecycle policy and stops managing the specified index + Remove policies from an index. Remove the assigned lifecycle policies from an + index or a data stream's backing indices. It also stops managing the indices. ``_ @@ -475,7 +501,10 @@ def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retries executing the policy for an index that is in the ERROR step. + Retry a policy. Retry running the lifecycle policy for an index that is in the + ERROR step. The API sets the policy back to the step where the error occurred + and runs the step. Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. + The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 2d5368773..4a70f9d61 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -245,8 +245,8 @@ def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the caches of one or more indices. For data streams, the API clears the - caches of the stream’s backing indices. + Clear the cache. Clear the cache of one or more indices. For data streams, the + API clears the caches of the stream's backing indices. ``_ @@ -331,7 +331,26 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an existing index. + Clone an index. Clone an existing index into a new index. Each original primary + shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch + does not apply index templates to the resulting index. The API also does not + copy index metadata from the original index. Index metadata includes aliases, + index lifecycle management phase definitions, and cross-cluster replication (CCR) + follower information. For example, if you clone a CCR follower index, the resulting + clone will not be a follower index. 
The clone API copies most index settings + from the source index to the resulting index, with the exception of `index.number_of_replicas` + and `index.auto_expand_replicas`. To set the number of replicas in the resulting + index, configure these settings in the clone request. Cloning works as follows: + * First, it creates a new target index with the same definition as the source + index. * Then it hard-links segments from the source index into the target index. + If the file system does not support hard-linking, all segments are copied into + the new index, which is a much more time consuming process. * Finally, it recovers + the target index as though it were a closed index which had just been re-opened. + IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The target index must not exist. * The source index must have the same number + of primary shards as the target index. * The node handling the clone process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -419,7 +438,24 @@ def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes an index. + Close an index. A closed index is blocked for read or write operations and does + not allow all operations that opened indices allow. It is not possible to index + documents or to search for documents in a closed index. Closed indices do not + have to maintain internal data structures for indexing or searching documents, + which results in a smaller overhead on the cluster. When opening or closing an + index, the master node is responsible for restarting the index shards to reflect + the new state of the index. The shards will then go through the normal recovery + process. The data of opened and closed indices is automatically replicated by + the cluster to ensure that enough shard copies are safely kept around at all + times. You can open and close multiple indices. An error is thrown if the request + explicitly refers to a missing index. This behaviour can be turned off using + the `ignore_unavailable=true` parameter. By default, you must explicitly name + the indices you are opening or closing. To open or close indices with `_all`, + `*`, or other wildcard expressions, change the` action.destructive_requires_name` + setting to `false`. This setting can also be changed with the cluster update + settings API. Closed indices consume a significant amount of disk-space which + can cause problems in managed environments. Closing indices can be turned off + with the cluster settings API by setting `cluster.indices.close.enable` to `false`. ``_ @@ -1061,7 +1097,10 @@ def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream. + Analyze the index disk usage. Analyze the disk usage of each field of an index + or data stream. This API might not support indices created in previous Elasticsearch + versions. The result of a small index can be inaccurate as some parts of an index + might not be analyzed by the API. ``_ @@ -1135,9 +1174,14 @@ def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Aggregates a time series (TSDS) index and stores pre-computed statistical summaries - (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped - by a configured time interval. + Downsample an index. 
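Since replica settings are not copied from the source, a clone call usually sets them explicitly. A minimal sketch of the flow described above, assuming the source index first has writes blocked via an index settings update; the index names, `settings` keyword, and replica count are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Block writes on the source index so it can be safely cloned
# (setting name assumed: index.blocks.write).
client.indices.put_settings(
    index="my-source-index",
    settings={"index.blocks.write": True},
)

# Clone into a new target index; number_of_replicas is set explicitly
# because replica settings are not copied from the source.
client.indices.clone(
    index="my-source-index",
    target="my-target-index",
    settings={"index.number_of_replicas": 1},
)
```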
Aggregate a time series (TSDS) index and store pre-computed + statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each + metric field grouped by a configured time interval. For example, a TSDS index + that contains metrics sampled every 10 seconds can be downsampled to an hourly + index. All documents within an hour interval are summarized and stored as a single + document in the downsample index. NOTE: Only indices in a time series data stream + are supported. Neither field nor document level security can be defined on the + source index. The source index must be read only (`index.blocks.write: true`). ``_ @@ -1457,8 +1501,8 @@ def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream’s current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. @@ -1524,7 +1568,10 @@ def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns field usage information for each shard and field of an index. + Get field usage stats. Get field usage information for each shard and field of + an index. Field usage statistics are automatically captured when queries are + running on a cluster. A shard-level search request that accesses a given field, + even if multiple times during that request, is counted as a single use. ``_ @@ -1612,7 +1659,22 @@ def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flushes one or more data streams or indices. + Flush data streams or indices. Flushing a data stream or index is the process + of making sure that any data that is currently only stored in the transaction + log is also permanently stored in the Lucene index. When restarting, Elasticsearch + replays any unflushed operations from the transaction log into the Lucene index + to bring it back into the state that it was in before the restart. Elasticsearch + automatically triggers flushes as needed, using heuristics that trade off the + size of the unflushed transaction log against the cost of performing each flush. + After each operation has been flushed it is permanently stored in the Lucene + index. This may mean that there is no need to maintain an additional copy of + it in the transaction log. The transaction log is made up of multiple files, + called generations, and Elasticsearch will delete any generation files when they + are no longer needed, freeing up disk space. It is also possible to trigger a + flush on one or more indices using the flush API, although it is rare for users + to need to call this API directly. If you call the flush API after indexing some + documents then a successful response indicates that Elasticsearch has flushed + all the documents that were indexed before the flush API was called. ``_ @@ -1695,7 +1757,21 @@ def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the force merge operation on one or more indices. + Force a merge. Perform the force merge operation on the shards of one or more + indices. For data streams, the API forces a merge on the shards of the stream's + backing indices. 
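The downsample description above reduces to a single call once the source index is write-blocked. A hedged sketch, assuming the keyword names `target_index` and `config`; the backing index name, target name, and hourly interval are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# The source index must already be read only (index.blocks.write: true).
client.indices.downsample(
    index=".ds-metrics-2025.01.06-000001",  # hypothetical TSDS backing index
    target_index="metrics-downsampled-1h",  # hypothetical target name
    config={"fixed_interval": "1h"},        # one summary document per hour
)
```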
Merging reduces the number of segments in each shard by merging + some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger + a merge manually. WARNING: We recommend force merging only a read-only index + (meaning the index is no longer receiving writes). When documents are updated + or deleted, the old version is not immediately removed but instead soft-deleted + and marked with a "tombstone". These soft-deleted documents are automatically + cleaned up during regular segment merges. But force merge can cause very large + (greater than 5 GB) segments to be produced, which are not eligible for regular + merges. So the number of soft-deleted documents can then grow rapidly, resulting + in higher disk usage and worse search performance. If you regularly force merge + an index receiving writes, this can also make snapshots more expensive, since + the new documents can't be backed up incrementally. ``_ @@ -2681,8 +2757,18 @@ def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promotes a data stream from a replicated data stream managed by CCR to a regular - data stream + Promote a data stream. Promote a data stream from a replicated data stream managed + by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, + a data stream from a remote cluster can be replicated to the local cluster. These + data streams can't be rolled over in the local cluster. These replicated data + streams roll over only if the upstream data stream rolls over. In the event that + the remote cluster is no longer available, the data stream in the local cluster + can be promoted to a regular data stream, which allows these data streams to + be rolled over in the local cluster. NOTE: When promoting a data stream, ensure + the local cluster has a data stream enabled index template that matches the data + stream. If this is missing, the data stream will not be able to roll over until + a matching index template is created. This will affect the lifecycle management + of the data stream and interfere with the data stream size and retention. ``_ @@ -3335,7 +3421,16 @@ def put_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on an index pattern that matches the index name. + IMPORTANT: This documentation is about legacy index templates, which are deprecated + and will be replaced by the composable templates introduced in Elasticsearch + 7.8. Composable templates always take precedence over legacy templates. If no + composable template matches a new index, matching legacy templates are applied + according to their order. Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. Settings and mappings + specified in create index API requests override any settings or mappings specified + in an index template. ``_ @@ -3415,9 +3510,25 @@ def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing and completed shard recoveries for one or more - indices. For data streams, the API returns information for the stream’s backing - indices. + Get index recovery information. 
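Given the warning above about force merging indices that still receive writes, a typical use is a one-off merge of a read-only index down to a single segment. A minimal sketch; the index name and segment count are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Force merge a read-only index down to one segment per shard and
# wait for the merge to finish before returning.
client.indices.forcemerge(
    index="logs-2024.12",  # hypothetical read-only index
    max_num_segments=1,
    wait_for_completion=True,
)
```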
Get information about ongoing and completed shard + recoveries for one or more indices. For data streams, the API returns information + for the stream's backing indices. Shard recovery is the process of initializing + a shard copy, such as restoring a primary shard from a snapshot or creating a + replica shard from a primary shard. When a shard recovery completes, the recovered + shard is available for search and indexing. Recovery automatically occurs during + the following processes: * When creating an index for the first time. * When + a node rejoins the cluster and starts up any missing primary shard copies using + the data that it holds in its data path. * Creation of new replica shard copies + from the primary. * Relocation of a shard copy to a different node in the same + cluster. * A snapshot restore operation. * A clone, shrink, or split operation. + You can determine the cause of a shard recovery using the recovery or cat recovery + APIs. The index recovery API reports information about completed recoveries only + for shard copies that currently exist in the cluster. It only reports the last + recovery for each shard copy and does not report historical information about + earlier recoveries, nor does it report information about the recoveries of shard + copies that no longer exist. This means that if a shard copy completes a recovery + and then Elasticsearch relocates it onto a different node then the information + about the original recovery will not be shown in the recovery API. ``_ @@ -3551,7 +3662,21 @@ def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads an index's search analyzers and their resources. + Reload search analyzers. Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's + backing indices. IMPORTANT: After reloading the search analyzers you should clear + the request cache to make sure it doesn't contain responses derived from the + previous versions of the analyzer. You can use the reload search analyzers API + to pick up changes to synonym files used in the `synonym_graph` or `synonym` + token filter of a search analyzer. To be eligible, the token filter must have + an `updateable` flag of `true` and only be used in search analyzers. NOTE: This + API does not perform a reload for each shard of an index. Instead, it performs + a reload for each node containing index shards. As a result, the total shard + count returned by the API can differ from the number of index shards. Because + reloading affects every node with an index shard, it is important to update the + synonym file on every data node in the cluster--including nodes that don't contain + a shard replica--before using this API. This ensures the synonym file is updated + everywhere in the cluster in case shards are relocated in the future. ``_ @@ -3615,9 +3740,20 @@ def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. Multiple patterns and remote clusters - are supported. + Resolve the cluster. Resolve the specified index expressions to return information + about each cluster, including the local cluster, if included. Multiple patterns + and remote clusters are supported. This endpoint is useful before doing a cross-cluster + search in order to determine which remote clusters should be included in a search. 
+ You use the same index expression with this endpoint as you would for cross-cluster + search. Index and cluster exclusions are also supported with this endpoint. For + each cluster in the index expression, information is returned about: * Whether + the querying ("local") cluster is currently connected to each remote cluster + in the index expression scope. * Whether each remote cluster is configured with + `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, + or data streams on that cluster that match the index expression. * Whether the + search is likely to have errors returned when you do the cross-cluster search + (including any authorization errors if you do not have permission to query the + index). * Cluster version information, including the Elasticsearch server version. ``_ @@ -3868,8 +4004,9 @@ def segments( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the stream’s backing indices. + Get index segments. Get low-level information about the Lucene segments in index + shards. For data streams, the API returns information about the stream's backing + indices. ``_ @@ -3945,8 +4082,14 @@ def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves store information about replica shards in one or more indices. For - data streams, the API retrieves store information for the stream’s backing indices. + Get index shard stores. Get store information about replica shards in one or + more indices. For data streams, the API retrieves store information for the stream's + backing indices. The index shard stores API returns the following information: + * The node on which each replica shard exists. * The allocation ID for each replica + shard. * A unique ID for each replica shard. * Any errors encountered while opening + the shard index or from an earlier failure. By default, the API returns store + information only for primary shards that are unassigned or have one or more unassigned + replica shards. ``_ @@ -4017,7 +4160,39 @@ def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrinks an existing index into a new index with fewer primary shards. + Shrink an index. Shrink an index into a new index with fewer primary shards. + Before you can shrink an index: * The index must be read-only. * A copy of every + shard in the index must reside on the same node. * The index must have a green + health status. To make shard allocation easier, we recommend you also remove + the index's replica shards. You can later re-add replica shards as part of the + shrink operation. The requested number of primary shards in the target index + must be a factor of the number of shards in the source index. For example an + index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an + index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards + in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must + be present on the same node. The current write index on a data stream cannot + be shrunk. In order to shrink the current write index, the data stream must first + be rolled over so that a new write index is created and then the previous write + index can be shrunk. 
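Before running a cross-cluster search, the resolve cluster checks described above can be exercised directly. A sketch assuming the index expression is passed as the `name` argument and that a remote alias `cluster_two` is configured; both are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Report connectivity, skip_unavailable, matching indices, and version
# information for the local cluster and a hypothetical remote alias.
resp = client.indices.resolve_cluster(name="my-index-*,cluster_two:my-index-*")
for cluster, info in resp.items():
    print(cluster, info)
```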
A shrink operation: * Creates a new target index with the + same definition as the source index, but with a smaller number of primary shards. + * Hard-links segments from the source index into the target index. If the file + system does not support hard-linking, then all segments are copied into the new + index, which is a much more time consuming process. Also if using multiple data + paths, shards on different data paths require a full copy of segment files if + they are not on the same disk since hardlinks do not work across disks. * Recovers + the target index as though it were a closed index which had just been re-opened. + Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + * The target index must not exist. * The source index must have more primary + shards than the target index. * The number of primary shards in the target index + must be a factor of the number of primary shards in the source index. The source + index must have more primary shards than the target index. * The index must not + contain more than 2,147,483,519 documents in total across all shards that will + be shrunk into a single shard on the target index as this is the maximum number + of docs that can fit into a single shard. * The node handling the shrink process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4302,7 +4477,27 @@ def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Splits an existing index into a new index with more primary shards. + Split an index. Split an index into a new index with more primary shards. * Before + you can split an index: * The index must be read-only. * The cluster health status + must be green. The number of times the index can be split (and the number of + shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` + setting. The number of routing shards specifies the hashing space that is used + internally to distribute documents across shards with consistent hashing. For + instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x + 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target + index with the same definition as the source index, but with a larger number + of primary shards. * Hard-links segments from the source index into the target + index. If the file system doesn't support hard-linking, all segments are copied + into the new index, which is a much more time consuming process. * Hashes all + documents again, after low level files are created, to delete documents that + belong to a different shard. * Recovers the target index as though it were a + closed index which had just been re-opened. IMPORTANT: Indices can only be split + if they satisfy the following requirements: * The target index must not exist. + * The source index must have fewer primary shards than the target index. * The + number of primary shards in the target index must be a multiple of the number + of primary shards in the source index. * The node handling the split process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4394,8 +4589,14 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistics for one or more indices. For data streams, the API retrieves - statistics for the stream’s backing indices. 
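The shrink prerequisites listed above (read-only source, a copy of every shard on one node) are usually satisfied with a settings update before calling `indices.shrink`. A hedged sketch; the node name, index names, and shard counts are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Relocate a copy of every shard to one node and block writes on the source.
client.indices.put_settings(
    index="my-source-index",
    settings={
        "index.routing.allocation.require._name": "shrink-node-1",  # hypothetical node
        "index.blocks.write": True,
    },
)

# Shrink 8 primary shards down to 2 (a factor of the source shard count).
client.indices.shrink(
    index="my-source-index",
    target="my-shrunk-index",
    settings={
        "index.number_of_shards": 2,
        # Clear the allocation filter on the target index.
        "index.routing.allocation.require._name": None,
    },
)
```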
+ Get index statistics. For data streams, the API retrieves statistics for the + stream's backing indices. By default, the returned statistics are index-level + with `primaries` and `total` aggregations. `primaries` are the values for only + the primary shards. `total` are the accumulated values for both primary and replica + shards. To get shard-level statistics, set the `level` parameter to `shards`. + NOTE: When moving to another node, the shard-level statistics for a shard are + cleared. Although the shard is no longer part of the node, that node retains + any node-level statistics to which the shard contributed. ``_ @@ -4498,7 +4699,8 @@ def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. + Unfreeze an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. ``_ diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 2fc2a8de6..39e861733 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -255,7 +255,21 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint + Create an inference endpoint. When you create an inference endpoint, the associated + machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before + using it. To verify the deployment status, use the get trained model statistics + API. Look for `"state": "fully_allocated"` in the response and ensure that the + `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating + multiple endpoints for the same model unless required, as each endpoint consumes + significant resources. IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google + Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + uploaded through Eland, the inference APIs offer an alternative way to use and + manage trained models. However, if you do not plan to use the inference APIs + to use these models or if you want to use non-NLP models, use the machine learning + trained model APIs. ``_ diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index 445b5fe61..9fcd4064a 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -77,6 +77,59 @@ def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Delete IP geolocation database configurations. + + ``_ + + :param id: A comma-separated list of IP location database configurations. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. 
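The `level` behaviour described above for the index stats API is the main knob. A minimal sketch requesting shard-level statistics for one illustrative index:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Index-level stats by default; level="shards" drills down to per-shard values.
resp = client.indices.stats(index="my-index", level="shards")

# Primary-only document stats for all matched indices
# (response layout: _all -> primaries / total).
print(resp["_all"]["primaries"]["docs"])
```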
A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. A value + of `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.delete_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def delete_pipeline( self, @@ -217,6 +270,58 @@ def get_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def get_ip_location_database( + self, + *, + id: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get IP geolocation database configurations. + + ``_ + + :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard + (`*`) expressions are supported. To get all database configurations, omit + this parameter or use `*`. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + """ + __path_parts: t.Dict[str, str] + if id not in SKIP_IN_PATH: + __path_parts = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + else: + __path_parts = {} + __path = "/_ingest/ip_location/database" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.get_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def get_pipeline( self, @@ -328,8 +433,8 @@ def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update GeoIP database configurations. Create or update IP geolocation - database configurations. + Create or update a GeoIP database configuration. Refer to the create or update + IP geolocation database configuration API. 
``_ @@ -384,6 +489,74 @@ def put_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="configuration", + ) + def put_ip_location_database( + self, + *, + id: str, + configuration: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create or update an IP geolocation database configuration. + + ``_ + + :param id: The database configuration identifier. + :param configuration: + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response from all relevant nodes in + the cluster after updating the cluster metadata. If no response is received + before the timeout expires, the cluster metadata update still applies but + the response indicates that it was not completely acknowledged. A value of + `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if configuration is None and body is None: + raise ValueError( + "Empty value passed for parameters 'configuration' and 'body', one of them should be set." + ) + elif configuration is not None and body is not None: + raise ValueError("Cannot set both 'configuration' and 'body'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = configuration if configuration is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="ingest.put_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "deprecated", diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 43135d5a7..bcdee2f89 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -35,7 +35,9 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes licensing information for the cluster + Delete the license. When the license expires, your subscription level reverts + to Basic. If the operator privileges feature is enabled, only operator users + can use this API. ``_ """ @@ -72,9 +74,11 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Returns information about your Elastic license, including - its type, its status, when it was issued, and when it expires. 
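The new IP geolocation database endpoints added in this patch (`put_ip_location_database`, `get_ip_location_database`, `delete_ip_location_database`) mirror the existing GeoIP ones. A sketch that creates, lists, and deletes a configuration; the configuration body shown is illustrative only:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create or update an IP geolocation database configuration
# (the configuration fields below are assumed/illustrative).
client.ingest.put_ip_location_database(
    id="my-ip-database",
    configuration={
        "name": "GeoIP2-City",
        "maxmind": {"account_id": "1234567"},
    },
)

# List all configurations, then delete the one just created.
print(client.ingest.get_ip_location_database())
client.ingest.delete_ip_location_database(id="my-ip-database")
```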
For more information - about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). + Get license information. Get information about your Elastic license including + its type, its status, when it was issued, and when it expires. NOTE: If the master + node is generating a new cluster state, the get license API may return a `404 + Not Found` response. If you receive an unexpected 404 response after cluster + startup, wait a short period and retry the request. ``_ @@ -120,7 +124,7 @@ def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the basic license. + Get the basic license status. ``_ """ @@ -155,7 +159,7 @@ def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the trial license. + Get the trial status. ``_ """ @@ -196,7 +200,14 @@ def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the license for the cluster. + Update the license. You can update your license at runtime without shutting down + your nodes. License updates take effect immediately. If the license you are installing + does not support all of the features that were available with your previous license, + however, you are notified in the response. You must then re-submit the API request + with the acknowledge parameter set to true. NOTE: If Elasticsearch security features + are enabled and you are installing a gold or higher license, you must enable + TLS on the transport networking layer before you install the license. If the + operator privileges feature is enabled, only operator users can use this API. ``_ @@ -250,12 +261,13 @@ def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The start basic API enables you to initiate an indefinite basic license, which - gives access to all the basic features. If the basic license does not support - all of the features that are available with your current license, however, you - are notified in the response. You must then re-submit the API request with the - acknowledge parameter set to true. To check the status of your basic license, - use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + Start a basic license. Start an indefinite basic license, which gives access + to all the basic features. NOTE: In order to start a basic license, you must + not currently have a basic license. If the basic license does not support all + of the features that are available with your current license, however, you are + notified in the response. You must then re-submit the API request with the `acknowledge` + parameter set to `true`. To check the status of your basic license, use the get + basic license API. ``_ @@ -297,8 +309,12 @@ def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - The start trial API enables you to start a 30-day trial, which gives access to - all subscription features. + Start a trial. Start a 30-day trial, which gives access to all subscription features. + NOTE: You are allowed to start a trial only if your cluster has not already activated + a trial for the current major product version. For example, if you have already + activated a trial for v8.0, you cannot start a new trial until v9.0. 
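As the license descriptions above note, switching levels may drop features, in which case the request has to be re-submitted with `acknowledge` set to `true`. A minimal sketch:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Check the current basic license status, then start an indefinite basic
# license, acknowledging any features the basic level does not support.
print(client.license.get_basic_status())
resp = client.license.post_start_basic(acknowledge=True)
print(resp)
```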
You can, + however, request an extended trial at https://www.elastic.co/trialextension. + To check the status of your trial, use the get trial status API. ``_ diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index 040d5e030..0d5585db7 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -36,11 +36,12 @@ def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline used for Logstash Central Management. + Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central + Management. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -76,11 +77,11 @@ def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves pipelines used for Logstash Central Management. + Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. ``_ - :param id: Comma-separated list of pipeline identifiers. + :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -123,11 +124,12 @@ def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline used for Logstash Central Management. + Create or update a Logstash pipeline. Create a pipeline that is used for Logstash + Central Management. If the specified pipeline exists, it is replaced. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. :param pipeline: """ if id in SKIP_IN_PATH: diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index b0bfb2f01..ef0dfa625 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -36,9 +36,10 @@ def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about different cluster, node, and index level settings - that use deprecated features that will be removed or changed in the next major - version. + Get deprecation information. Get information about different cluster, node, and + index level settings that use deprecated features that will be removed or changed + in the next major version. TIP: This APIs is designed for indirect use by the + Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ @@ -81,7 +82,11 @@ def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Find out whether system features need to be upgraded or not + Get feature migration information. Version upgrades sometimes require changes + to how features store configuration information and data in system indices. Check + which features need to be migrated and the status of any migrations that are + in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend you use the Upgrade Assistant. ``_ """ @@ -116,7 +121,11 @@ def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Begin upgrades for system features + Start the feature migration. Version upgrades sometimes require changes to how + features store configuration information and data in system indices. This API + starts the automatic migration process. 
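The three migration APIs documented above form a simple check-then-migrate loop, normally driven by the Upgrade Assistant. A hedged sketch, assuming the status response carries a `migration_status` field with a `MIGRATION_NEEDED` value:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Review deprecated settings before a major-version upgrade.
print(client.migration.deprecations())

# Check whether any system features still need their indices migrated
# (top-level field name and value are assumed).
status = client.migration.get_feature_upgrade_status()
if status.get("migration_status") == "MIGRATION_NEEDED":
    # Kick off the automatic migration; some functionality may be
    # temporarily unavailable while it runs.
    client.migration.post_feature_upgrade()
```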
Some functionality might be temporarily + unavailable during the migration process. TIP: The API is designed for indirect + use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ """ diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index df17aa247..86c717e78 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -2488,6 +2488,7 @@ def get_trained_models( ], ] ] = None, + include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2514,6 +2515,8 @@ def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. + :param include_model_definition: parameter is deprecated! Use [include=definition] + instead :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ def get_trained_models( __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -2697,7 +2702,7 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Return ML defaults and limits. Returns defaults and limits used by machine learning. + Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out @@ -3169,9 +3174,11 @@ def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ def put_data_frame_analytics( Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. 
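The deprecation note above for `include_model_definition` points at `include=definition` as the replacement. A sketch of the recommended form; the model ID is illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Prefer include="definition" over the deprecated include_model_definition flag.
resp = client.ml.get_trained_models(
    model_id="my-trained-model",  # hypothetical model ID
    include="definition",
)
print(resp["count"])
```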
If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. @@ -3473,6 +3488,8 @@ def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. 
They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: @@ -5469,7 +5521,7 @@ def validate_detector( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Validates an anomaly detection detector. + Validate an anomaly detection job. ``_ diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index 923a9b652..e4786a8ce 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -42,7 +42,8 @@ def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Used by the monitoring features to send monitoring data. + Send monitoring data. This API is used by the monitoring features to send monitoring + data. ``_ diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index e933f865d..67c66e3bb 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -43,7 +43,20 @@ def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing rollup job. + Delete a rollup job. A job must be stopped before it can be deleted. If you attempt + to delete a started job, an error occurs. Similarly, if you attempt to delete + a nonexistent job, an exception occurs. 
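The anomaly detection job configuration discussed above needs only the required `analysis_config` and `data_description` pieces to get started. A minimal sketch; the job ID, field names, and bucket span are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

client.ml.put_job(
    job_id="response-time-job",  # hypothetical job ID
    analysis_config={
        "bucket_span": "15m",
        "detectors": [{"function": "mean", "field_name": "responsetime"}],
    },
    data_description={"time_field": "@timestamp"},
)
```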
IMPORTANT: When you delete a job, you + remove only the process that is actively monitoring and rolling up data. The + API does not delete any previously rolled up data. This is by design; a user + may wish to roll up a static data set. Because the data set is static, after + it has been fully rolled up there is no need to keep the indexing rollup job + around (as there will be no new data). Thus the job can be deleted, leaving behind + the rolled up data for analysis. If you wish to also remove the rollup data and + the rollup index contains the data for only a single job, you can delete the + whole rollup index. If the rollup index stores data from several jobs, you must + issue a delete-by-query that targets the rollup job's identifier in the rollup + index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": + { "_rollup.id": "the_rollup_job_id" } } } ``` ``_ @@ -84,7 +97,11 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the configuration, stats, and status of rollup jobs. + Get rollup job information. Get the configuration, stats, and status of rollup + jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + If a job was created, ran for a while, then was deleted, the API does not return + any details about it. For details about a historical rollup job, the rollup capabilities + API may be more useful. ``_ @@ -129,8 +146,15 @@ def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the capabilities of any rollup jobs that have been configured for a specific - index or index pattern. + Get the rollup job capabilities. Get the capabilities of any rollup jobs that + have been configured for a specific index or index pattern. This API is useful + because a rollup job is often configured to rollup only a subset of fields from + the source index. Furthermore, only certain aggregations can be configured for + various fields, leading to a limited subset of functionality depending on that + configuration. This API enables you to inspect an index and determine: 1. Does + this index have associated rollup data somewhere in the cluster? 2. If yes to + the first question, what fields were rolled up, what aggregations can be performed, + and where does the data live? ``_ @@ -175,8 +199,12 @@ def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (for example, - the index where rollup data is stored). + Get the rollup index capabilities. Get the rollup capabilities of all jobs inside + of a rollup index. A single rollup index may store the data for multiple rollup + jobs and may have a variety of capabilities depending on those jobs. This API + enables you to determine: * What jobs are stored in an index (or indices specified + via a pattern)? * What target indices were rolled up, what fields were used in + those rollups, and what aggregations can be performed on each job? ``_ @@ -239,7 +267,16 @@ def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a rollup job. + Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with + no rollup usage will fail with a message about the deprecation and planned removal + of rollup features. A cluster needs to contain either a rollup job or a rollup + index in order for this API to be allowed to run. 
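Because a rollup job must be stopped before it can be deleted, the two calls above are usually paired; cleaning up previously rolled-up data with the delete-by-query shown in the description is a separate step. A minimal sketch reusing the docstring's example job ID:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Stop the job and block until it has fully stopped, then delete it.
# Note: this removes only the job itself, not the previously rolled-up data.
client.rollup.stop_job(id="the_rollup_job_id", wait_for_completion=True)
client.rollup.delete_job(id="the_rollup_job_id")
```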
The rollup job configuration + contains all the details about how the job should run, when it indexes documents, + and what future queries will be able to run against the rollup index. There are + three main sections to the job configuration: the logistical details about the + job (for example, the cron schedule), the fields that are used for grouping, + and what metrics to collect for each group. Jobs are created in a `STOPPED` state. + You can start them with the start rollup jobs API. ``_ @@ -356,7 +393,11 @@ def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard Query DSL. + Search rolled-up data. The rollup search endpoint is needed because, internally, + rolled-up documents utilize a different document structure than the original + data. It rewrites standard Query DSL into a format that matches the rollup documents + then takes the response and rewrites it back to what a client would expect given + the original query. ``_ @@ -420,7 +461,8 @@ def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an existing, stopped rollup job. + Start rollup jobs. If you try to start a job that does not exist, an exception + occurs. If you try to start a job that is already started, nothing happens. ``_ @@ -463,7 +505,8 @@ def stop_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops an existing, started rollup job. + Stop rollup jobs. If you try to stop a job that does not exist, an exception + occurs. If you try to stop a job that is already stopped, nothing happens. ``_ diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index d13093fb0..9a3bfb313 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -216,7 +216,7 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing search applications. + Get search applications. Get information about search applications. ``_ @@ -251,6 +251,71 @@ def list( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="payload", + ) + @_stability_warning(Stability.EXPERIMENTAL) + def post_behavioral_analytics_event( + self, + *, + collection_name: str, + event_type: t.Union[str, t.Literal["page_view", "search", "search_click"]], + payload: t.Optional[t.Any] = None, + body: t.Optional[t.Any] = None, + debug: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a behavioral analytics collection event. + + ``_ + + :param collection_name: The name of the behavioral analytics collection. + :param event_type: The analytics event type. + :param payload: + :param debug: Whether the response type has to include more details + """ + if collection_name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'collection_name'") + if event_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'event_type'") + if payload is None and body is None: + raise ValueError( + "Empty value passed for parameters 'payload' and 'body', one of them should be set." 
+ ) + elif payload is not None and body is not None: + raise ValueError("Cannot set both 'payload' and 'body'") + __path_parts: t.Dict[str, str] = { + "collection_name": _quote(collection_name), + "event_type": _quote(event_type), + } + __path = f'/_application/analytics/{__path_parts["collection_name"]}/event/{__path_parts["event_type"]}' + __query: t.Dict[str, t.Any] = {} + if debug is not None: + __query["debug"] = debug + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = payload if payload is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.post_behavioral_analytics_event", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="search_application", ) @@ -351,6 +416,70 @@ def put_behavioral_analytics( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("params",), + ignore_deprecated_options={"params"}, + ) + @_stability_warning(Stability.EXPERIMENTAL) + def render_query( + self, + *, + name: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + params: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Render a search application query. Generate an Elasticsearch query using the + specified query parameters and the search template associated with the search + application or a default template if none is specified. If a parameter used in + the search template is not specified in `params`, the parameter's default value + will be used. The API returns the specific Elasticsearch query that would be + generated and run by calling the search application search API. You must have + `read` privileges on the backing alias of the search application. + + ``_ + + :param name: The name of the search application to render teh query for. 
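The new render query endpoint added above returns the concrete Elasticsearch query a search application would run, without executing it. A sketch with an illustrative application name and template parameters:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Render the query that the search application's template would produce
# for these parameters, without running the search.
rendered = client.search_application.render_query(
    name="my-search-app",                      # hypothetical application name
    params={"query_string": "rock climbing"},  # illustrative template params
)
print(rendered)
```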
+ :param params: + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"name": _quote(name)} + __path = ( + f'/_application/search_application/{__path_parts["name"]}/_render_query' + ) + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if params is not None: + __body["params"] = params + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.render_query", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 34384a6df..e4ba75989 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -44,7 +44,8 @@ def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve node-level cache statistics about searchable snapshots. + Get cache statistics. Get statistics about the shared cache for partially mounted + indices. ``_ @@ -103,7 +104,8 @@ def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache of searchable snapshots. + Clear the cache. Clear indices and data streams from the shared cache for partially + mounted indices. ``_ @@ -175,7 +177,9 @@ def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot as a searchable index. + Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + this API for snapshots managed by index lifecycle management (ILM). Manually + mounting ILM-managed snapshots can interfere with ILM processes. ``_ @@ -255,7 +259,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve shard-level statistics about searchable snapshots. + Get searchable snapshot statistics. ``_ diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index e4064fcb4..dc956eeea 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -2324,6 +2324,230 @@ def invalidate_token( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("nonce", "redirect_uri", "state", "realm"), + ) + def oidc_authenticate( + self, + *, + nonce: t.Optional[str] = None, + redirect_uri: t.Optional[str] = None, + state: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Authenticate OpenID Connect. Exchange an OpenID Connect authentication response + message for an Elasticsearch internal access token and refresh token that can + be subsequently used for authentication. 
Elasticsearch exposes all the necessary + OpenID Connect related functionality with the OpenID Connect APIs. These APIs + are used internally by Kibana in order to provide OpenID Connect based authentication, + but can also be used by other, custom web applications or other clients. + + ``_ + + :param nonce: Associate a client session with an ID token and mitigate replay + attacks. This value needs to be the same as the one that was provided to + the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + and included in the response to that call. + :param redirect_uri: The URL to which the OpenID Connect Provider redirected + the User Agent in response to an authentication request after a successful + authentication. This URL must be provided as-is (URL encoded), taken from + the body of the response or as the value of a location header in the response + from the OpenID Connect Provider. + :param state: Maintain state between the authentication request and the response. + This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` + API or the one that was generated by Elasticsearch and included in the response + to that call. + :param realm: The name of the OpenID Connect realm. This property is useful in + cases where multiple realms are defined. + """ + if nonce is None and body is None: + raise ValueError("Empty value passed for parameter 'nonce'") + if redirect_uri is None and body is None: + raise ValueError("Empty value passed for parameter 'redirect_uri'") + if state is None and body is None: + raise ValueError("Empty value passed for parameter 'state'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/authenticate" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if nonce is not None: + __body["nonce"] = nonce + if redirect_uri is not None: + __body["redirect_uri"] = redirect_uri + if state is not None: + __body["state"] = state + if realm is not None: + __body["realm"] = realm + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_authenticate", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("access_token", "refresh_token"), + ) + def oidc_logout( + self, + *, + access_token: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + refresh_token: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Logout of OpenID Connect. Invalidate an access token and a refresh token that + were generated as a response to the `/_security/oidc/authenticate` API. If the + OpenID Connect authentication realm in Elasticsearch is accordingly configured, + the response to this call will contain a URI pointing to the end session endpoint + of the OpenID Connect Provider in order to perform single logout. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. 
These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param access_token: The access token to be invalidated. + :param refresh_token: The refresh token to be invalidated. + """ + if access_token is None and body is None: + raise ValueError("Empty value passed for parameter 'access_token'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/logout" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access_token is not None: + __body["access_token"] = access_token + if refresh_token is not None: + __body["refresh_token"] = refresh_token + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_logout", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("iss", "login_hint", "nonce", "realm", "state"), + ) + def oidc_prepare_authentication( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + iss: t.Optional[str] = None, + login_hint: t.Optional[str] = None, + nonce: t.Optional[str] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + state: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request + as a URL string based on the configuration of the OpenID Connect authentication + realm in Elasticsearch. The response of this API is a URL pointing to the Authorization + Endpoint of the configured OpenID Connect Provider, which can be used to redirect + the browser of the user in order to continue the authentication process. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param iss: In the case of a third party initiated single sign on, this is the + issuer identifier for the OP that the RP is to send the authentication request + to. It cannot be specified when *realm* is specified. One of *realm* or *iss* + is required. + :param login_hint: In the case of a third party initiated single sign on, it + is a string value that is included in the authentication request as the *login_hint* + parameter. This parameter is not valid when *realm* is specified. + :param nonce: The value used to associate a client session with an ID token and + to mitigate replay attacks. If the caller of the API does not provide a value, + Elasticsearch will generate one with sufficient entropy and return it in + the response. + :param realm: The name of the OpenID Connect realm in Elasticsearch the configuration + of which should be used in order to generate the authentication request. + It cannot be specified when *iss* is specified. 
One of *realm* or *iss* is + required. + :param state: The value used to maintain state between the authentication request + and the response, typically used as a Cross-Site Request Forgery mitigation. + If the caller of the API does not provide a value, Elasticsearch will generate + one with sufficient entropy and return it in the response. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/prepare" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if iss is not None: + __body["iss"] = iss + if login_hint is not None: + __body["login_hint"] = login_hint + if nonce is not None: + __body["nonce"] = nonce + if realm is not None: + __body["realm"] = realm + if state is not None: + __body["state"] = state + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_prepare_authentication", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="privileges", ) diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index e08eb469a..bfa561089 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -42,8 +42,13 @@ def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and - ECK. Direct use is not supported. + Cancel node shutdown preparations. Remove a node from the shutdown list so it + can resume normal operations. You must explicitly clear the shutdown request + when a node rejoins the cluster or when a node has permanently left the cluster. + Shutdown requests are never removed automatically by Elasticsearch. NOTE: This + feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, + and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator + privileges feature is enabled, you must be an operator to use this API. ``_ @@ -98,8 +103,13 @@ def get_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve status of a node or nodes that are currently marked as shutting down. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the shutdown status. Get information about nodes that are ready to be shut + down, have shut down preparations still in progress, or have stalled. The API + returns status information for each part of the shut down process. NOTE: This + feature is designed for indirect use by Elasticsearch Service, Elastic Cloud + Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If + the operator privileges feature is enabled, you must be an operator to use this + API. ``_ @@ -166,8 +176,17 @@ def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct - use is not supported. + Prepare a node to be shut down. NOTE: This feature is designed for indirect use + by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. 
If the operator privileges feature is enabled, you + must be an operator to use this API. The API migrates ongoing tasks and index + shards to other nodes as needed to prepare a node to be restarted or shut down + and removed from the cluster. This ensures that Elasticsearch can be stopped + safely with minimal disruption to the cluster. You must specify the type of shutdown: + `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, + you can use this API to change the shutdown type. IMPORTANT: This API does NOT + terminate the Elasticsearch process. Monitor the node shutdown status to determine + when it is safe to stop Elasticsearch. ``_ diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index 86f04928f..16737080b 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -36,7 +36,9 @@ def delete_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing snapshot lifecycle policy. + Delete a policy. Delete a snapshot lifecycle policy definition. This operation + prevents any future snapshots from being taken but does not cancel in-progress + snapshots or remove previously-taken snapshots. ``_ @@ -76,8 +78,10 @@ def execute_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Immediately creates a snapshot according to the lifecycle policy, without waiting - for the scheduled time. + Run a policy. Immediately create a snapshot according to the snapshot lifecycle + policy without waiting for the scheduled time. The snapshot policy is normally + applied according to its schedule, but you might want to manually run a policy + before performing an upgrade or other maintenance. ``_ @@ -116,7 +120,9 @@ def execute_retention( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes any snapshots that are expired according to the policy's retention rules. + Run a retention policy. Manually apply the retention policy to force immediate + removal of snapshots that are expired according to the snapshot lifecycle policy + retention rules. The retention policy is normally applied according to its schedule. ``_ """ @@ -152,8 +158,8 @@ def get_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves one or more snapshot lifecycle policy definitions and information about - the latest snapshot attempts. + Get policy information. Get snapshot lifecycle policy definitions and information + about the latest snapshot attempts. ``_ @@ -195,8 +201,8 @@ def get_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns global and policy-level statistics about actions taken by snapshot lifecycle - management. + Get snapshot lifecycle management statistics. Get global and policy-level statistics + about actions taken by snapshot lifecycle management. ``_ """ @@ -231,7 +237,7 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of snapshot lifecycle management (SLM). + Get the snapshot lifecycle management status. ``_ """ @@ -277,12 +283,14 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a snapshot lifecycle policy. + Create or update a policy. Create or update a snapshot lifecycle policy. If the + policy already exists, this request increments the policy version. Only the latest + version of a policy is stored. 
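For illustration, a minimal sketch of creating a policy with the Python client; the cluster address, policy ID, repository name, and schedule below are assumed values, not part of the generated API surface shown above:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create (or bump the version of) an SLM policy that snapshots data streams
# matching "data-*" every night and keeps the snapshots for 30 days.
client.slm.put_lifecycle(
    policy_id="nightly-snapshots",      # hypothetical policy ID
    schedule="0 30 1 * * ?",            # cron trigger: every day at 01:30
    name="<nightly-snap-{now/d}>",      # snapshot name with date math
    repository="my_repository",        # hypothetical registered repository
    config={"indices": ["data-*"], "ignore_unavailable": True},
    retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
)
```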
``_ - :param policy_id: ID for the snapshot lifecycle policy you want to create or - update. + :param policy_id: The identifier for the snapshot lifecycle policy you want to + create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -354,7 +362,9 @@ def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns on snapshot lifecycle management (SLM). + Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts + automatically when a cluster is formed. Manually starting SLM is necessary only + if it has been stopped using the stop SLM API. ``_ """ @@ -389,7 +399,15 @@ def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns off snapshot lifecycle management (SLM). + Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) + operations and the SLM plugin. This API is useful when you are performing maintenance + on a cluster and need to prevent SLM from performing any actions on your data + streams or indices. Stopping SLM does not stop any snapshots that are in progress. + You can manually trigger snapshots with the run snapshot lifecycle policy API + even if SLM is stopped. The API returns a response as soon as the request is + acknowledged, but the plugin might continue to run until in-progress operations + complete and it can be safely stopped. Use the get snapshot lifecycle management + status API to see if SLM is running. ``_ """ diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index 0ffad7ff9..f9a92c078 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -44,8 +44,8 @@ def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Triggers the review of a snapshot repository’s contents and deletes any stale - data not referenced by existing snapshots. + Clean up the snapshot repository. Trigger the review of the contents of a snapshot + repository and delete any stale data not referenced by existing snapshots. ``_ @@ -99,7 +99,8 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones indices from one snapshot into another snapshot in the same repository. + Clone a snapshot. Clone part or all of a snapshot into another snapshot in the + same repository. ``_ @@ -182,7 +183,7 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a snapshot in a repository. + Create a snapshot. Take a snapshot of a cluster or of data streams and indices. ``_ @@ -286,7 +287,11 @@ def create_repository( verify: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a repository. + Create or update a snapshot repository. IMPORTANT: If you are migrating searchable + snapshots, the repository name must be identical in the source and destination + clusters. To register a snapshot repository, the cluster's global metadata must + be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` + and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. ``_ @@ -346,7 +351,7 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more snapshots. + Delete snapshots.
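For illustration, a minimal sketch of deleting a snapshot with the Python client; the repository and snapshot names are hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Remove a single named snapshot from a registered repository.
client.snapshot.delete(
    repository="my_repository",      # hypothetical repository name
    snapshot="snapshot-2025.01.06",  # hypothetical snapshot name
)
```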
``_ @@ -397,7 +402,9 @@ def delete_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a repository. + Delete snapshot repositories. When a repository is unregistered, Elasticsearch + removes only the reference to the location where the repository is storing the + snapshots. The snapshots themselves are left untouched and in place. ``_ @@ -471,7 +478,7 @@ def get( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about a snapshot. + Get snapshot information. ``_ @@ -583,7 +590,7 @@ def get_repository( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about a repository. + Get snapshot repository information. ``_ @@ -642,7 +649,40 @@ def repository_verify_integrity( verify_blob_contents: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the integrity of the contents of a snapshot repository + Verify the repository integrity. Verify the integrity of the contents of a snapshot + repository. This API enables you to perform a comprehensive check of the contents + of a repository, looking for any anomalies in its data or metadata which might + prevent you from restoring snapshots from the repository or which might cause + future snapshot create or delete operations to fail. If you suspect the integrity + of the contents of one of your snapshot repositories, cease all write activity + to this repository immediately, set its `read_only` option to `true`, and use + this API to verify its integrity. Until you do so: * It may not be possible to + restore some snapshots from this repository. * Searchable snapshots may report + errors when searched or may have unassigned shards. * Taking snapshots into this + repository may fail or may appear to succeed but have created a snapshot which + cannot be restored. * Deleting snapshots from this repository may fail or may + appear to succeed but leave the underlying data on disk. * Continuing to write + to the repository while it is in an invalid state may causing additional damage + to its contents. If the API finds any problems with the integrity of the contents + of your repository, Elasticsearch will not be able to repair the damage. The + only way to bring the repository back into a fully working state after its contents + have been damaged is by restoring its contents from a repository backup which + was taken before the damage occurred. You must also identify what caused the + damage and take action to prevent it from happening again. If you cannot restore + a repository backup, register a new repository and use this for all future snapshot + operations. In some cases it may be possible to recover some of the contents + of a damaged repository, either by restoring as many of its snapshots as needed + and taking new snapshots of the restored data, or by using the reindex API to + copy data from any searchable snapshots mounted from the damaged repository. + Avoid all operations which write to the repository while the verify repository + integrity API is running. If something changes the repository contents while + an integrity verification is running then Elasticsearch may incorrectly report + having detected some anomalies in its contents due to the concurrent writes. + It may also incorrectly fail to report some anomalies that the concurrent writes + prevented it from detecting. NOTE: This API is intended for exploratory use by + humans. 
You should expect the request parameters and the response format to vary + in future versions. NOTE: This API may not work correctly in a mixed-version + cluster. ``_ @@ -739,7 +779,20 @@ def restore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Restores a snapshot. + Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. + You can restore a snapshot only to a running cluster with an elected master node. + The snapshot repository must be registered and available to the cluster. The + snapshot and cluster versions must be compatible. To restore a snapshot, the + cluster's global metadata must be writable. Ensure there aren't any cluster blocks + that prevent writes. The restore operation ignores index blocks. Before you restore + a data stream, ensure the cluster contains a matching index template with data + streams enabled. To check, use the index management feature in Kibana or the + get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream + ``` If no such template exists, you can create one or restore a cluster state + that contains one. Without a matching index template, a data stream can't roll + over or create backing indices. If your snapshot contains data from App Search + or Workplace Search, you must restore the Enterprise Search encryption key before + you restore the snapshot. ``_ @@ -832,7 +885,18 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about the status of a snapshot. + Get the snapshot status. Get a detailed description of the current state for + each shard participating in the snapshot. Note that this API should be used only + to obtain detailed shard-level information for ongoing snapshots. If this detail + is not needed or you want to obtain information about one or more existing snapshots, + use the get snapshot API. WARNING: Using the API to return the status of any + snapshots other than currently running snapshots can be expensive. The API requires + a read from the repository for each shard in each snapshot. For example, if you + have 100 snapshots with 1,000 shards each, an API request that includes all snapshots + will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency + of your storage, such requests can take an extremely long time to return results. + These requests can also tax machine resources and, when using cloud storage, + incur high processing costs. ``_ @@ -891,7 +955,8 @@ def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies a repository. + Verify a snapshot repository. Check for common misconfigurations in a snapshot + repository. ``_ diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index bdba7f09b..24886bc75 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -47,7 +47,17 @@ def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a task, if it can be cancelled through an API. + Cancel a task. A task may continue to run for some time after it has been cancelled + because it may not be able to safely stop its current activity straight away. + It is also possible that Elasticsearch must complete its work on other tasks + before it can process the cancellation.
The get task information API will continue + to list these cancelled tasks until they complete. The cancelled flag in the + response indicates that the cancellation command has been processed and the task + will stop as soon as possible. To troubleshoot why a cancelled task does not + complete promptly, use the get task information API with the `?detailed` parameter + to identify the other tasks the system is running. You can also use the node + hot threads API to obtain detailed information about the work the system is doing + instead of completing the cancelled task. ``_ @@ -107,8 +117,7 @@ def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Returns information about the tasks currently executing - in the cluster. + Get task information. Get information about a task currently running in the cluster. ``_ @@ -166,15 +175,16 @@ def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The task management API returns information about tasks currently executing on - one or more nodes in the cluster. + Get all tasks. Get information about the tasks currently running on one or more + nodes in the cluster. ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. :param detailed: If `true`, the response includes detailed information about - shard recoveries. + shard recoveries. This information is useful to distinguish tasks from each + other but is more costly to run. :param group_by: Key used to group tasks in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index 48f02393a..01ec7c5f8 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -25,6 +25,349 @@ class TextStructureClient(NamespacedClient): + @_rewrite_parameters() + def find_field_structure( + self, + *, + field: str, + index: str, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + documents_to_sample: t.Optional[int] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of a text field. Find the structure of a text field in an + Elasticsearch index. + + ``_ + + :param field: The field that should be analyzed. + :param index: The name of the index that contains the analyzed field. + :param column_names: If `format` is set to `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. 
If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param documents_to_sample: The number of documents to include in the structural + analysis. The minimum value is 2. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output. The intention + in that situation is that a user who knows the meanings will rename the fields + before using them. + :param explain: If true, the response includes a field named `explanation`, which + is an array of strings that indicate how the structure finder produced its + result. + :param format: The high level structure of the text. By default, the API chooses + the format. In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is set to + delimited and the delimiter is not set, however, the API tolerates up to + 5% of rows that have a different number of columns than the first row. + :param grok_pattern: If the format is `semi_structured_text`, you can specify + a Grok pattern that is used to extract fields from every message in the text. + The name of the timestamp field in the Grok pattern must match what is specified + in the `timestamp_field` parameter. If that parameter is not specified, the + name of the timestamp field in the Grok pattern must match "timestamp". If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample. + :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. 
In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation are also permitted with + the exception of a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages.
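As a usage illustration, a minimal sketch of calling this endpoint from the Python client; the index and field names are hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Analyze the "message" field of a hypothetical "app-logs" index and ask the
# structure finder to explain how it reached its conclusions.
resp = client.text_structure.find_field_structure(
    index="app-logs",
    field="message",
    documents_to_sample=1000,
    ecs_compatibility="v1",
    explain=True,
)
print(resp)  # detected format, field statistics, suggested mappings, and the explanation array
```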
+ """ + if field is None: + raise ValueError("Empty value passed for parameter 'field'") + if index is None: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_field_structure" + __query: t.Dict[str, t.Any] = {} + if field is not None: + __query["field"] = field + if index is not None: + __query["index"] = index + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if documents_to_sample is not None: + __query["documents_to_sample"] = documents_to_sample + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="text_structure.find_field_structure", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("messages",), + ) + def find_message_structure( + self, + *, + messages: t.Optional[t.Sequence[str]] = None, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of text messages. Find the structure of a list of text messages. + The messages must contain data that is suitable to be ingested into Elasticsearch. + This API provides a starting point for ingesting data into Elasticsearch in a + format that is suitable for subsequent use with other Elastic Stack functionality. + Use this API rather than the find text structure API if your input text has already + been split up into separate messages by some other process. The response from + the API contains: * Sample messages. * Statistics that reveal the most common + values for all fields detected within the text and basic numeric statistics for + numeric fields. 
* Information about the structure of the text, which is useful + when you write ingest configurations to index it or similarly formatted text. + * Appropriate mappings for an Elasticsearch index, which you could use to ingest + the text. All this information can be calculated by the structure finder with + no guidance. However, you can optionally override some of the decisions about + the text structure by specifying one or more query parameters. + + ``_ + + :param messages: The list of messages you want to analyze. + :param column_names: If the format is `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If the format is `delimited`, you can specify the character + used to delimit the values in each row. Only a single character is supported; + the delimiter cannot have multiple characters. By default, the API considers + the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this + default scenario, all rows must have the same number of fields for the delimited + format to be detected. If you specify a delimiter, up to 10% of the rows + can have a different number of columns than the first row. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output, with the intention + that a user who knows the meanings will rename these fields before using them. + :param explain: If this parameter is set to true, the response includes a field + named `explanation`, which is an array of strings that indicate how the structure + finder produced its result. + :param format: The high level structure of the text. By default, the API chooses + the format. In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is `delimited` + and the delimiter is not set, however, the API tolerates up to 5% of rows + that have a different number of columns than the first row. + :param grok_pattern: If the format is `semi_structured_text`, you can specify + a Grok pattern that is used to extract fields from every message in the text. + The name of the timestamp field in the Grok pattern must match what is specified + in the `timestamp_field` parameter. If that parameter is not specified, the + name of the timestamp field in the Grok pattern must match "timestamp". If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample.
+ :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation are also permitted with + the exception of a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages.
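Similarly, a minimal sketch of analyzing a handful of already-split log lines with this endpoint; the sample messages are made up:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Send pre-split messages for analysis instead of a raw text file.
resp = client.text_structure.find_message_structure(
    messages=[
        "2025-01-06T11:15:12Z INFO  starting service",
        "2025-01-06T11:15:13Z WARN  disk usage at 85%",
    ],
    ecs_compatibility="v1",
)
print(resp)  # detected format, field statistics, and suggested mappings for the messages
```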
+ """ + if messages is None and body is None: + raise ValueError("Empty value passed for parameter 'messages'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_message_structure" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + if not __body: + if messages is not None: + __body["messages"] = messages + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="text_structure.find_message_structure", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="text_files", ) @@ -50,8 +393,22 @@ def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Finds the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. + Find the structure of a text file. The text file must contain data that is suitable + to be ingested into Elasticsearch. This API provides a starting point for ingesting + data into Elasticsearch in a format that is suitable for subsequent use with + other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the + data that is posted to this endpoint does not need to be UTF-8 encoded and in + JSON format. It must, however, be text; binary text formats are not currently + supported. The size is limited to the Elasticsearch HTTP receive buffer size, + which defaults to 100 Mb. The response from the API contains: * A couple of messages + from the beginning of the text. * Statistics that reveal the most common values + for all fields detected within the text and basic numeric statistics for numeric + fields. * Information about the structure of the text, which is useful when you + write ingest configurations to index it or similarly formatted text. * Appropriate + mappings for an Elasticsearch index, which you could use to ingest the text. + All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure + by specifying one or more query parameters. ``_ @@ -64,7 +421,7 @@ def find_structure( column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
If the text does not have a header role, columns are named "column1", "column2", - "column3", etc. + "column3", for example. :param delimiter: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers @@ -76,7 +433,9 @@ def find_structure( (disabled or v1, default: disabled). :param explain: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure - finder produced its result. + finder produced its result. If the structure finder produces unexpected results + for some text, use this query parameter to help you determine why the returned + structure was chosen. :param format: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields @@ -114,9 +473,9 @@ def find_structure( whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. - :param timeout: Sets the maximum amount of time that the structure analysis make + :param timeout: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will - be aborted. + be stopped. :param timestamp_field: Optional parameter to specify the timestamp field in the file :param timestamp_format: The Java time format of the timestamp field in the text. @@ -191,7 +550,9 @@ def test_grok_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Tests a Grok pattern on some text. + Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API + indicates whether the lines match the pattern together with the offsets and lengths + of the matched substrings. ``_ diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 49613f26f..b10144783 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -844,13 +844,20 @@ def upgrade_transforms( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades all transforms. This API identifies transforms that have a legacy configuration + Upgrade all transforms. Transforms are compatible across minor versions and between + supported major versions. However, over time, the format of transform configuration + information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains - unchanged. + unchanged. If a transform upgrade step fails, the upgrade stops and an error + is returned about the underlying issue. Resolve the issue then re-run the process + again. A summary is returned when the upgrade is finished. 
To ensure continuous + transforms remain running during a major version upgrade of the cluster – for + example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading + the cluster. You may want to perform a recent cluster backup prior to the upgrade. ``_ diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index dfb71a1be..9378588a8 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -37,7 +37,11 @@ def ack_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Acknowledges a watch, manually throttling the execution of the watch's actions. + Acknowledge a watch. Acknowledging a watch enables you to manually throttle the + execution of the watch's actions. The acknowledgement state of an action is stored + in the `status.actions..ack.state` structure. IMPORTANT: If the specified + watch is currently being executed, this API will return an error The reason for + this behavior is to prevent overwriting the watch status from a watch execution. ``_ @@ -88,7 +92,7 @@ def activate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activates a currently inactive watch. + Activate a watch. A watch can be either active or inactive. ``_ @@ -128,7 +132,7 @@ def deactivate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deactivates a currently active watch. + Deactivate a watch. A watch can be either active or inactive. ``_ @@ -168,7 +172,13 @@ def delete_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a watch from Watcher. + Delete a watch. When the watch is removed, the document representing the watch + in the `.watches` index is gone and it will never be run again. Deleting a watch + does not delete any watch execution records related to this watch from the watch + history. IMPORTANT: Deleting a watch must be done by using only this API. Do + not delete the watch directly from the `.watches` index using the Elasticsearch + delete document API When Elasticsearch security features are enabled, make sure + no write privileges are granted to anyone for the `.watches` index. ``_ @@ -237,13 +247,15 @@ def execute_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - This API can be used to force execution of the watch outside of its triggering - logic or to simulate the watch execution for debugging purposes. For testing - and debugging purposes, you also have fine-grained control on how the watch runs. - You can execute the watch without executing all of its actions or alternatively + Run a watch. This API can be used to force execution of the watch outside of + its triggering logic or to simulate the watch execution for debugging purposes. + For testing and debugging purposes, you also have fine-grained control on how + the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after - execution. + it runs. You can use the run watch API to run watches that are not yet registered + by specifying the watch definition inline. This serves as great tool for testing + and debugging your watches prior to adding them to Watcher. ``_ @@ -326,7 +338,7 @@ def get_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a watch by its ID. + Get a watch. 
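For example, a minimal sketch of fetching a watch and checking whether it is active; the watch ID is hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = client.watcher.get_watch(id="cluster_health_watch")  # hypothetical watch ID
print(resp["found"], resp["status"]["state"])  # whether the watch exists and its active state
```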
``_ @@ -388,7 +400,17 @@ def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new watch, or updates an existing one. + Create or update a watch. When a watch is registered, a new document that represents + the watch is added to the `.watches` index and its trigger is immediately registered + with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler + is the trigger engine. IMPORTANT: You must use Kibana or this API to create a + watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + write privileges on the `.watches` index. When you add a watch you can also define + its initial active state by setting the *active* parameter. When Elasticsearch + security features are enabled, your watch can index or search only on indices + for which the user that stored the watch has privileges. If the user is able + to read index `a`, but not index `b`, the same will apply when the watch runs. ``_ @@ -485,7 +507,8 @@ def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves stored watches. + Query watches. Get all registered watches in a paginated manner and optionally + filter watches by a query. ``_ @@ -555,7 +578,7 @@ def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts Watcher if it is not already running. + Start the watch service. Start the Watcher service if it is not already running. ``_ """ @@ -612,7 +635,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current Watcher metrics. + Get Watcher statistics. ``_ @@ -658,7 +681,7 @@ def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops Watcher if it is running. + Stop the watch service. Stop the Watcher service if it is running. ``_ """ diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index ddcdc3c54..6c5073b14 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -43,7 +43,10 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides general information about the installed X-Pack features. + Get information. The information provided by the API includes: * Build information + including the build number and timestamp. * License information about the currently + installed license. * Feature information for the features that are currently + enabled and available under the current license. ``_ @@ -87,8 +90,9 @@ def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API provides information about which features are currently enabled and - available under the current license and some usage statistics. + Get usage information. Get information about the features that are currently + enabled and available under the current license. The API also provides some usage + statistics. ``_
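For illustration, a minimal sketch of calling the X-Pack info and usage endpoints with the Python client, assuming a reachable cluster:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

info = client.xpack.info(categories=["build", "license", "features"])  # build, license, and feature details
usage = client.xpack.usage()                                            # per-feature usage statistics
print(info["license"]["type"])
print(usage["watcher"]["enabled"])
```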