From 5978b16533a2ac43abf73b62e5a9294c0dfb0879 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Mon, 24 Jun 2024 13:57:17 +0400 Subject: [PATCH] Run code generation --- .../_async/client/__init__.py | 73 +++-- .../_async/client/async_search.py | 27 +- elasticsearch_serverless/_async/client/cat.py | 63 +++- .../_async/client/cluster.py | 23 +- .../_async/client/enrich.py | 6 +- elasticsearch_serverless/_async/client/eql.py | 11 +- .../_async/client/graph.py | 4 +- .../_async/client/indices.py | 90 +++--- .../_async/client/inference.py | 14 +- .../_async/client/ingest.py | 15 +- .../_async/client/license.py | 4 +- .../_async/client/logstash.py | 6 +- elasticsearch_serverless/_async/client/ml.py | 280 +++++++++++++++--- .../_async/client/query_ruleset.py | 4 +- .../_async/client/search_application.py | 9 +- .../_async/client/security.py | 47 ++- .../_async/client/synonyms.py | 2 +- .../_async/client/transform.py | 63 +++- .../_sync/client/__init__.py | 73 +++-- .../_sync/client/async_search.py | 27 +- elasticsearch_serverless/_sync/client/cat.py | 63 +++- .../_sync/client/cluster.py | 23 +- .../_sync/client/enrich.py | 6 +- elasticsearch_serverless/_sync/client/eql.py | 11 +- .../_sync/client/graph.py | 4 +- .../_sync/client/indices.py | 90 +++--- .../_sync/client/inference.py | 14 +- .../_sync/client/ingest.py | 15 +- .../_sync/client/license.py | 4 +- .../_sync/client/logstash.py | 6 +- elasticsearch_serverless/_sync/client/ml.py | 280 +++++++++++++++--- .../_sync/client/query_ruleset.py | 4 +- .../_sync/client/search_application.py | 9 +- .../_sync/client/security.py | 47 ++- .../_sync/client/synonyms.py | 2 +- .../_sync/client/transform.py | 63 +++- 36 files changed, 1138 insertions(+), 344 deletions(-) diff --git a/elasticsearch_serverless/_async/client/__init__.py b/elasticsearch_serverless/_async/client/__init__.py index 130373b..0dff9a6 100644 --- a/elasticsearch_serverless/_async/client/__init__.py +++ b/elasticsearch_serverless/_async/client/__init__.py @@ -466,7 +466,8 @@ async def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to perform multiple index/update/delete operations in a single request. + Performs multiple indexing or delete operations in a single API call. This reduces + overhead and can greatly increase indexing speed. ``_ @@ -556,7 +557,7 @@ async def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explicitly clears the search context for a scroll. + Clears the search context and results for a scrolling search. ``_ @@ -599,7 +600,7 @@ async def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close a point in time + Closes a point-in-time. ``_ @@ -790,8 +791,9 @@ async def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new document in the index. Returns a 409 response when a document with - a same ID already exists in the index. + Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and the document already exists, the request updates + the document and increments its version. ``_ @@ -888,7 +890,7 @@ async def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a document from the index. + Removes a JSON document from the specified index. ``_ @@ -1006,7 +1008,7 @@ async def delete_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes documents matching the provided query. 
+ Deletes documents that match the specified query. ``_ @@ -1180,7 +1182,7 @@ async def delete_script( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a script. + Deletes a stored script or search template. ``_ @@ -1242,7 +1244,7 @@ async def exists( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document exists in an index. + Checks if a document in an index exists. ``_ @@ -1337,7 +1339,7 @@ async def exists_source( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document source exists in an index. + Checks if a document's `_source` is stored. ``_ @@ -1431,7 +1433,8 @@ async def explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about why a specific matches (or doesn't match) a query. + Returns information about why a specific document matches (or doesn’t match) + a query. ``_ @@ -1543,7 +1546,10 @@ async def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the information about the capabilities of fields among multiple indices. + The field capabilities API returns the information about the capabilities of + fields among multiple indices. The field capabilities API returns runtime fields + like any other field. For example, a runtime field with a type of keyword is + returned as any other field that belongs to the `keyword` family. ``_ @@ -1735,7 +1741,7 @@ async def get_script( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a script. + Retrieves a stored script or search template. ``_ @@ -1887,7 +1893,9 @@ async def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a document in an index. + Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and the document already exists, the request updates + the document and increments its version. ``_ @@ -2261,7 +2269,7 @@ async def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search template operations in one request. + Runs multiple templated searches with a single request. ``_ @@ -2445,7 +2453,13 @@ async def open_point_in_time( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a point in time that can be used in subsequent searches + A search request by default executes against the most recent visible data of + the target indices, which is called point in time. Elasticsearch pit (point in + time) is a lightweight view into the state of the data as it existed when initiated. + In some cases, it’s preferred to perform multiple search requests using the same + point in time. For example, if refreshes happen between `search_after` requests, + then the results of those requests might not be consistent as changes happening + between searches are only visible to the more recent point in time. ``_ @@ -2511,7 +2525,7 @@ async def put_script( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a script. + Creates or updates a stored script or search template. 
``_ @@ -2587,8 +2601,8 @@ async def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to evaluate the quality of ranked search results over a set of typical - search queries + Enables you to evaluate the quality of ranked search results over a set of typical + search queries. ``_ @@ -2770,7 +2784,7 @@ async def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Renders a search template as a search request body. ``_ @@ -2829,7 +2843,7 @@ async def scripts_painless_execute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows an arbitrary script to be executed and a result to be returned + Runs a script and returns a result. ``_ @@ -2939,6 +2953,7 @@ async def scroll( "query", "rank", "rescore", + "retriever", "runtime_mappings", "script_fields", "search_after", @@ -3020,6 +3035,7 @@ async def search( t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, + retriever: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, @@ -3060,7 +3076,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query. + Returns search hits that match the query defined in the request. You can provide + search queries using the `q` query string parameter or the request body. If both + are specified, only the query parameter is used. ``_ @@ -3179,6 +3197,9 @@ async def search( example 100 - 500) documents returned by the `query` and `post_filter` phases. :param rest_total_hits_as_int: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. + :param retriever: A retriever is a specification to describe top documents returned + from a search. A retriever replaces other elements of the search API that + also return top documents such as query and knn. :param routing: Custom value used to route operations to a specific shard. :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. @@ -3372,6 +3393,8 @@ async def search( __body["rank"] = rank if rescore is not None: __body["rescore"] = rescore + if retriever is not None: + __body["retriever"] = retriever if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: @@ -3625,7 +3648,7 @@ async def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Runs a search with a search template. ``_ @@ -4134,8 +4157,8 @@ async def update_by_query( ) -> ObjectApiResponse[t.Any]: """ Updates documents that match the specified query. If no query is specified, performs - an update on every document in the index without changing the source, for example - to pick up a mapping change. + an update on every document in the data stream or index without modifying the + source, which is useful for picking up mapping changes. 
``_ diff --git a/elasticsearch_serverless/_async/client/async_search.py b/elasticsearch_serverless/_async/client/async_search.py index 5a9bd08..c2470c8 100644 --- a/elasticsearch_serverless/_async/client/async_search.py +++ b/elasticsearch_serverless/_async/client/async_search.py @@ -36,8 +36,11 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by ID. If the search is still running, the search request - will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async search by identifier. If the search is still running, the search + request will be cancelled. Otherwise, the saved search results are deleted. If + the Elasticsearch security features are enabled, the deletion of a specific async + search is restricted to: the authenticated user that submitted the original search + request; users that have the `cancel_task` cluster privilege. ``_ @@ -77,7 +80,9 @@ async def get( ) -> ObjectApiResponse[t.Any]: """ Retrieves the results of a previously submitted async search request given its - ID. + identifier. If the Elasticsearch security features are enabled, access to the + results of a specific async search is restricted to the user or API key that + submitted it. ``_ @@ -131,8 +136,10 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of a previously submitted async search request given its - ID. + Get async search status Retrieves the status of a previously submitted async + search request given its identifier, without retrieving search results. If the + Elasticsearch security features are enabled, use of this API is restricted to + the `monitoring_user` role. ``_ @@ -298,7 +305,15 @@ async def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a search request asynchronously. + Runs a search request asynchronously. When the primary sort of the results is + an indexed field, shards get sorted based on minimum and maximum value that they + hold for that field, hence partial results become available following the sort + criteria that was requested. Warning: Async search does not support scroll nor + search requests that only include the suggest section. By default, Elasticsearch + doesn’t allow you to store an async search response larger than 10Mb and an attempt + to do this results in an error. The maximum allowed size for a stored async search + response can be set by changing the `search.max_async_search_response_size` cluster + level setting. ``_ diff --git a/elasticsearch_serverless/_async/client/cat.py b/elasticsearch_serverless/_async/client/cat.py index c9a65c5..7e5ed49 100644 --- a/elasticsearch_serverless/_async/client/cat.py +++ b/elasticsearch_serverless/_async/client/cat.py @@ -53,8 +53,11 @@ async def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Shows information about currently configured aliases to indices including filter - and routing infos. + Retrieves the cluster’s index aliases, including filter and routing information. + The API does not return data stream aliases. IMPORTANT: cat APIs are only intended + for human consumption using the command line or the Kibana console. They are + not intended for use by applications. For application consumption, use the aliases + API. 
``_ @@ -131,7 +134,11 @@ async def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about existing component_templates templates. + Returns information about component templates in a cluster. Component templates + are building blocks for constructing index templates that specify index mappings, + settings, and aliases. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get component template API. ``_ @@ -204,8 +211,12 @@ async def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides quick access to the document count of the entire cluster, or individual - indices. + Provides quick access to a document count for a data stream, an index, or an + entire cluster. NOTE: The document count only includes live documents, not deleted + documents which have not yet been removed by the merge process. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the count API. ``_ @@ -363,8 +374,16 @@ async def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about indices: number of primaries and replicas, document - counts, disk size, ... + Returns high-level information about indices in a cluster, including backing + indices for data streams. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index API. Use the cat indices API to + get the following information for each index in a cluster: shard count; document + count; deleted document count; primary store size; total store size of all shards, + including shard replicas. These metrics are retrieved directly from Lucene, which + Elasticsearch uses internally to power indexing and search. As a result, all + document counts include hidden nested documents. To get an accurate count of + Elasticsearch documents, use the cat count or count APIs. ``_ @@ -489,7 +508,10 @@ async def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about data frame analytics jobs. + Returns configuration and usage information about data frame analytics jobs. + IMPORTANT: cat APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For application + consumption, use the get data frame analytics jobs statistics API. ``_ @@ -600,7 +622,12 @@ async def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about datafeeds. + Returns configuration and usage information about datafeeds. This API returns + a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, + you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges + to use this API. IMPORTANT: cat APIs are only intended for human consumption + using the Kibana console or command line. They are not intended for use by applications. + For application consumption, use the get datafeed statistics API. 
``_ @@ -717,7 +744,13 @@ async def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about anomaly detection jobs. + Returns configuration and usage information for anomaly detection jobs. This + API returns a maximum of 10,000 jobs. If the Elasticsearch security features + are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` + cluster privileges to use this API. IMPORTANT: cat APIs are only intended for + human consumption using the Kibana console or command line. They are not intended + for use by applications. For application consumption, use the get anomaly detection + job statistics API. ``_ @@ -837,7 +870,10 @@ async def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about inference trained models. + Returns configuration and usage information about inference trained models. IMPORTANT: + cat APIs are only intended for human consumption using the Kibana console or + command line. They are not intended for use by applications. For application + consumption, use the get trained models statistics API. ``_ @@ -960,7 +996,10 @@ async def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about transforms. + Returns configuration and usage information about transforms. IMPORTANT: cat + APIs are only intended for human consumption using the Kibana console or command + line. They are not intended for use by applications. For application consumption, + use the get transform statistics API. ``_ diff --git a/elasticsearch_serverless/_async/client/cluster.py b/elasticsearch_serverless/_async/client/cluster.py index 1675534..1a1d8ad 100644 --- a/elasticsearch_serverless/_async/client/cluster.py +++ b/elasticsearch_serverless/_async/client/cluster.py @@ -40,7 +40,8 @@ async def delete_component_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a component template + Deletes component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. ``_ @@ -139,7 +140,7 @@ async def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns one or more component templates + Retrieves information about component templates. ``_ @@ -233,7 +234,6 @@ async def put_component_template( *, name: str, template: t.Optional[t.Mapping[str, t.Any]] = None, - cause: t.Optional[str] = None, create: t.Optional[bool] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -248,7 +248,19 @@ async def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a component template + Creates or updates a component template. Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. 
Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. + You can include comments anywhere in the request body except before the opening + curly bracket. ``_ @@ -263,7 +275,6 @@ async def put_component_template( update settings API. :param template: The template to be applied which includes mappings, settings, or aliases configuration. - :param cause: :param create: If `true`, this request cannot replace or update existing component templates. :param deprecated: Marks this index template as deprecated. When creating or @@ -287,8 +298,6 @@ async def put_component_template( __path = f"/_component_template/{_quote(name)}" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} - if cause is not None: - __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: diff --git a/elasticsearch_serverless/_async/client/enrich.py b/elasticsearch_serverless/_async/client/enrich.py index 78bebc2..3ddcfc1 100644 --- a/elasticsearch_serverless/_async/client/enrich.py +++ b/elasticsearch_serverless/_async/client/enrich.py @@ -109,7 +109,7 @@ async def get_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets information about an enrich policy. + Returns information about an enrich policy. ``_ @@ -151,7 +151,7 @@ async def put_policy( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new enrich policy. + Creates an enrich policy. ``_ @@ -197,7 +197,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets enrich coordinator statistics and information about enrich policies that + Returns enrich coordinator statistics and information about enrich policies that are currently executing. ``_ diff --git a/elasticsearch_serverless/_async/client/eql.py b/elasticsearch_serverless/_async/client/eql.py index 4c5af56..7b5885a 100644 --- a/elasticsearch_serverless/_async/client/eql.py +++ b/elasticsearch_serverless/_async/client/eql.py @@ -36,8 +36,8 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search by ID. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async EQL search or a stored synchronous EQL search. The API also + deletes results for the search. ``_ @@ -77,7 +77,8 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns async results from previously executed Event Query Language (EQL) search + Returns the current status and available results for an async EQL search or a + stored synchronous EQL search. ``_ @@ -121,8 +122,8 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the status of a previously submitted async or stored Event Query Language - (EQL) search + Returns the current status for an async EQL search or a stored synchronous EQL + search without returning results. 
``_ diff --git a/elasticsearch_serverless/_async/client/graph.py b/elasticsearch_serverless/_async/client/graph.py index 944c140..b860ce8 100644 --- a/elasticsearch_serverless/_async/client/graph.py +++ b/elasticsearch_serverless/_async/client/graph.py @@ -45,8 +45,8 @@ async def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explore extracted and summarized information about the documents and terms in - an index. + Extracts and summarizes information about the documents and terms in an Elasticsearch + data stream or index. ``_ diff --git a/elasticsearch_serverless/_async/client/indices.py b/elasticsearch_serverless/_async/client/indices.py index e93f016..a66095b 100644 --- a/elasticsearch_serverless/_async/client/indices.py +++ b/elasticsearch_serverless/_async/client/indices.py @@ -129,8 +129,7 @@ async def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the analysis process on a text and return the tokens breakdown of the - text. + Performs analysis on a text string and returns the resulting tokens. ``_ @@ -221,7 +220,7 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an index with optional settings and mappings. + Creates a new index. ``_ @@ -285,7 +284,8 @@ async def create_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a data stream + Creates a data stream. You must have a matching index template with data stream + enabled. ``_ @@ -331,7 +331,7 @@ async def data_streams_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides statistics on operations happening in a data stream. + Retrieves statistics for one or more data streams. ``_ @@ -386,7 +386,7 @@ async def delete( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index. + Deletes one or more indices. ``_ @@ -452,7 +452,7 @@ async def delete_alias( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an alias. + Removes a data stream or index from an alias. ``_ @@ -512,7 +512,8 @@ async def delete_data_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the data stream lifecycle of the selected data streams. + Removes the data lifecycle from a data stream rendering it not managed by the + data stream lifecycle ``_ @@ -565,7 +566,7 @@ async def delete_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a data stream. + Deletes one or more data streams and their backing indices. ``_ @@ -608,7 +609,9 @@ async def delete_index_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index template. + The provided may contain multiple template names separated by + a comma. If multiple template names are specified then there is no wildcard support + and the provided names should match completely with existing templates. ``_ @@ -665,7 +668,7 @@ async def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index exists. + Checks if a data stream, index, or alias exists. 
``_ @@ -737,7 +740,7 @@ async def exists_alias( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular alias exists. + Checks if an alias exists. ``_ @@ -911,7 +914,8 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more indices. + Returns information about one or more indices. For data streams, the API returns + information about the stream’s backing indices. ``_ @@ -994,7 +998,7 @@ async def get_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an alias. + Retrieves information for one or more aliases. ``_ @@ -1065,7 +1069,7 @@ async def get_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the data stream lifecycle of the selected data streams. + Retrieves the data stream lifecycle configuration of one or more data streams. ``_ @@ -1118,7 +1122,7 @@ async def get_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns data streams. + Retrieves information about one or more data streams. ``_ @@ -1169,7 +1173,7 @@ async def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an index template. + Returns information about one or more index templates. ``_ @@ -1236,7 +1240,8 @@ async def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns mappings for one or more indices. + Retrieves mapping definitions for one or more indices. For data streams, the + API retrieves mappings for the stream’s backing indices. ``_ @@ -1314,7 +1319,8 @@ async def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns settings for one or more indices. + Returns setting information for one or more indices. For data streams, returns + setting information for the stream’s backing indices. ``_ @@ -1388,7 +1394,14 @@ async def migrate_to_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrates an alias to a data stream + Converts an index alias to a data stream. You must have a matching index template + that is data stream enabled. The alias must meet the following criteria: The + alias must have a write index; All indices for the alias must have a `@timestamp` + field mapping of a `date` or `date_nanos` field type; The alias must not have + any filters; The alias must not use custom routing. If successful, the request + removes the alias and creates a data stream with the same name. The indices for + the alias become hidden backing indices for the stream. The write index for the + alias becomes the write index for the stream. ``_ @@ -1425,7 +1438,7 @@ async def modify_data_stream( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Modifies a data stream + Performs one or more data stream modification actions in a single atomic operation. ``_ @@ -1482,7 +1495,7 @@ async def put_alias( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an alias. + Adds a data stream or index to an alias. ``_ @@ -1581,7 +1594,7 @@ async def put_data_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the data stream lifecycle of the selected data streams. + Update the data lifecycle of the specified data streams. 
``_ @@ -1677,7 +1690,8 @@ async def put_index_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices. ``_ @@ -1831,7 +1845,9 @@ async def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index mappings. + Adds new fields to an existing data stream or index. You can also use this API + to change the search settings of existing fields. For data streams, these changes + are applied to all backing indices by default. ``_ @@ -1955,7 +1971,8 @@ async def put_settings( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index settings. + Changes a dynamic index setting in real time. For data streams, index setting + changes are applied to all backing indices by default. ``_ @@ -2052,7 +2069,8 @@ async def put_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices. ``_ @@ -2133,7 +2151,9 @@ async def refresh( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the refresh operation in one or more indices. + A refresh makes recent operations performed on one or more indices available + for search. For data streams, the API runs the refresh operation on the stream’s + backing indices. ``_ @@ -2193,7 +2213,8 @@ async def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about any matching indices, aliases, and data streams + Resolves the specified name(s) and/or index patterns for indices, aliases, and + data streams. Multiple patterns and remote clusters are supported. ``_ @@ -2251,8 +2272,7 @@ async def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an alias to point to a new index when the existing index is considered - to be too large or too old. + Creates a new index for a data stream or index alias. ``_ @@ -2341,7 +2361,7 @@ async def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate matching the given index name against the index templates in the system + ``_ @@ -2414,7 +2434,7 @@ async def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate resolving the given template name or body + Returns the index configuration that would be applied by a particular index template. ``_ @@ -2532,7 +2552,7 @@ async def update_aliases( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates index aliases. + Adds a data stream or index to an alias. ``_ @@ -2600,7 +2620,7 @@ async def validate_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a user to validate a potentially expensive query without executing it. + Validates a potentially expensive query without executing it. 
``_ diff --git a/elasticsearch_serverless/_async/client/inference.py b/elasticsearch_serverless/_async/client/inference.py index 454fb3e..7a2b77f 100644 --- a/elasticsearch_serverless/_async/client/inference.py +++ b/elasticsearch_serverless/_async/client/inference.py @@ -36,8 +36,10 @@ async def delete( str, ] ] = None, + dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -48,6 +50,10 @@ async def delete( :param inference_id: The inference Id :param task_type: The task type + :param dry_run: When true, the endpoint is not deleted, and a list of ingest + processors which reference this endpoint is returned + :param force: When true, the inference endpoint is forcefully deleted even if + it is still being used by ingest processors or semantic text fields """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -58,10 +64,14 @@ async def delete( else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} + if dry_run is not None: + __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if force is not None: + __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: @@ -139,7 +149,7 @@ async def inference( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform inference + Perform inference on the service ``_ @@ -210,7 +220,7 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Configure an inference endpoint for use in the Inference API + Create an inference endpoint ``_ diff --git a/elasticsearch_serverless/_async/client/ingest.py b/elasticsearch_serverless/_async/client/ingest.py index 093806f..b11ed87 100644 --- a/elasticsearch_serverless/_async/client/ingest.py +++ b/elasticsearch_serverless/_async/client/ingest.py @@ -40,7 +40,7 @@ async def delete_pipeline( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline. + Deletes one or more existing ingest pipeline. ``_ @@ -88,7 +88,8 @@ async def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a pipeline. + Returns information about one or more ingest pipelines. This API returns a local + reference of the pipeline. ``_ @@ -131,7 +132,10 @@ async def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of the built-in patterns. + Extracts structured fields out of a single text field within a document. You + choose which field to extract matched fields from, as well as the grok pattern + you expect will match. A grok pattern is like a regular expression that supports + aliased expressions that can be reused. ``_ """ @@ -175,7 +179,8 @@ async def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline. + Creates or updates an ingest pipeline. Changes made using this API take effect + immediately. ``_ @@ -254,7 +259,7 @@ async def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to simulate a pipeline with example documents. 
+ Executes an ingest pipeline against a set of provided documents. ``_ diff --git a/elasticsearch_serverless/_async/client/license.py b/elasticsearch_serverless/_async/client/license.py index 24800e5..797a541 100644 --- a/elasticsearch_serverless/_async/client/license.py +++ b/elasticsearch_serverless/_async/client/license.py @@ -37,7 +37,9 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves licensing information for the cluster + This API returns information about the type of license, when it was issued, and + when it expires, for example. For more information about the different types + of licenses, see https://www.elastic.co/subscriptions. ``_ diff --git a/elasticsearch_serverless/_async/client/logstash.py b/elasticsearch_serverless/_async/client/logstash.py index 6327471..d993581 100644 --- a/elasticsearch_serverless/_async/client/logstash.py +++ b/elasticsearch_serverless/_async/client/logstash.py @@ -36,7 +36,7 @@ async def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes Logstash Pipelines used by Central Management + Deletes a pipeline used for Logstash Central Management. ``_ @@ -70,7 +70,7 @@ async def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves Logstash Pipelines used by Central Management + Retrieves pipelines used for Logstash Central Management. ``_ @@ -109,7 +109,7 @@ async def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates Logstash Pipelines used for Central Management + Creates or updates a pipeline used for Logstash Central Management. ``_ diff --git a/elasticsearch_serverless/_async/client/ml.py b/elasticsearch_serverless/_async/client/ml.py index 56cebb6..c823f5c 100644 --- a/elasticsearch_serverless/_async/client/ml.py +++ b/elasticsearch_serverless/_async/client/ml.py @@ -42,8 +42,19 @@ async def close_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes one or more anomaly detection jobs. A job can be opened and closed multiple - times throughout its lifecycle. + Close anomaly detection jobs A job can be opened and closed multiple times throughout + its lifecycle. A closed job cannot receive data or perform analysis operations, + but you can still explore and navigate results. When you close a job, it runs + housekeeping tasks such as pruning the model history, flushing buffers, calculating + final results and persisting the model snapshots. Depending upon the size of + the job, it could take several minutes to close and the equivalent time to re-open. + After it is closed, the job has a minimal overhead on the cluster except for + maintaining its meta data. Therefore it is a best practice to close jobs that + are no longer required to process data. If you close an anomaly detection job + whose datafeed is running, the request first tries to stop the datafeed. This + behavior is equivalent to calling stop datafeed API with the same timeout and + force parameters as the close job request. When a datafeed that has a specified + end date stops, it automatically closes its associated job. ``_ @@ -97,7 +108,7 @@ async def delete_calendar( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a calendar. + Removes all scheduled events from a calendar, then deletes it. 
``_ @@ -211,7 +222,7 @@ async def delete_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing data frame analytics job. + Deletes a data frame analytics job. ``_ @@ -294,7 +305,9 @@ async def delete_filter( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a filter. + Deletes a filter. If an anomaly detection job references the filter, you cannot + delete the filter. You must update or delete the job before you can delete the + filter. ``_ @@ -331,7 +344,12 @@ async def delete_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing anomaly detection job. + Deletes an anomaly detection job. All job configuration, model state and results + are deleted. It is not currently possible to delete multiple jobs using wildcards + or a comma separated list. If you delete a job that has a datafeed, the request + first tries to delete the datafeed. This behavior is equivalent to calling the + delete datafeed API with the same timeout and force parameters as the delete + job request. ``_ @@ -419,7 +437,9 @@ async def delete_trained_model_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a model alias that refers to the trained model + Deletes a trained model alias. This API deletes an existing model alias that + refers to a trained model. If the model alias is missing or refers to a model + other than the one identified by the `model_id`, this API returns an error. ``_ @@ -465,7 +485,9 @@ async def estimate_model_memory( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Estimates the model memory + Makes an estimation of the memory usage for an anomaly detection job model. It + is based on analysis configuration details for the job and cardinality estimates + for the fields it references. ``_ @@ -523,7 +545,10 @@ async def evaluate_data_frame( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates the data frame analytics for an annotated index. + Evaluates the data frame analytics for an annotated index. The API packages together + commonly used evaluation metrics for various types of machine learning features. + This has been designed for use on indexes created by data frame analytics. Evaluation + requires both a ground truth field and an analytics result field to be present. ``_ @@ -578,7 +603,14 @@ async def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. + Forces any buffered data to be processed by the job. The flush jobs API is only + applicable when sending data for analysis using the post data API. Depending + on the content of the buffer, then it might additionally calculate new results. + Both flush and close operations are similar, however the flush is more efficient + if you are expecting to send more data for analysis. When flushing, the job remains + open and is available to continue analyzing data. A close operation additionally + prunes and persists the model state to disk and the job must be opened again + before analyzing further data. ``_ @@ -761,7 +793,9 @@ async def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. + Retrieves configuration information for data frame analytics jobs. 
You can get + information for multiple data frame analytics jobs in a single API request by + using a comma-separated list of data frame analytics jobs or a wildcard expression. ``_ @@ -882,7 +916,12 @@ async def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. + Retrieves usage information for datafeeds. You can get statistics for multiple + datafeeds in a single API request by using a comma-separated list of datafeeds + or a wildcard expression. You can get statistics for all datafeeds by using `_all`, + by specifying `*` as the ``, or by omitting the ``. If the + datafeed is stopped, the only information you receive is the `datafeed_id` and + the `state`. This API returns a maximum of 10,000 datafeeds. ``_ @@ -930,7 +969,11 @@ async def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. + Retrieves configuration information for datafeeds. You can get information for + multiple datafeeds in a single API request by using a comma-separated list of + datafeeds or a wildcard expression. You can get information for all datafeeds + by using `_all`, by specifying `*` as the ``, or by omitting the ``. + This API returns a maximum of 10,000 datafeeds. ``_ @@ -985,7 +1028,7 @@ async def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. + Retrieves filters. You can get a single filter or all filters. ``_ @@ -1076,7 +1119,11 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. + Retrieves configuration information for anomaly detection jobs. You can get information + for multiple anomaly detection jobs in a single API request by using a group + name, a comma-separated list of jobs, or a wildcard expression. You can get information + for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, + or by omitting the ``. ``_ @@ -1146,7 +1193,18 @@ async def get_overall_buckets( ) -> ObjectApiResponse[t.Any]: """ Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. + anomaly detection jobs. The `overall_score` is calculated by combining the scores + of all the buckets within the overall bucket span. First, the maximum `anomaly_score` + per anomaly detection job in the overall bucket is calculated. Then the `top_n` + of those scores are averaged to result in the `overall_score`. This means that + you can fine-tune the `overall_score` so that it is more or less sensitive to + the number of jobs that detect an anomaly at the same time. For example, if you + set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall + bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` + is high only when all jobs detect anomalies in that overall bucket. If you set + the `bucket_span` parameter (to a value greater than its default), the `overall_score` + is the maximum `overall_score` of the overall buckets that have a span equal + to the jobs' largest bucket span. 
``_ @@ -1208,7 +1266,7 @@ async def get_overall_buckets( async def get_trained_models( self, *, - model_id: t.Optional[str] = None, + model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, decompress_definition: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -1224,14 +1282,16 @@ async def get_trained_models( ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, - tags: t.Optional[str] = None, + tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained inference model. + Retrieves configuration information for a trained model. ``_ - :param model_id: The unique identifier of the trained model. + :param model_id: The unique identifier of the trained model or a model alias. + You can get information for multiple trained models in a single API request + by using a comma-separated list of model IDs or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions @@ -1299,7 +1359,9 @@ async def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained inference models. + Retrieves usage information for trained models. You can get usage information + for multiple trained models in a single API request by using a comma-separated + list of model IDs or a wildcard expression. ``_ @@ -1354,7 +1416,7 @@ async def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate a trained model. + Evaluates a trained model. ``_ @@ -1409,7 +1471,12 @@ async def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens one or more anomaly detection jobs. + Opens one or more anomaly detection jobs. An anomaly detection job must be opened + in order for it to be ready to receive and analyze data. It can be opened and + closed multiple times throughout its lifecycle. When you open a new job, it starts + with an empty model. When you open an existing job, the most recent model state + is automatically loaded. The job is ready to resume its analysis from where it + left off, once new data is received. ``_ @@ -1456,7 +1523,7 @@ async def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Posts scheduled events in a calendar. + Adds scheduled events to a calendar. ``_ @@ -1503,7 +1570,7 @@ async def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews that will be analyzed given a data frame analytics config. + Previews the extracted features used by a data frame analytics config. ``_ @@ -1556,7 +1623,15 @@ async def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. + Previews a datafeed. This API returns the first "page" of search results from + a datafeed. You can preview an existing datafeed or provide configuration details + for a datafeed and anomaly detection job in the API. The preview shows the structure + of the data that will be passed to the anomaly detection engine. IMPORTANT: When + Elasticsearch security features are enabled, the preview uses the credentials + of the user that called the API. 
However, when the datafeed starts it uses the + roles of the last user that created or updated the datafeed. To get a preview + that accurately reflects the behavior of the datafeed, use the appropriate credentials. + You can also use secondary authorization headers to supply the credentials. ``_ @@ -1623,7 +1698,7 @@ async def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a calendar. + Creates a calendar. ``_ @@ -1663,7 +1738,7 @@ async def put_calendar_job( self, *, calendar_id: str, - job_id: str, + job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -1733,7 +1808,9 @@ async def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. + Instantiates a data frame analytics job. This API creates a data frame analytics + job that performs an analysis on the source indices and stores the outcome in + a destination index. ``_ @@ -1894,7 +1971,17 @@ async def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. + Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis + by an anomaly detection job. You can associate only one datafeed with each anomaly + detection job. The datafeed contains a query that runs at a defined interval + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') + at each interval. When Elasticsearch security features are enabled, your datafeed + remembers which roles the user who created it had at the time of creation and + runs the query using those same roles. If you provide secondary authorization + headers, those credentials are used instead. You must use Kibana, this API, or + the create anomaly detection jobs API to create a datafeed. Do not add a datafeed + directly to the `.ml-config` index. Do not give users `write` privileges on the + `.ml-config` index. ``_ @@ -2040,7 +2127,9 @@ async def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. + Instantiates a filter. A filter contains a list of strings. It can be used by + one or more anomaly detection jobs. Specifically, filters are referenced in the + `custom_rules` property of detector configuration objects. ``_ @@ -2119,7 +2208,8 @@ async def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates an anomaly detection job. + Instantiates an anomaly detection job. If you include a `datafeed_config`, you + must have read index privileges on the source index. ``_ @@ -2292,7 +2382,7 @@ async def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an inference trained model. + Enables you to supply a trained model that is not created by data frame analytics. ``_ @@ -2387,8 +2477,19 @@ async def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new model alias (or reassigns an existing one) to refer to the trained - model + Creates or updates a trained model alias. A trained model alias is a logical + name used to reference a single trained model. You can use aliases instead of + trained model identifiers to make it easier to reference your models. 
For example, + you can use aliases in inference aggregations and processors. An alias must be + unique and refer to only a single trained model. However, you can have multiple + aliases for each trained model. If you use this API to update an alias such that + it references a different trained model ID and the model uses a different type + of data frame analytics, an error occurs. For example, this situation occurs + if you have a trained model for regression analysis and a trained model for classification + analysis; you cannot reassign an alias from one type of trained model to another. + If you use this API to update an alias and there are very few input fields in + common between the old and new trained models for the model alias, the API returns + a warning. ``_ @@ -2437,7 +2538,7 @@ async def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition + Creates part of a trained model definition. ``_ @@ -2504,7 +2605,9 @@ async def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary + Creates a trained model vocabulary. This API is supported only for natural language + processing (NLP) models. The vocabulary is stored in the index as described in + `inference_config.*.vocabulary` of the trained model definition. ``_ @@ -2553,7 +2656,9 @@ async def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing anomaly detection job. + Resets an anomaly detection job. All model state and results are deleted. The + job is ready to start over as if it had just been created. It is not currently + possible to reset multiple jobs using wildcards or a comma separated list. ``_ @@ -2597,7 +2702,16 @@ async def start_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. + Starts a data frame analytics job. A data frame analytics job can be started + and stopped multiple times throughout its lifecycle. If the destination index + does not exist, it is created automatically the first time you start the data + frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` + settings for the destination index are copied from the source index. If there + are multiple source indices, the destination index copies the highest setting + values. The mappings for the destination index are also copied from the source + indices. If there are any mapping conflicts, the job fails to start. If the destination + index exists, it is used as is. You can therefore set up the destination index + in advance with custom settings and mappings. ``_ @@ -2643,7 +2757,17 @@ async def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. + Starts one or more datafeeds. A datafeed must be started in order to retrieve + data from Elasticsearch. A datafeed can be started and stopped multiple times + throughout its lifecycle. Before you can start a datafeed, the anomaly detection + job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, + it continues processing input data from the next millisecond after it was stopped. + If new data was indexed for that exact millisecond between stopping and starting, + it will be ignored. 
When Elasticsearch security features are enabled, your datafeed + remembers which roles the last user to create or update it had at the time of + creation or update and runs the query using those same roles. If you provided + secondary authorization headers when you created or updated the datafeed, those + credentials are used instead. ``_ @@ -2705,7 +2829,8 @@ async def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trained model deployment. + Starts a trained model deployment, which allocates the model to every machine + learning node. ``_ @@ -2782,7 +2907,8 @@ async def stop_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. + Stops one or more data frame analytics jobs. A data frame analytics job can be + started and stopped multiple times throughout its lifecycle. ``_ @@ -2841,7 +2967,9 @@ async def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. + Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data + from Elasticsearch. A datafeed can be started and stopped multiple times throughout + its lifecycle. ``_ @@ -2896,7 +3024,7 @@ async def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop a trained model deployment. + Stops a trained model deployment. ``_ @@ -2955,7 +3083,7 @@ async def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a data frame analytics job. + Updates an existing data frame analytics job. ``_ @@ -3056,7 +3184,11 @@ async def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a datafeed. + Updates the properties of a datafeed. You must stop and start the datafeed for + the changes to be applied. When Elasticsearch security features are enabled, + your datafeed remembers which roles the user who updated it had at the time of + the update and runs the query using those same roles. If you provide secondary + authorization headers, those credentials are used instead. ``_ @@ -3212,7 +3344,7 @@ async def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items. + Updates the description of a filter, adds items, or removes items from the list. ``_ @@ -3398,3 +3530,57 @@ async def update_job( return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body ) + + @_rewrite_parameters( + body_fields=("number_of_allocations",), + ) + async def update_trained_model_deployment( + self, + *, + model_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + number_of_allocations: t.Optional[int] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Starts a trained model deployment, which allocates the model to every machine + learning node. + + ``_ + + :param model_id: The unique identifier of the trained model. Currently, only + PyTorch models are supported. 
+ :param number_of_allocations: The number of model allocations on each node where + the model is deployed. All allocations on a node share the same copy of the + model in memory but use a separate set of threads to evaluate the model. + Increasing this value generally increases the throughput. If this setting + is greater than the number of hardware threads it will automatically be changed + to a value less than the number of hardware threads. + """ + if model_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'model_id'") + __path = f"/_ml/trained_models/{_quote(model_id)}/deployment/_update" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if number_of_allocations is not None: + __body["number_of_allocations"] = number_of_allocations + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", __path, params=__query, headers=__headers, body=__body + ) diff --git a/elasticsearch_serverless/_async/client/query_ruleset.py b/elasticsearch_serverless/_async/client/query_ruleset.py index 3bc0e68..a0fb113 100644 --- a/elasticsearch_serverless/_async/client/query_ruleset.py +++ b/elasticsearch_serverless/_async/client/query_ruleset.py @@ -70,7 +70,7 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset. + Returns the details about a query ruleset ``_ @@ -107,7 +107,7 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists query rulesets. + Returns summarized information about existing query rulesets. ``_ diff --git a/elasticsearch_serverless/_async/client/search_application.py b/elasticsearch_serverless/_async/client/search_application.py index 747d0b6..3892eb7 100644 --- a/elasticsearch_serverless/_async/client/search_application.py +++ b/elasticsearch_serverless/_async/client/search_application.py @@ -104,7 +104,7 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application. + Returns the details about a search application ``_ @@ -304,16 +304,19 @@ async def search( human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, + typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application + Perform a search against a search application. ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override any defaults specified in the template. + :param typed_keys: Determines whether aggregation names are prefixed by their + respective types in the response. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -328,6 +331,8 @@ async def search( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if typed_keys is not None: + __query["typed_keys"] = typed_keys if not __body: if params is not None: __body["params"] = params diff --git a/elasticsearch_serverless/_async/client/security.py b/elasticsearch_serverless/_async/client/security.py index 295bebd..23e97e9 100644 --- a/elasticsearch_serverless/_async/client/security.py +++ b/elasticsearch_serverless/_async/client/security.py @@ -35,8 +35,12 @@ async def authenticate( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables authentication as a user and retrieve information about the authenticated - user. + Enables you to submit a request with a basic auth header to authenticate a user + and retrieve information about the authenticated user. A successful call returns + a JSON structure that shows user information such as their username, the roles + that are assigned to the user, any assigned metadata, and information about the + realms that authenticated and authorized the user. If the user cannot be authenticated, + this API returns a 401 status code. ``_ """ @@ -75,7 +79,11 @@ async def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key for access without requiring basic authentication. + Creates an API key for access without requiring basic authentication. A successful + request returns a JSON structure that contains the API key, its unique id, and + its name. If applicable, it also returns expiration information for the API key + in milliseconds. NOTE: By default, API keys never expire. You can specify expiration + information when you create the API keys. ``_ @@ -142,7 +150,10 @@ async def get_api_key( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for one or more API keys. + Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` + privilege, this API returns only the API keys that you own. If you have `read_security`, + `manage_api_key` or greater privileges (including `manage_security`), this API + returns all API keys regardless of ownership. ``_ @@ -278,7 +289,13 @@ async def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more API keys. + Invalidates one or more API keys. The `manage_api_key` privilege allows deleting + any API keys. The `manage_own_api_key` only allows deleting API keys that are + owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation + request must be issued in one of the three formats: - Set the parameter `owner=true`. + - Or, set both `username` and `realm_name` to match the user’s identity. - Or, + if the request is issued by an API key, i.e. an API key invalidates itself, specify + its ID in the `ids` field. ``_ @@ -364,7 +381,8 @@ async def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for API keys using a subset of query DSL + Retrieves information for API keys in a paginated manner. You can optionally + filter the results with a query. ``_ @@ -474,7 +492,22 @@ async def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates attributes of an existing API key. 
+ Updates attributes of an existing API key. Users can only update API keys that + they created or that were granted to them. Use this API to update API keys created + by the create API Key or grant API Key APIs. If you need to apply the same update + to many API keys, you can use bulk update API Keys to reduce overhead. It’s not + possible to update expired API keys, or API keys that have been invalidated by + invalidate API Key. This API supports updates to an API key’s access scope and + metadata. The access scope of an API key is derived from the `role_descriptors` + you specify in the request, and a snapshot of the owner user’s permissions at + the time of the request. The snapshot of the owner’s permissions is updated automatically + on every call. If you don’t specify `role_descriptors` in the request, a call + to this API might still change the API key’s access scope. This change can occur + if the owner user’s permissions have changed since the API key was created or + last modified. To update another user’s API key, use the `run_as` feature to + submit a request on behalf of another user. IMPORTANT: It’s not possible to use + an API key as the authentication credential for this API. To update an API key, + the owner user’s credentials are required. ``_ diff --git a/elasticsearch_serverless/_async/client/synonyms.py b/elasticsearch_serverless/_async/client/synonyms.py index 9f89b99..d446c84 100644 --- a/elasticsearch_serverless/_async/client/synonyms.py +++ b/elasticsearch_serverless/_async/client/synonyms.py @@ -234,7 +234,7 @@ async def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonyms set + Creates or updates a synonym set. ``_ diff --git a/elasticsearch_serverless/_async/client/transform.py b/elasticsearch_serverless/_async/client/transform.py index 28e888f..cab0492 100644 --- a/elasticsearch_serverless/_async/client/transform.py +++ b/elasticsearch_serverless/_async/client/transform.py @@ -39,7 +39,7 @@ async def delete_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing transform. + Deletes a transform. ``_ @@ -229,7 +229,10 @@ async def preview_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a transform. + Previews a transform. It returns a maximum of 100 results. The calculations are + based on all the current data in the source index. It also generates a list of + mappings and settings for the destination index. These values are determined + based on the field types of the source index and the transform aggregations. ``_ @@ -337,7 +340,26 @@ async def put_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a transform. + Creates a transform. A transform copies data from source indices, transforms + it, and persists it into an entity-centric destination index. You can also think + of the destination index as a two-dimensional tabular data structure (known as + a data frame). The ID for each document in the data frame is generated from a + hash of the entity, so there is a unique row per entity. You must choose either + the latest or pivot method for your transform; you cannot use both in a single + transform. If you choose to use the pivot method for your transform, the entities + are defined by the set of `group_by` fields in the pivot object. 
If you choose
+ to use the latest method, the entities are defined by the `unique_key` field
+ values in the latest object. You must have `create_index`, `index`, and `read`
+ privileges on the destination index and `read` and `view_index_metadata` privileges
+ on the source indices. When Elasticsearch security features are enabled, the
+ transform remembers which roles the user that created it had at the time of creation
+ and uses those same roles. If those roles do not have the required privileges
+ on the source and destination indices, the transform fails when it attempts unauthorized
+ operations. NOTE: You must use Kibana or this API to create a transform. Do not
+ add a transform directly into any `.transform-internal*` indices using the Elasticsearch
+ index API. If Elasticsearch security features are enabled, do not give users
+ any privileges on `.transform-internal*` indices. If you used transforms prior
+ to 7.5, also do not give users any privileges on `.data-frame-internal*` indices.

 ``_

@@ -430,7 +452,9 @@ async def reset_transform( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing transform. + Resets a transform. Before you can reset it, you must stop it; alternatively, + use the `force` query parameter. If the destination index was created by the + transform, it is deleted. ``_ @@ -472,7 +496,10 @@ async def schedule_now_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Schedules now a transform. + Schedules now a transform. If you _schedule_now a transform, it will process + the new data instantly, without waiting for the configured frequency interval. + After the _schedule_now API is called, the transform will be processed again at now + + frequency unless the _schedule_now API is called again in the meantime. ``_ @@ -513,7 +540,23 @@ async def start_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more transforms. + Starts a transform. When you start a transform, it creates the destination index + if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` + is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions + for the destination index from the source indices and the transform aggregations. + If fields in the destination index are derived from scripts (as in the case of + `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic + mappings unless an index template exists. If it is a latest transform, it does + not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, + create the destination index before you start the transform. Alternatively, you + can create an index template, though it does not affect the deduced mappings + in a pivot transform. When the transform starts, a series of validations occur + to ensure its success. If you deferred validation when you created the transform, + they occur when you start the transform, with the exception of privilege checks. + When Elasticsearch security features are enabled, the transform remembers which + roles the user that created it had at the time of creation and uses those same + roles. If those roles do not have the required privileges on the source and destination + indices, the transform fails when it attempts unauthorized operations.
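A sketch of the pivot flow described above, under assumed index and transform names:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

# One row per customer_id in the destination index, refreshed every 5 minutes.
client.transform.put_transform(
    transform_id="ecommerce-customers",
    source={"index": "kibana_sample_data_ecommerce"},
    dest={"index": "ecommerce-customers"},
    pivot={
        "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
        "aggregations": {"total_spent": {"sum": {"field": "taxful_total_price"}}},
    },
    frequency="5m",
)
client.transform.start_transform(transform_id="ecommerce-customers")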
``_ @@ -648,7 +691,13 @@ async def update_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a transform. + Updates certain properties of a transform. All updated properties except `description` + do not take effect until after the transform starts the next checkpoint, thus + there is data consistency in each checkpoint. To use this API, you must have + `read` and `view_index_metadata` privileges for the source indices. You must + also have `index` and `read` privileges for the destination index. When Elasticsearch + security features are enabled, the transform remembers which roles the user who + updated it had at the time of update and runs with those privileges. ``_ diff --git a/elasticsearch_serverless/_sync/client/__init__.py b/elasticsearch_serverless/_sync/client/__init__.py index 4b83337..7700fec 100644 --- a/elasticsearch_serverless/_sync/client/__init__.py +++ b/elasticsearch_serverless/_sync/client/__init__.py @@ -464,7 +464,8 @@ def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to perform multiple index/update/delete operations in a single request. + Performs multiple indexing or delete operations in a single API call. This reduces + overhead and can greatly increase indexing speed. ``_ @@ -554,7 +555,7 @@ def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explicitly clears the search context for a scroll. + Clears the search context and results for a scrolling search. ``_ @@ -597,7 +598,7 @@ def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close a point in time + Closes a point-in-time. ``_ @@ -788,8 +789,9 @@ def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new document in the index. Returns a 409 response when a document with - a same ID already exists in the index. + Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and the document already exists, the request updates + the document and increments its version. ``_ @@ -886,7 +888,7 @@ def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a document from the index. + Removes a JSON document from the specified index. ``_ @@ -1004,7 +1006,7 @@ def delete_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes documents matching the provided query. + Deletes documents that match the specified query. ``_ @@ -1178,7 +1180,7 @@ def delete_script( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a script. + Deletes a stored script or search template. ``_ @@ -1240,7 +1242,7 @@ def exists( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document exists in an index. + Checks if a document in an index exists. ``_ @@ -1335,7 +1337,7 @@ def exists_source( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document source exists in an index. + Checks if a document's `_source` is stored. ``_ @@ -1429,7 +1431,8 @@ def explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about why a specific matches (or doesn't match) a query. + Returns information about why a specific document matches (or doesn’t match) + a query. 
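For reference, a short sketch of the explain call with placeholder index, document ID, and query:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

resp = client.explain(
    index="my-index",
    id="1",
    query={"match": {"message": "elasticsearch"}},
)
# `matched` is a boolean; `explanation` breaks down the score calculation.
print(resp["matched"], resp["explanation"]["value"])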
``_ @@ -1541,7 +1544,10 @@ def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the information about the capabilities of fields among multiple indices. + The field capabilities API returns the information about the capabilities of + fields among multiple indices. The field capabilities API returns runtime fields + like any other field. For example, a runtime field with a type of keyword is + returned as any other field that belongs to the `keyword` family. ``_ @@ -1733,7 +1739,7 @@ def get_script( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a script. + Retrieves a stored script or search template. ``_ @@ -1885,7 +1891,9 @@ def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a document in an index. + Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and the document already exists, the request updates + the document and increments its version. ``_ @@ -2259,7 +2267,7 @@ def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search template operations in one request. + Runs multiple templated searches with a single request. ``_ @@ -2443,7 +2451,13 @@ def open_point_in_time( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a point in time that can be used in subsequent searches + A search request by default executes against the most recent visible data of + the target indices, which is called point in time. Elasticsearch pit (point in + time) is a lightweight view into the state of the data as it existed when initiated. + In some cases, it’s preferred to perform multiple search requests using the same + point in time. For example, if refreshes happen between `search_after` requests, + then the results of those requests might not be consistent as changes happening + between searches are only visible to the more recent point in time. ``_ @@ -2509,7 +2523,7 @@ def put_script( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a script. + Creates or updates a stored script or search template. ``_ @@ -2585,8 +2599,8 @@ def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to evaluate the quality of ranked search results over a set of typical - search queries + Enables you to evaluate the quality of ranked search results over a set of typical + search queries. ``_ @@ -2768,7 +2782,7 @@ def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Renders a search template as a search request body. ``_ @@ -2827,7 +2841,7 @@ def scripts_painless_execute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows an arbitrary script to be executed and a result to be returned + Runs a script and returns a result. 
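A minimal sketch using the default `painless_test` context, which returns the script result as a string:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

resp = client.scripts_painless_execute(
    script={
        "source": "params.count / params.total",
        "params": {"count": 100.0, "total": 1000.0},
    }
)
print(resp["result"])  # "0.1"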
``_ @@ -2937,6 +2951,7 @@ def scroll( "query", "rank", "rescore", + "retriever", "runtime_mappings", "script_fields", "search_after", @@ -3018,6 +3033,7 @@ def search( t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, + retriever: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, @@ -3058,7 +3074,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query. + Returns search hits that match the query defined in the request. You can provide + search queries using the `q` query string parameter or the request body. If both + are specified, only the query parameter is used. ``_ @@ -3177,6 +3195,9 @@ def search( example 100 - 500) documents returned by the `query` and `post_filter` phases. :param rest_total_hits_as_int: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. + :param retriever: A retriever is a specification to describe top documents returned + from a search. A retriever replaces other elements of the search API that + also return top documents such as query and knn. :param routing: Custom value used to route operations to a specific shard. :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. @@ -3370,6 +3391,8 @@ def search( __body["rank"] = rank if rescore is not None: __body["rescore"] = rescore + if retriever is not None: + __body["retriever"] = retriever if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: @@ -3623,7 +3646,7 @@ def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Runs a search with a search template. ``_ @@ -4132,8 +4155,8 @@ def update_by_query( ) -> ObjectApiResponse[t.Any]: """ Updates documents that match the specified query. If no query is specified, performs - an update on every document in the index without changing the source, for example - to pick up a mapping change. + an update on every document in the data stream or index without modifying the + source, which is useful for picking up mapping changes. ``_ diff --git a/elasticsearch_serverless/_sync/client/async_search.py b/elasticsearch_serverless/_sync/client/async_search.py index 12071f7..264ed1e 100644 --- a/elasticsearch_serverless/_sync/client/async_search.py +++ b/elasticsearch_serverless/_sync/client/async_search.py @@ -36,8 +36,11 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by ID. If the search is still running, the search request - will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async search by identifier. If the search is still running, the search + request will be cancelled. Otherwise, the saved search results are deleted. If + the Elasticsearch security features are enabled, the deletion of a specific async + search is restricted to: the authenticated user that submitted the original search + request; users that have the `cancel_task` cluster privilege. 
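A sketch of the submit-then-delete flow, with a placeholder index name:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

resp = client.async_search.submit(
    index="my-index",
    query={"match_all": {}},
    wait_for_completion_timeout="1s",
    keep_on_completion=True,  # keep results so they can be fetched (or deleted) later
)
client.async_search.delete(id=resp["id"])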
``_

@@ -77,7 +80,9 @@ def get( ) -> ObjectApiResponse[t.Any]: """ Retrieves the results of a previously submitted async search request given its - ID. + identifier. If the Elasticsearch security features are enabled, access to the + results of a specific async search is restricted to the user or API key that + submitted it. ``_ @@ -131,8 +136,10 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of a previously submitted async search request given its - ID. + Get async search status. Retrieves the status of a previously submitted async + search request given its identifier, without retrieving search results. If the + Elasticsearch security features are enabled, use of this API is restricted to + the `monitoring_user` role. ``_ @@ -298,7 +305,15 @@ def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a search request asynchronously. + Runs a search request asynchronously. When the primary sort of the results is + an indexed field, shards get sorted based on the minimum and maximum value that + they hold for that field, hence partial results become available following the sort + criteria that was requested. Warning: Async search does not support scroll or + search requests that only include the suggest section. By default, Elasticsearch + doesn’t allow you to store an async search response larger than 10Mb and an attempt + to do this results in an error. The maximum allowed size for a stored async search + response can be set by changing the `search.max_async_search_response_size` cluster + level setting. ``_ diff --git a/elasticsearch_serverless/_sync/client/cat.py b/elasticsearch_serverless/_sync/client/cat.py index f1a2ef5..addc866 100644 --- a/elasticsearch_serverless/_sync/client/cat.py +++ b/elasticsearch_serverless/_sync/client/cat.py @@ -53,8 +53,11 @@ def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Shows information about currently configured aliases to indices including filter - and routing infos. + Retrieves the cluster’s index aliases, including filter and routing information. + The API does not return data stream aliases. IMPORTANT: cat APIs are only intended + for human consumption using the command line or the Kibana console. They are + not intended for use by applications. For application consumption, use the aliases + API. ``_ @@ -131,7 +134,11 @@ def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about existing component_templates templates. + Returns information about component templates in a cluster. Component templates + are building blocks for constructing index templates that specify index mappings, + settings, and aliases. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get component template API. ``_ @@ -204,8 +211,12 @@ def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides quick access to the document count of the entire cluster, or individual - indices. + Provides quick access to a document count for a data stream, an index, or an + entire cluster. NOTE: The document count only includes live documents, not deleted + documents which have not yet been removed by the merge process.
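For example, a quick human-readable count; as the note below stresses, applications should use the count API instead:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

# Tabular text output with a header row; intended for the command line.
print(client.cat.count(index="my-index", v=True))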
IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the count API. ``_ @@ -363,8 +374,16 @@ def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about indices: number of primaries and replicas, document - counts, disk size, ... + Returns high-level information about indices in a cluster, including backing + indices for data streams. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index API. Use the cat indices API to + get the following information for each index in a cluster: shard count; document + count; deleted document count; primary store size; total store size of all shards, + including shard replicas. These metrics are retrieved directly from Lucene, which + Elasticsearch uses internally to power indexing and search. As a result, all + document counts include hidden nested documents. To get an accurate count of + Elasticsearch documents, use the cat count or count APIs. ``_ @@ -489,7 +508,10 @@ def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about data frame analytics jobs. + Returns configuration and usage information about data frame analytics jobs. + IMPORTANT: cat APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For application + consumption, use the get data frame analytics jobs statistics API. ``_ @@ -600,7 +622,12 @@ def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about datafeeds. + Returns configuration and usage information about datafeeds. This API returns + a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, + you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges + to use this API. IMPORTANT: cat APIs are only intended for human consumption + using the Kibana console or command line. They are not intended for use by applications. + For application consumption, use the get datafeed statistics API. ``_ @@ -717,7 +744,13 @@ def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about anomaly detection jobs. + Returns configuration and usage information for anomaly detection jobs. This + API returns a maximum of 10,000 jobs. If the Elasticsearch security features + are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` + cluster privileges to use this API. IMPORTANT: cat APIs are only intended for + human consumption using the Kibana console or command line. They are not intended + for use by applications. For application consumption, use the get anomaly detection + job statistics API. ``_ @@ -837,7 +870,10 @@ def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about inference trained models. + Returns configuration and usage information about inference trained models. 
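For instance, selecting a few columns for the terminal; the column names here are assumptions, not taken from this patch:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

# Column names assumed from the usual cat output for trained models.
print(client.cat.ml_trained_models(h="id,heap_size,operations", v=True))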
IMPORTANT: + cat APIs are only intended for human consumption using the Kibana console or + command line. They are not intended for use by applications. For application + consumption, use the get trained models statistics API. ``_ @@ -960,7 +996,10 @@ def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about transforms. + Returns configuration and usage information about transforms. IMPORTANT: cat + APIs are only intended for human consumption using the Kibana console or command + line. They are not intended for use by applications. For application consumption, + use the get transform statistics API. ``_ diff --git a/elasticsearch_serverless/_sync/client/cluster.py b/elasticsearch_serverless/_sync/client/cluster.py index 22c2e11..0486421 100644 --- a/elasticsearch_serverless/_sync/client/cluster.py +++ b/elasticsearch_serverless/_sync/client/cluster.py @@ -40,7 +40,8 @@ def delete_component_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a component template + Deletes component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. ``_ @@ -139,7 +140,7 @@ def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns one or more component templates + Retrieves information about component templates. ``_ @@ -233,7 +234,6 @@ def put_component_template( *, name: str, template: t.Optional[t.Mapping[str, t.Any]] = None, - cause: t.Optional[str] = None, create: t.Optional[bool] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -248,7 +248,19 @@ def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a component template + Creates or updates a component template. Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. + You can include comments anywhere in the request body except before the opening + curly bracket. ``_ @@ -263,7 +275,6 @@ def put_component_template( update settings API. :param template: The template to be applied which includes mappings, settings, or aliases configuration. - :param cause: :param create: If `true`, this request cannot replace or update existing component templates. :param deprecated: Marks this index template as deprecated. 
When creating or @@ -287,8 +298,6 @@ def put_component_template( __path = f"/_component_template/{_quote(name)}" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} - if cause is not None: - __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: diff --git a/elasticsearch_serverless/_sync/client/enrich.py b/elasticsearch_serverless/_sync/client/enrich.py index 2d543c8..43bda54 100644 --- a/elasticsearch_serverless/_sync/client/enrich.py +++ b/elasticsearch_serverless/_sync/client/enrich.py @@ -109,7 +109,7 @@ def get_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets information about an enrich policy. + Returns information about an enrich policy. ``_ @@ -151,7 +151,7 @@ def put_policy( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new enrich policy. + Creates an enrich policy. ``_ @@ -197,7 +197,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets enrich coordinator statistics and information about enrich policies that + Returns enrich coordinator statistics and information about enrich policies that are currently executing. ``_ diff --git a/elasticsearch_serverless/_sync/client/eql.py b/elasticsearch_serverless/_sync/client/eql.py index 2229d15..92d594e 100644 --- a/elasticsearch_serverless/_sync/client/eql.py +++ b/elasticsearch_serverless/_sync/client/eql.py @@ -36,8 +36,8 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search by ID. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async EQL search or a stored synchronous EQL search. The API also + deletes results for the search. ``_ @@ -77,7 +77,8 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns async results from previously executed Event Query Language (EQL) search + Returns the current status and available results for an async EQL search or a + stored synchronous EQL search. ``_ @@ -121,8 +122,8 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the status of a previously submitted async or stored Event Query Language - (EQL) search + Returns the current status for an async EQL search or a stored synchronous EQL + search without returning results. ``_ diff --git a/elasticsearch_serverless/_sync/client/graph.py b/elasticsearch_serverless/_sync/client/graph.py index d4801a6..b76171f 100644 --- a/elasticsearch_serverless/_sync/client/graph.py +++ b/elasticsearch_serverless/_sync/client/graph.py @@ -45,8 +45,8 @@ def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explore extracted and summarized information about the documents and terms in - an index. + Extracts and summarizes information about the documents and terms in an Elasticsearch + data stream or index. ``_ diff --git a/elasticsearch_serverless/_sync/client/indices.py b/elasticsearch_serverless/_sync/client/indices.py index 812aee1..988d44e 100644 --- a/elasticsearch_serverless/_sync/client/indices.py +++ b/elasticsearch_serverless/_sync/client/indices.py @@ -129,8 +129,7 @@ def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the analysis process on a text and return the tokens breakdown of the - text. + Performs analysis on a text string and returns the resulting tokens. 
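A small sketch of the analyze call with arbitrary sample text:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

resp = client.indices.analyze(
    analyzer="standard",
    text="The QUICK brown foxes.",
)
print([token["token"] for token in resp["tokens"]])
# ['the', 'quick', 'brown', 'foxes']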
``_ @@ -221,7 +220,7 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an index with optional settings and mappings. + Creates a new index. ``_ @@ -285,7 +284,8 @@ def create_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a data stream + Creates a data stream. You must have a matching index template with data stream + enabled. ``_ @@ -331,7 +331,7 @@ def data_streams_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides statistics on operations happening in a data stream. + Retrieves statistics for one or more data streams. ``_ @@ -386,7 +386,7 @@ def delete( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index. + Deletes one or more indices. ``_ @@ -452,7 +452,7 @@ def delete_alias( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an alias. + Removes a data stream or index from an alias. ``_ @@ -512,7 +512,8 @@ def delete_data_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the data stream lifecycle of the selected data streams. + Removes the data lifecycle from a data stream rendering it not managed by the + data stream lifecycle ``_ @@ -565,7 +566,7 @@ def delete_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a data stream. + Deletes one or more data streams and their backing indices. ``_ @@ -608,7 +609,9 @@ def delete_index_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index template. + The provided may contain multiple template names separated by + a comma. If multiple template names are specified then there is no wildcard support + and the provided names should match completely with existing templates. ``_ @@ -665,7 +668,7 @@ def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index exists. + Checks if a data stream, index, or alias exists. ``_ @@ -737,7 +740,7 @@ def exists_alias( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular alias exists. + Checks if an alias exists. ``_ @@ -911,7 +914,8 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more indices. + Returns information about one or more indices. For data streams, the API returns + information about the stream’s backing indices. ``_ @@ -994,7 +998,7 @@ def get_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an alias. + Retrieves information for one or more aliases. ``_ @@ -1065,7 +1069,7 @@ def get_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the data stream lifecycle of the selected data streams. + Retrieves the data stream lifecycle configuration of one or more data streams. ``_ @@ -1118,7 +1122,7 @@ def get_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns data streams. + Retrieves information about one or more data streams. ``_ @@ -1169,7 +1173,7 @@ def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an index template. + Returns information about one or more index templates. 
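For example, listing matching templates; the name pattern is a placeholder and wildcards are supported:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

resp = client.indices.get_index_template(name="my-template-*")
for item in resp["index_templates"]:
    print(item["name"], item["index_template"]["index_patterns"])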
``_ @@ -1236,7 +1240,8 @@ def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns mappings for one or more indices. + Retrieves mapping definitions for one or more indices. For data streams, the + API retrieves mappings for the stream’s backing indices. ``_ @@ -1314,7 +1319,8 @@ def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns settings for one or more indices. + Returns setting information for one or more indices. For data streams, returns + setting information for the stream’s backing indices. ``_ @@ -1388,7 +1394,14 @@ def migrate_to_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrates an alias to a data stream + Converts an index alias to a data stream. You must have a matching index template + that is data stream enabled. The alias must meet the following criteria: The + alias must have a write index; All indices for the alias must have a `@timestamp` + field mapping of a `date` or `date_nanos` field type; The alias must not have + any filters; The alias must not use custom routing. If successful, the request + removes the alias and creates a data stream with the same name. The indices for + the alias become hidden backing indices for the stream. The write index for the + alias becomes the write index for the stream. ``_ @@ -1425,7 +1438,7 @@ def modify_data_stream( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Modifies a data stream + Performs one or more data stream modification actions in a single atomic operation. ``_ @@ -1482,7 +1495,7 @@ def put_alias( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an alias. + Adds a data stream or index to an alias. ``_ @@ -1581,7 +1594,7 @@ def put_data_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the data stream lifecycle of the selected data streams. + Update the data lifecycle of the specified data streams. ``_ @@ -1677,7 +1690,8 @@ def put_index_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices. ``_ @@ -1831,7 +1845,9 @@ def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index mappings. + Adds new fields to an existing data stream or index. You can also use this API + to change the search settings of existing fields. For data streams, these changes + are applied to all backing indices by default. ``_ @@ -1955,7 +1971,8 @@ def put_settings( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index settings. + Changes a dynamic index setting in real time. For data streams, index setting + changes are applied to all backing indices by default. ``_ @@ -2052,7 +2069,8 @@ def put_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices. ``_ @@ -2133,7 +2151,9 @@ def refresh( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the refresh operation in one or more indices. 
+ A refresh makes recent operations performed on one or more indices available + for search. For data streams, the API runs the refresh operation on the stream’s + backing indices. ``_ @@ -2193,7 +2213,8 @@ def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about any matching indices, aliases, and data streams + Resolves the specified name(s) and/or index patterns for indices, aliases, and + data streams. Multiple patterns and remote clusters are supported. ``_ @@ -2251,8 +2272,7 @@ def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an alias to point to a new index when the existing index is considered - to be too large or too old. + Creates a new index for a data stream or index alias. ``_ @@ -2341,7 +2361,7 @@ def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate matching the given index name against the index templates in the system + ``_ @@ -2414,7 +2434,7 @@ def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate resolving the given template name or body + Returns the index configuration that would be applied by a particular index template. ``_ @@ -2532,7 +2552,7 @@ def update_aliases( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates index aliases. + Adds a data stream or index to an alias. ``_ @@ -2600,7 +2620,7 @@ def validate_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a user to validate a potentially expensive query without executing it. + Validates a potentially expensive query without executing it. ``_ diff --git a/elasticsearch_serverless/_sync/client/inference.py b/elasticsearch_serverless/_sync/client/inference.py index 3cb0d1a..029a8d9 100644 --- a/elasticsearch_serverless/_sync/client/inference.py +++ b/elasticsearch_serverless/_sync/client/inference.py @@ -36,8 +36,10 @@ def delete( str, ] ] = None, + dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -48,6 +50,10 @@ def delete( :param inference_id: The inference Id :param task_type: The task type + :param dry_run: When true, the endpoint is not deleted, and a list of ingest + processors which reference this endpoint is returned + :param force: When true, the inference endpoint is forcefully deleted even if + it is still being used by ingest processors or semantic text fields """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -58,10 +64,14 @@ def delete( else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} + if dry_run is not None: + __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if force is not None: + __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: @@ -139,7 +149,7 @@ def inference( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform inference + Perform inference on the service ``_ @@ -210,7 +220,7 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Configure an 
inference endpoint for use in the Inference API + Create an inference endpoint ``_ diff --git a/elasticsearch_serverless/_sync/client/ingest.py b/elasticsearch_serverless/_sync/client/ingest.py index a09566a..1f7995c 100644 --- a/elasticsearch_serverless/_sync/client/ingest.py +++ b/elasticsearch_serverless/_sync/client/ingest.py @@ -40,7 +40,7 @@ def delete_pipeline( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline. + Deletes one or more existing ingest pipeline. ``_ @@ -88,7 +88,8 @@ def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a pipeline. + Returns information about one or more ingest pipelines. This API returns a local + reference of the pipeline. ``_ @@ -131,7 +132,10 @@ def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of the built-in patterns. + Extracts structured fields out of a single text field within a document. You + choose which field to extract matched fields from, as well as the grok pattern + you expect will match. A grok pattern is like a regular expression that supports + aliased expressions that can be reused. ``_ """ @@ -175,7 +179,8 @@ def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline. + Creates or updates an ingest pipeline. Changes made using this API take effect + immediately. ``_ @@ -254,7 +259,7 @@ def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to simulate a pipeline with example documents. + Executes an ingest pipeline against a set of provided documents. ``_ diff --git a/elasticsearch_serverless/_sync/client/license.py b/elasticsearch_serverless/_sync/client/license.py index 7ecca5b..a61983b 100644 --- a/elasticsearch_serverless/_sync/client/license.py +++ b/elasticsearch_serverless/_sync/client/license.py @@ -37,7 +37,9 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves licensing information for the cluster + This API returns information about the type of license, when it was issued, and + when it expires, for example. For more information about the different types + of licenses, see https://www.elastic.co/subscriptions. ``_ diff --git a/elasticsearch_serverless/_sync/client/logstash.py b/elasticsearch_serverless/_sync/client/logstash.py index 0aceada..673785e 100644 --- a/elasticsearch_serverless/_sync/client/logstash.py +++ b/elasticsearch_serverless/_sync/client/logstash.py @@ -36,7 +36,7 @@ def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes Logstash Pipelines used by Central Management + Deletes a pipeline used for Logstash Central Management. ``_ @@ -70,7 +70,7 @@ def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves Logstash Pipelines used by Central Management + Retrieves pipelines used for Logstash Central Management. ``_ @@ -109,7 +109,7 @@ def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates Logstash Pipelines used for Central Management + Creates or updates a pipeline used for Logstash Central Management. 
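A hedged sketch of the put_pipeline call; the pipeline document fields follow the Logstash central-management schema and are assumptions here:

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.com", api_key="<api-key>")

client.logstash.put_pipeline(
    id="my-pipeline",
    pipeline={
        # Field names assumed from the Logstash central-management document schema.
        "description": "Sample pipeline for illustration",
        "last_modified": "2024-06-24T00:00:00.000Z",
        "pipeline": "input { stdin {} } output { stdout {} }",
        "pipeline_metadata": {"type": "logstash_pipeline", "version": "1"},
        "pipeline_settings": {"pipeline.workers": 1},
        "username": "elastic",
    },
)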
``_

diff --git a/elasticsearch_serverless/_sync/client/ml.py b/elasticsearch_serverless/_sync/client/ml.py index bdf4843..a09ee18 100644 --- a/elasticsearch_serverless/_sync/client/ml.py +++ b/elasticsearch_serverless/_sync/client/ml.py @@ -42,8 +42,19 @@ def close_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes one or more anomaly detection jobs. A job can be opened and closed multiple - times throughout its lifecycle. + Close anomaly detection jobs. A job can be opened and closed multiple times throughout + its lifecycle. A closed job cannot receive data or perform analysis operations, + but you can still explore and navigate results. When you close a job, it runs + housekeeping tasks such as pruning the model history, flushing buffers, calculating + final results and persisting the model snapshots. Depending upon the size of + the job, it could take several minutes to close and the equivalent time to re-open. + After it is closed, the job has a minimal overhead on the cluster except for + maintaining its metadata. Therefore it is a best practice to close jobs that + are no longer required to process data. If you close an anomaly detection job + whose datafeed is running, the request first tries to stop the datafeed. This + behavior is equivalent to calling the stop datafeed API with the same timeout and + force parameters as the close job request. When a datafeed that has a specified + end date stops, it automatically closes its associated job. ``_ @@ -97,7 +108,7 @@ def delete_calendar( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a calendar. + Removes all scheduled events from a calendar, then deletes it. ``_ @@ -211,7 +222,7 @@ def delete_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing data frame analytics job. + Deletes a data frame analytics job. ``_ @@ -294,7 +305,9 @@ def delete_filter( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a filter. + Deletes a filter. If an anomaly detection job references the filter, you cannot + delete the filter. You must update or delete the job before you can delete the + filter. ``_ @@ -331,7 +344,12 @@ def delete_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing anomaly detection job. + Deletes an anomaly detection job. All job configuration, model state and results + are deleted. It is not currently possible to delete multiple jobs using wildcards + or a comma separated list. If you delete a job that has a datafeed, the request + first tries to delete the datafeed. This behavior is equivalent to calling the + delete datafeed API with the same timeout and force parameters as the delete + job request. ``_ @@ -419,7 +437,9 @@ def delete_trained_model_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a model alias that refers to the trained model + Deletes a trained model alias. This API deletes an existing model alias that + refers to a trained model. If the model alias is missing or refers to a model + other than the one identified by the `model_id`, this API returns an error. ``_ @@ -465,7 +485,9 @@ def estimate_model_memory( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Estimates the model memory + Makes an estimation of the memory usage for an anomaly detection job model.
It + is based on analysis configuration details for the job and cardinality estimates + for the fields it references. ``_ @@ -523,7 +545,10 @@ def evaluate_data_frame( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates the data frame analytics for an annotated index. + Evaluates the data frame analytics for an annotated index. The API packages together + commonly used evaluation metrics for various types of machine learning features. + This has been designed for use on indexes created by data frame analytics. Evaluation + requires both a ground truth field and an analytics result field to be present. ``_ @@ -578,7 +603,14 @@ def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. + Forces any buffered data to be processed by the job. The flush jobs API is only + applicable when sending data for analysis using the post data API. Depending + on the content of the buffer, it might additionally calculate new results. + Both flush and close operations are similar; however, the flush is more efficient + if you are expecting to send more data for analysis. When flushing, the job remains + open and is available to continue analyzing data. A close operation additionally + prunes and persists the model state to disk and the job must be opened again + before analyzing further data. ``_ @@ -761,7 +793,9 @@ def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. + Retrieves configuration information for data frame analytics jobs. You can get + information for multiple data frame analytics jobs in a single API request by + using a comma-separated list of data frame analytics jobs or a wildcard expression. ``_ @@ -882,7 +916,12 @@ def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. + Retrieves usage information for datafeeds. You can get statistics for multiple + datafeeds in a single API request by using a comma-separated list of datafeeds + or a wildcard expression. You can get statistics for all datafeeds by using `_all`, + by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the + datafeed is stopped, the only information you receive is the `datafeed_id` and + the `state`. This API returns a maximum of 10,000 datafeeds. ``_ @@ -930,7 +969,11 @@ def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. + Retrieves configuration information for datafeeds. You can get information for + multiple datafeeds in a single API request by using a comma-separated list of + datafeeds or a wildcard expression. You can get information for all datafeeds + by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. + This API returns a maximum of 10,000 datafeeds. ``_ @@ -985,7 +1028,7 @@ def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. + Retrieves filters. You can get a single filter or all filters. ``_ @@ -1076,7 +1119,11 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. + Retrieves configuration information for anomaly detection jobs.
You can get information + for multiple anomaly detection jobs in a single API request by using a group + name, a comma-separated list of jobs, or a wildcard expression. You can get information + for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, + or by omitting the `<job_id>`. ``_ @@ -1146,7 +1193,18 @@ def get_overall_buckets( ) -> ObjectApiResponse[t.Any]: """ Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. + anomaly detection jobs. The `overall_score` is calculated by combining the scores + of all the buckets within the overall bucket span. First, the maximum `anomaly_score` + per anomaly detection job in the overall bucket is calculated. Then the `top_n` + of those scores are averaged to result in the `overall_score`. This means that + you can fine-tune the `overall_score` so that it is more or less sensitive to + the number of jobs that detect an anomaly at the same time. For example, if you + set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall + bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` + is high only when all jobs detect anomalies in that overall bucket. If you set + the `bucket_span` parameter (to a value greater than its default), the `overall_score` + is the maximum `overall_score` of the overall buckets that have a span equal + to the jobs' largest bucket span. ``_ @@ -1208,7 +1266,7 @@ def get_overall_buckets( def get_trained_models( self, *, - model_id: t.Optional[str] = None, + model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, decompress_definition: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -1224,14 +1282,16 @@ def get_trained_models( ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, - tags: t.Optional[str] = None, + tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained inference model. + Retrieves configuration information for a trained model. ``_ - :param model_id: The unique identifier of the trained model. + :param model_id: The unique identifier of the trained model or a model alias. + You can get information for multiple trained models in a single API request + by using a comma-separated list of model IDs or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions @@ -1299,7 +1359,9 @@ def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained inference models. + Retrieves usage information for trained models. You can get usage information + for multiple trained models in a single API request by using a comma-separated + list of model IDs or a wildcard expression. ``_ @@ -1354,7 +1416,7 @@ def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate a trained model. + Evaluates a trained model. ``_ @@ -1409,7 +1471,12 @@ def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens one or more anomaly detection jobs. + Opens one or more anomaly detection jobs. An anomaly detection job must be opened + in order for it to be ready to receive and analyze data.
It can be opened and + closed multiple times throughout its lifecycle. When you open a new job, it starts + with an empty model. When you open an existing job, the most recent model state + is automatically loaded. The job is ready to resume its analysis from where it + left off, once new data is received. ``_ @@ -1456,7 +1523,7 @@ def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Posts scheduled events in a calendar. + Adds scheduled events to a calendar. ``_ @@ -1503,7 +1570,7 @@ def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews that will be analyzed given a data frame analytics config. + Previews the extracted features used by a data frame analytics config. ``_ @@ -1556,7 +1623,15 @@ def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. + Previews a datafeed. This API returns the first "page" of search results from + a datafeed. You can preview an existing datafeed or provide configuration details + for a datafeed and anomaly detection job in the API. The preview shows the structure + of the data that will be passed to the anomaly detection engine. IMPORTANT: When + Elasticsearch security features are enabled, the preview uses the credentials + of the user that called the API. However, when the datafeed starts it uses the + roles of the last user that created or updated the datafeed. To get a preview + that accurately reflects the behavior of the datafeed, use the appropriate credentials. + You can also use secondary authorization headers to supply the credentials. ``_ @@ -1623,7 +1698,7 @@ def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a calendar. + Creates a calendar. ``_ @@ -1663,7 +1738,7 @@ def put_calendar_job( self, *, calendar_id: str, - job_id: str, + job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -1733,7 +1808,9 @@ def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. + Instantiates a data frame analytics job. This API creates a data frame analytics + job that performs an analysis on the source indices and stores the outcome in + a destination index. ``_ @@ -1894,7 +1971,17 @@ def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. + Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis + by an anomaly detection job. You can associate only one datafeed with each anomaly + detection job. The datafeed contains a query that runs at a defined interval + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) + at each interval. When Elasticsearch security features are enabled, your datafeed + remembers which roles the user who created it had at the time of creation and + runs the query using those same roles. If you provide secondary authorization + headers, those credentials are used instead. You must use Kibana, this API, or + the create anomaly detection jobs API to create a datafeed. Do not add a datafeed + directly to the `.ml-config` index. Do not give users `write` privileges on the + `.ml-config` index.
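[Editor's note: to ground the put_datafeed description just above, a hedged sketch, illustrative only and not part of the patch, of creating an anomaly detection job and its single datafeed. The job ID, index pattern, and analysis settings are invented.]

from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.io", api_key="<api-key>")

# Create the anomaly detection job first; a datafeed requires an existing job.
client.ml.put_job(
    job_id="latency-job",
    analysis_config={
        "bucket_span": "15m",
        "detectors": [{"function": "mean", "field_name": "latency_ms"}],
    },
    data_description={"time_field": "@timestamp"},
)

# One datafeed per job; its query runs at the configured `frequency`.
client.ml.put_datafeed(
    datafeed_id="datafeed-latency-job",
    job_id="latency-job",
    indices=["web-logs-*"],
    query={"match_all": {}},
)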
``_ @@ -2040,7 +2127,9 @@ def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. + Instantiates a filter. A filter contains a list of strings. It can be used by + one or more anomaly detection jobs. Specifically, filters are referenced in the + `custom_rules` property of detector configuration objects. ``_ @@ -2119,7 +2208,8 @@ def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates an anomaly detection job. + Instantiates an anomaly detection job. If you include a `datafeed_config`, you + must have read index privileges on the source index. ``_ @@ -2292,7 +2382,7 @@ def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an inference trained model. + Enables you to supply a trained model that is not created by data frame analytics. ``_ @@ -2387,8 +2477,19 @@ def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new model alias (or reassigns an existing one) to refer to the trained - model + Creates or updates a trained model alias. A trained model alias is a logical + name used to reference a single trained model. You can use aliases instead of + trained model identifiers to make it easier to reference your models. For example, + you can use aliases in inference aggregations and processors. An alias must be + unique and refer to only a single trained model. However, you can have multiple + aliases for each trained model. If you use this API to update an alias such that + it references a different trained model ID and the model uses a different type + of data frame analytics, an error occurs. For example, this situation occurs + if you have a trained model for regression analysis and a trained model for classification + analysis; you cannot reassign an alias from one type of trained model to another. + If you use this API to update an alias and there are very few input fields in + common between the old and new trained models for the model alias, the API returns + a warning. ``_ @@ -2437,7 +2538,7 @@ def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition + Creates part of a trained model definition. ``_ @@ -2504,7 +2605,9 @@ def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary + Creates a trained model vocabulary. This API is supported only for natural language + processing (NLP) models. The vocabulary is stored in the index as described in + `inference_config.*.vocabulary` of the trained model definition. ``_ @@ -2553,7 +2656,9 @@ def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing anomaly detection job. + Resets an anomaly detection job. All model state and results are deleted. The + job is ready to start over as if it had just been created. It is not currently + possible to reset multiple jobs using wildcards or a comma separated list. ``_ @@ -2597,7 +2702,16 @@ def start_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. + Starts a data frame analytics job. A data frame analytics job can be started + and stopped multiple times throughout its lifecycle. 
If the destination index + does not exist, it is created automatically the first time you start the data + frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` + settings for the destination index are copied from the source index. If there + are multiple source indices, the destination index copies the highest setting + values. The mappings for the destination index are also copied from the source + indices. If there are any mapping conflicts, the job fails to start. If the destination + index exists, it is used as is. You can therefore set up the destination index + in advance with custom settings and mappings. ``_ @@ -2643,7 +2757,17 @@ def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. + Starts one or more datafeeds. A datafeed must be started in order to retrieve + data from Elasticsearch. A datafeed can be started and stopped multiple times + throughout its lifecycle. Before you can start a datafeed, the anomaly detection + job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, + it continues processing input data from the next millisecond after it was stopped. + If new data was indexed for that exact millisecond between stopping and starting, + it will be ignored. When Elasticsearch security features are enabled, your datafeed + remembers which roles the last user to create or update it had at the time of + creation or update and runs the query using those same roles. If you provided + secondary authorization headers when you created or updated the datafeed, those + credentials are used instead. ``_ @@ -2705,7 +2829,8 @@ def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trained model deployment. + Starts a trained model deployment, which allocates the model to every machine + learning node. ``_ @@ -2782,7 +2907,8 @@ def stop_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. + Stops one or more data frame analytics jobs. A data frame analytics job can be + started and stopped multiple times throughout its lifecycle. ``_ @@ -2841,7 +2967,9 @@ def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. + Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data + from Elasticsearch. A datafeed can be started and stopped multiple times throughout + its lifecycle. ``_ @@ -2896,7 +3024,7 @@ def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop a trained model deployment. + Stops a trained model deployment. ``_ @@ -2955,7 +3083,7 @@ def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a data frame analytics job. + Updates an existing data frame analytics job. ``_ @@ -3056,7 +3184,11 @@ def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a datafeed. + Updates the properties of a datafeed. You must stop and start the datafeed for + the changes to be applied. When Elasticsearch security features are enabled, + your datafeed remembers which roles the user who updated it had at the time of + the update and runs the query using those same roles. 
If you provide secondary + authorization headers, those credentials are used instead. ``_ @@ -3212,7 +3344,7 @@ def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items. + Updates the description of a filter, adds items, or removes items from the list. ``_ @@ -3398,3 +3530,57 @@ def update_job( return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body ) + + @_rewrite_parameters( + body_fields=("number_of_allocations",), + ) + def update_trained_model_deployment( + self, + *, + model_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + number_of_allocations: t.Optional[int] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Updates certain properties of a trained model deployment. + + ``_ + + :param model_id: The unique identifier of the trained model. Currently, only + PyTorch models are supported. + :param number_of_allocations: The number of model allocations on each node where + the model is deployed. All allocations on a node share the same copy of the + model in memory but use a separate set of threads to evaluate the model. + Increasing this value generally increases the throughput. If this setting + is greater than the number of hardware threads, it will automatically be changed + to a value less than the number of hardware threads. + """ + if model_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'model_id'") + __path = f"/_ml/trained_models/{_quote(model_id)}/deployment/_update" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if number_of_allocations is not None: + __body["number_of_allocations"] = number_of_allocations + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", __path, params=__query, headers=__headers, body=__body + ) diff --git a/elasticsearch_serverless/_sync/client/query_ruleset.py b/elasticsearch_serverless/_sync/client/query_ruleset.py index 821a5bd..688c1c1 100644 --- a/elasticsearch_serverless/_sync/client/query_ruleset.py +++ b/elasticsearch_serverless/_sync/client/query_ruleset.py @@ -70,7 +70,7 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset. + Returns the details about a query ruleset. ``_ @@ -107,7 +107,7 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists query rulesets. + Returns summarized information about existing query rulesets.
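[Editor's note: the update_trained_model_deployment method added above can be called as in this sketch. The model ID is a placeholder; per the docstring, only PyTorch models are supported, and `client` is an Elasticsearch serverless client as in the earlier sketches.]

# Scale an existing deployment to four allocations per node; the request
# body carries only `number_of_allocations`, matching the generated method.
client.ml.update_trained_model_deployment(
    model_id="my-pytorch-model",
    number_of_allocations=4,
)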
``_ diff --git a/elasticsearch_serverless/_sync/client/search_application.py b/elasticsearch_serverless/_sync/client/search_application.py index 5b3d63a..1ad2d98 100644 --- a/elasticsearch_serverless/_sync/client/search_application.py +++ b/elasticsearch_serverless/_sync/client/search_application.py @@ -104,7 +104,7 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application. + Returns the details about a search application. ``_ @@ -304,16 +304,19 @@ def search( human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, + typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application + Performs a search against a search application. ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override any defaults specified in the template. + :param typed_keys: Determines whether aggregation names are prefixed by their + respective types in the response. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -328,6 +331,8 @@ def search( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if typed_keys is not None: + __query["typed_keys"] = typed_keys if not __body: if params is not None: __body["params"] = params diff --git a/elasticsearch_serverless/_sync/client/security.py b/elasticsearch_serverless/_sync/client/security.py index ea94c07..e76c0b9 100644 --- a/elasticsearch_serverless/_sync/client/security.py +++ b/elasticsearch_serverless/_sync/client/security.py @@ -35,8 +35,12 @@ def authenticate( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables authentication as a user and retrieve information about the authenticated - user. + Enables you to submit a request with a basic auth header to authenticate a user + and retrieve information about the authenticated user. A successful call returns + a JSON structure that shows user information such as their username, the roles + that are assigned to the user, any assigned metadata, and information about the + realms that authenticated and authorized the user. If the user cannot be authenticated, + this API returns a 401 status code. ``_ """ @@ -75,7 +79,11 @@ def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key for access without requiring basic authentication. + Creates an API key for access without requiring basic authentication. A successful + request returns a JSON structure that contains the API key, its unique id, and + its name. If applicable, it also returns expiration information for the API key + in milliseconds. NOTE: By default, API keys never expire. You can specify expiration + information when you create the API keys. ``_ @@ -142,7 +150,10 @@ def get_api_key( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for one or more API keys. + Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` + privilege, this API returns only the API keys that you own. If you have `read_security`, + `manage_api_key` or greater privileges (including `manage_security`), this API + returns all API keys regardless of ownership.
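[Editor's note: a short hedged sketch of the API key flow described by the security docstrings above; the key name and expiration are example values, and `client` is constructed as in the earlier sketches.]

# Create a key (it never expires unless `expiration` is set), then fetch it by ID.
created = client.security.create_api_key(name="ci-key", expiration="7d")
info = client.security.get_api_key(id=created["id"])
print(info["api_keys"][0]["name"])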
``_ @@ -278,7 +289,13 @@ def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more API keys. + Invalidates one or more API keys. The `manage_api_key` privilege allows deleting + any API keys. The `manage_own_api_key` privilege only allows deleting API keys that are + owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation + request must be issued in one of three formats: - Set the parameter `owner=true`. + - Or, set both `username` and `realm_name` to match the user’s identity. - Or, + if the request is issued by an API key (that is, an API key invalidating itself), specify + its ID in the `ids` field. ``_ @@ -364,7 +381,8 @@ def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for API keys using a subset of query DSL + Retrieves information for API keys in a paginated manner. You can optionally + filter the results with a query. ``_ @@ -474,7 +492,22 @@ def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates attributes of an existing API key. + Updates attributes of an existing API key. Users can only update API keys that + they created or that were granted to them. Use this API to update API keys created + by the create API key or grant API key APIs. If you need to apply the same update + to many API keys, you can use the bulk update API keys API to reduce overhead. It’s not + possible to update expired API keys or API keys that have been invalidated by + the invalidate API key API. This API supports updates to an API key’s access scope and + metadata. The access scope of an API key is derived from the `role_descriptors` + you specify in the request, and a snapshot of the owner user’s permissions at + the time of the request. The snapshot of the owner’s permissions is updated automatically + on every call. If you don’t specify `role_descriptors` in the request, a call + to this API might still change the API key’s access scope. This change can occur + if the owner user’s permissions have changed since the API key was created or + last modified. To update another user’s API key, use the `run_as` feature to + submit a request on behalf of another user. IMPORTANT: It’s not possible to use + an API key as the authentication credential for this API. To update an API key, + the owner user’s credentials are required. ``_ diff --git a/elasticsearch_serverless/_sync/client/synonyms.py b/elasticsearch_serverless/_sync/client/synonyms.py index f8abb94..a978210 100644 --- a/elasticsearch_serverless/_sync/client/synonyms.py +++ b/elasticsearch_serverless/_sync/client/synonyms.py @@ -234,7 +234,7 @@ def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonyms set + Creates or updates a synonym set. ``_ diff --git a/elasticsearch_serverless/_sync/client/transform.py b/elasticsearch_serverless/_sync/client/transform.py index aaf43d2..3979e66 100644 --- a/elasticsearch_serverless/_sync/client/transform.py +++ b/elasticsearch_serverless/_sync/client/transform.py @@ -39,7 +39,7 @@ def delete_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing transform. + Deletes a transform. ``_ @@ -229,7 +229,10 @@ def preview_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a transform. + Previews a transform.
It returns a maximum of 100 results. The calculations are + based on all the current data in the source index. It also generates a list of + mappings and settings for the destination index. These values are determined + based on the field types of the source index and the transform aggregations. ``_ @@ -337,7 +340,26 @@ def put_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a transform. + Creates a transform. A transform copies data from source indices, transforms + it, and persists it into an entity-centric destination index. You can also think + of the destination index as a two-dimensional tabular data structure (known as + a data frame). The ID for each document in the data frame is generated from a + hash of the entity, so there is a unique row per entity. You must choose either + the latest or pivot method for your transform; you cannot use both in a single + transform. If you choose to use the pivot method for your transform, the entities + are defined by the set of `group_by` fields in the pivot object. If you choose + to use the latest method, the entities are defined by the `unique_key` field + values in the latest object. You must have `create_index`, `index`, and `read` + privileges on the destination index and `read` and `view_index_metadata` privileges + on the source indices. When Elasticsearch security features are enabled, the + transform remembers which roles the user that created it had at the time of creation + and uses those same roles. If those roles do not have the required privileges + on the source and destination indices, the transform fails when it attempts unauthorized + operations. NOTE: You must use Kibana or this API to create a transform. Do not + add a transform directly into any `.transform-internal*` indices using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + any privileges on `.transform-internal*` indices. If you used transforms prior + to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. ``_ @@ -430,7 +452,9 @@ def reset_transform( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing transform. + Resets a transform. Before you can reset it, you must stop it; alternatively, + use the `force` query parameter. If the destination index was created by the + transform, it is deleted. ``_ @@ -472,7 +496,10 @@ def schedule_now_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Schedules now a transform. + Schedules a transform to run now. If you _schedule_now a transform, it will process + the new data instantly, without waiting for the configured frequency interval. + After the _schedule_now API is called, the transform will be processed again at + `now + frequency` unless the _schedule_now API is called again in the meantime. ``_ @@ -513,7 +540,23 @@ def start_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more transforms. + Starts a transform. When you start a transform, it creates the destination index + if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` + is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions + for the destination index from the source indices and the transform aggregations.
If fields in the destination index are derived from scripts (as in the case of + `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic + mappings unless an index template exists. If it is a latest transform, it does + not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, + create the destination index before you start the transform. Alternatively, you + can create an index template, though it does not affect the deduced mappings + in a pivot transform. When the transform starts, a series of validations occur + to ensure its success. If you deferred validation when you created the transform, + they occur when you start the transform, with the exception of privilege checks. + When Elasticsearch security features are enabled, the transform remembers which + roles the user that created it had at the time of creation and uses those same + roles. If those roles do not have the required privileges on the source and destination + indices, the transform fails when it attempts unauthorized operations. ``_ @@ -648,7 +691,13 @@ def update_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a transform. + Updates certain properties of a transform. All updated properties except `description` + do not take effect until after the transform starts the next checkpoint; this ensures + data consistency in each checkpoint. To use this API, you must have + `read` and `view_index_metadata` privileges for the source indices. You must + also have `index` and `read` privileges for the destination index. When Elasticsearch + security features are enabled, the transform remembers which roles the user who + updated it had at the time of update and runs with those privileges. ``_
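[Editor's note: finally, an illustrative end-to-end sketch consistent with the transform docstrings above. The index names and the pivot definition are invented, not taken from the patch, and `client` is constructed as in the earlier sketches.]

# A pivot transform: one row per customer_id in the destination index.
client.transform.put_transform(
    transform_id="orders-by-customer",
    source={"index": ["orders-*"]},
    dest={"index": "orders-summary"},
    pivot={
        "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
        "aggregations": {"total_spend": {"sum": {"field": "amount"}}},
    },
)

# Creates the destination index if needed, then begins processing.
client.transform.start_transform(transform_id="orders-by-customer")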