diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 3661d769d..440e211e1 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -638,7 +638,8 @@ async def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to perform multiple index/update/delete operations in a single request. + Performs multiple indexing or delete operations in a single API call. This reduces + overhead and can greatly increase indexing speed. ``_ @@ -737,7 +738,7 @@ async def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explicitly clears the search context for a scroll. + Clears the search context and results for a scrolling search. ``_ @@ -787,7 +788,7 @@ async def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close a point in time + Closes a point-in-time. ``_ @@ -994,8 +995,9 @@ async def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new document in the index. Returns a 409 response when a document with - a same ID already exists in the index. + Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and a document with the same ID already exists, the + request fails and returns a 409 response. ``_ @@ -1099,7 +1101,7 @@ async def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a document from the index. + Removes a JSON document from the specified index. ``_ @@ -1223,7 +1225,7 @@ async def delete_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes documents matching the provided query. + Deletes documents that match the specified query. ``_ @@ -1449,7 +1451,7 @@ async def delete_script( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a script. + Deletes a stored script or search template. ``_ @@ -1517,7 +1519,7 @@ async def exists( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document exists in an index. + Checks if a document in an index exists. ``_ @@ -1618,7 +1620,7 @@ async def exists_source( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document source exists in an index. + Checks if a document's `_source` is stored. ``_ @@ -1718,7 +1720,8 @@ async def explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about why a specific matches (or doesn't match) a query. + Returns information about why a specific document matches (or doesn’t match) + a query. ``_ @@ -1837,7 +1840,10 @@ async def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the information about the capabilities of fields among multiple indices. + The field capabilities API returns information about the capabilities of fields + among multiple indices. The field capabilities API returns runtime fields like + any other field. For example, a runtime field with a type of keyword is returned + as any other field that belongs to the `keyword` family. ``_ @@ -2044,7 +2050,7 @@ async def get_script( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a script. + Retrieves a stored script or search template. ``_ @@ -2334,7 +2340,9 @@ async def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a document in an index.
+ Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and the document already exists, the request updates + the document and increments its version. ``_ @@ -2843,7 +2851,7 @@ async def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search template operations in one request. + Runs multiple templated searches with a single request. ``_ @@ -3045,7 +3053,13 @@ async def open_point_in_time( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a point in time that can be used in subsequent searches + A search request by default executes against the most recent visible data of + the target indices, which is called a point in time. An Elasticsearch PIT (point + in time) is a lightweight view into the state of the data as it existed when initiated. + In some cases, it’s preferred to perform multiple search requests using the same + point in time. For example, if refreshes happen between `search_after` requests, + then the results of those requests might not be consistent as changes happening + between searches are only visible to the more recent point in time. ``_ @@ -3117,7 +3131,7 @@ async def put_script( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a script. + Creates or updates a stored script or search template. ``_ @@ -3202,8 +3216,8 @@ async def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to evaluate the quality of ranked search results over a set of typical - search queries + Enables you to evaluate the quality of ranked search results over a set of typical + search queries. ``_ @@ -3395,7 +3409,7 @@ async def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Reindex operation. + Changes the number of requests per second for a particular reindex operation. ``_ @@ -3446,7 +3460,7 @@ async def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Renders a search template as a search request body. ``_ @@ -3514,7 +3528,7 @@ async def scripts_painless_execute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows an arbitrary script to be executed and a result to be returned + Runs a script and returns a result. ``_ @@ -3761,7 +3775,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query. + Returns search hits that match the query defined in the request. You can provide + search queries using the `q` query string parameter or the request body. If both + are specified, only the query parameter is used. ``_ @@ -4439,7 +4455,7 @@ async def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Runs a search with a search template. ``_ @@ -4980,8 +4996,8 @@ async def update_by_query( ) -> ObjectApiResponse[t.Any]: """ Updates documents that match the specified query. If no query is specified, performs - an update on every document in the index without changing the source, for example - to pick up a mapping change.
+ an update on every document in the data stream or index without modifying the + source, which is useful for picking up mapping changes. ``_ diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index d0f1affd6..b576b39dc 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -36,8 +36,11 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by ID. If the search is still running, the search request - will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async search by identifier. If the search is still running, the search + request will be cancelled. Otherwise, the saved search results are deleted. If + the Elasticsearch security features are enabled, the deletion of a specific async + search is restricted to: the authenticated user that submitted the original search + request; users that have the `cancel_task` cluster privilege. ``_ @@ -83,7 +86,9 @@ async def get( ) -> ObjectApiResponse[t.Any]: """ Retrieves the results of a previously submitted async search request given its - ID. + identifier. If the Elasticsearch security features are enabled, access to the + results of a specific async search is restricted to the user or API key that + submitted it. ``_ @@ -143,8 +148,10 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of a previously submitted async search request given its - ID. + Retrieves the status of a previously submitted async search request given its + identifier, without retrieving search results. If the Elasticsearch security + features are enabled, use of this API is restricted to the `monitoring_user` + role. ``_ @@ -316,7 +323,15 @@ async def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a search request asynchronously. + Runs a search request asynchronously. When the primary sort of the results is + an indexed field, shards get sorted based on the minimum and maximum value that + they hold for that field, hence partial results become available following the + sort criteria that was requested. Warning: Async search does not support scroll + or search requests that only include the suggest section. By default, Elasticsearch + doesn’t allow you to store an async search response larger than 10MB, and an attempt + to do this results in an error. The maximum allowed size for a stored async search + response can be set by changing the `search.max_async_search_response_size` cluster + level setting. ``_ diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 96922f928..3035c93b7 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -53,8 +53,11 @@ async def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Shows information about currently configured aliases to indices including filter - and routing infos. + Retrieves the cluster’s index aliases, including filter and routing information. + The API does not return data stream aliases. IMPORTANT: cat APIs are only intended + for human consumption using the command line or the Kibana console. They are + not intended for use by applications. For application consumption, use the aliases + API.
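To make the async search lifecycle described above concrete, here is a minimal sketch using the async client: submit a search, poll its status, then fetch and delete the results. The endpoint URL, index name, and query are illustrative placeholders, not part of this change.

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # Submit the search; keep the results even if it finishes within the wait window.
    resp = await client.async_search.submit(
        index="my-index",  # hypothetical index
        query={"match_all": {}},
        wait_for_completion_timeout="1s",
        keep_on_completion=True,
    )
    search_id = resp["id"]
    # Check progress without retrieving hits, then fetch results and clean up.
    status = await client.async_search.status(id=search_id)
    if not status["is_running"]:
        results = await client.async_search.get(id=search_id)
        print(results["response"]["hits"]["total"])
    await client.async_search.delete(id=search_id)
    await client.close()

asyncio.run(main())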
``_ @@ -142,8 +145,9 @@ async def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a snapshot of how many shards are allocated to each data node and how - much disk space they are using. + Provides a snapshot of the number of shards allocated to each data node and their + disk space. IMPORTANT: cat APIs are only intended for human consumption using + the command line or Kibana console. They are not intended for use by applications. ``_ @@ -227,7 +231,11 @@ async def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about existing component_templates templates. + Returns information about component templates in a cluster. Component templates + are building blocks for constructing index templates that specify index mappings, + settings, and aliases. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get component template API. ``_ @@ -308,8 +316,12 @@ async def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides quick access to the document count of the entire cluster, or individual - indices. + Provides quick access to a document count for a data stream, an index, or an + entire cluster. NOTE: The document count only includes live documents, not deleted + documents which have not yet been removed by the merge process. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the count API. ``_ @@ -394,8 +406,10 @@ async def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Shows how much heap memory is currently being used by fielddata on every data - node in the cluster. + Returns the amount of heap memory currently used by the field data cache on every + data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the nodes stats API. ``_ @@ -482,7 +496,17 @@ async def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a concise representation of the cluster health. + Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the cluster health API. This API is often used to check malfunctioning clusters. + To help you track cluster health alongside log files and alerting systems, the + API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but + includes no date information; `Unix epoch time`, which is machine-sortable and + includes date information. The latter format is useful for cluster recoveries + that take multiple days. You can use the cat health API to verify cluster health + across multiple nodes. You also can use the API to track the recovery of a large + cluster over a longer period of time. 
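The cat health docstring above stresses that cat APIs are for humans and applications should use the cluster health API instead. A brief sketch contrasting the two (connection details are placeholders):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # Human-oriented: a text table with headers, suitable for a terminal.
    print(await client.cat.health(v=True))
    # Application-oriented: structured JSON from the cluster health API.
    health = await client.cluster.health()
    print(health["status"], health["number_of_nodes"])
    await client.close()

asyncio.run(main())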
``_ @@ -652,8 +676,16 @@ async def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about indices: number of primaries and replicas, document - counts, disk size, ... + Returns high-level information about indices in a cluster, including backing + indices for data streams. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index API. Use the cat indices API to + get the following information for each index in a cluster: shard count; document + count; deleted document count; primary store size; total store size of all shards, + including shard replicas. These metrics are retrieved directly from Lucene, which + Elasticsearch uses internally to power indexing and search. As a result, all + document counts include hidden nested documents. To get an accurate count of + Elasticsearch documents, use the cat count or count APIs. ``_ @@ -754,7 +786,10 @@ async def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the master node. + Returns information about the master node, including the ID, bound IP address, + and name. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the nodes info API. ``_ @@ -859,7 +894,10 @@ async def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about data frame analytics jobs. + Returns configuration and usage information about data frame analytics jobs. + IMPORTANT: cat APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For application + consumption, use the get data frame analytics jobs statistics API. ``_ @@ -978,7 +1016,12 @@ async def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about datafeeds. + Returns configuration and usage information about datafeeds. This API returns + a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, + you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges + to use this API. IMPORTANT: cat APIs are only intended for human consumption + using the Kibana console or command line. They are not intended for use by applications. + For application consumption, use the get datafeed statistics API. ``_ @@ -1103,7 +1146,13 @@ async def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about anomaly detection jobs. + Returns configuration and usage information for anomaly detection jobs. This + API returns a maximum of 10,000 jobs. If the Elasticsearch security features + are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` + cluster privileges to use this API. IMPORTANT: cat APIs are only intended for + human consumption using the Kibana console or command line. They are not intended + for use by applications. For application consumption, use the get anomaly detection + job statistics API. 
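The cat indices docstring earlier in this hunk notes that its document counts come straight from Lucene and include hidden nested documents. A minimal sketch of that caveat in practice, using a hypothetical index name:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # cat indices: Lucene-level stats, so doc counts include hidden nested documents.
    print(await client.cat.indices(index="my-index", v=True))
    # For an accurate, application-friendly count of live documents, use the count API.
    resp = await client.count(index="my-index")
    print(resp["count"])
    await client.close()

asyncio.run(main())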
``_ @@ -1231,7 +1280,10 @@ async def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about inference trained models. + Returns configuration and usage information about inference trained models. IMPORTANT: + cat APIs are only intended for human consumption using the Kibana console or + command line. They are not intended for use by applications. For application + consumption, use the get trained models statistics API. ``_ @@ -1327,7 +1379,10 @@ async def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about custom node attributes. + Returns information about custom node attributes. IMPORTANT: cat APIs are only + intended for human consumption using the command line or Kibana console. They + are not intended for use by applications. For application consumption, use the + nodes info API. ``_ @@ -1405,7 +1460,10 @@ async def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns basic statistics about performance of cluster nodes. + Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only + intended for human consumption using the command line or Kibana console. They + are not intended for use by applications. For application consumption, use the + nodes info API. ``_ @@ -1489,7 +1547,10 @@ async def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a concise representation of the cluster pending tasks. + Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the pending cluster tasks API. ``_ @@ -1562,7 +1623,10 @@ async def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about installed plugins across nodes node. + Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes info API. ``_ @@ -1641,7 +1705,14 @@ async def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about index shard recoveries, both on-going completed. + Returns information about ongoing and completed shard recoveries. Shard recovery + is the process of initializing a shard copy, such as restoring a primary shard + from a snapshot or syncing a replica shard from a primary shard. When a shard + recovery completes, the recovered shard is available for search and indexing. + For data streams, the API returns information about the stream’s backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the index recovery API. ``_ @@ -1732,7 +1803,10 @@ async def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about snapshot repositories registered in the cluster. + Returns the snapshot repositories for a cluster. 
IMPORTANT: cat APIs are only + intended for human consumption using the command line or Kibana console. They + are not intended for use by applications. For application consumption, use the + get snapshot repository API. ``_ @@ -1809,7 +1883,11 @@ async def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides low-level information about the segments in the shards of an index. + Returns low-level information about the Lucene segments in index shards. For + data streams, the API returns information about the backing indices. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the index segments API. ``_ @@ -1897,7 +1975,10 @@ async def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a detailed view of shard allocation on nodes. + Returns information about the shards in a cluster. For data streams, the API + returns information about the backing indices. IMPORTANT: cat APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. ``_ @@ -1983,7 +2064,11 @@ async def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns all snapshots in a specific repository. + Returns information about the snapshots stored in one or more repositories. A + snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the get snapshot API. ``_ @@ -2072,8 +2157,10 @@ async def tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the tasks currently executing on one or more nodes - in the cluster. + Returns information about tasks currently executing in the cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the task management API. ``_ @@ -2161,7 +2248,11 @@ async def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about existing templates. + Returns information about index templates in a cluster. You can use index templates + to apply index settings and field mappings to new indices at creation. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the get index template API. ``_ @@ -2245,8 +2336,11 @@ async def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns cluster-wide thread pool statistics per node. By default the active, - queue and rejected statistics are returned for all thread pools. + Returns thread pool statistics for each node in a cluster. Returned information + includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes info API. 
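As a usage sketch for the cat thread pool docstring above: the `h` parameter narrows the output to specific columns, and `format="json"` returns structured rows when a program (rather than a person) is consuming the output. Names below are placeholders.

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # Restrict output to columns of interest; format="json" yields a list of
    # dicts instead of a text table.
    rows = await client.cat.thread_pool(
        thread_pool_patterns="search",
        h=["node_name", "name", "active", "queue", "rejected"],
        format="json",
    )
    for row in rows:
        print(row["node_name"], row["name"], row["rejected"])
    await client.close()

asyncio.run(main())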
``_ @@ -2364,7 +2458,10 @@ async def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about transforms. + Returns configuration and usage information about transforms. IMPORTANT: cat + APIs are only intended for human consumption using the Kibana console or command + line. They are not intended for use by applications. For application consumption, + use the get transform statistics API. ``_ diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 6a14632a5..c5de06ab8 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -115,7 +115,8 @@ async def delete_component_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a component template + Deletes component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. ``_ @@ -271,7 +272,7 @@ async def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns one or more component templates + Retrieves information about component templates. ``_ @@ -336,7 +337,8 @@ async def get_settings( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster settings. + Returns cluster-wide settings. By default, it returns only settings that have + been explicitly defined. ``_ @@ -420,7 +422,15 @@ async def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns basic information about the health of the cluster. + The cluster health API returns a simple status on the health of the cluster. + You can also use the API to get the health status of only specified data streams + and indices. For data streams, the API retrieves the health status of the stream’s + backing indices. The cluster health status is: green, yellow, or red. On the shard + level, a red status indicates that the specific shard is not allocated in the + cluster, yellow means that the primary shard is allocated but replicas are not, + and green means that all shards are allocated. The index level status is controlled + by the worst shard status. The cluster status is controlled by the worst index + status. ``_ @@ -570,8 +580,14 @@ async def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of any cluster-level changes (e.g. create index, update mapping, - allocate or fail shard) which have not yet been executed. + Returns cluster-level changes (such as create index, update mapping, allocate + or fail shard) that have not yet been executed. NOTE: This API returns a list + of any pending updates to the cluster state. These are distinct from the tasks + reported by the task management API, which include periodic tasks and tasks initiated + by the user, such as node stats, search queries, or create index requests. However, + if a user-initiated task such as a create index command causes a cluster state + update, the activity of this task might be reported by both the task management + API and the pending cluster tasks API. ``_ @@ -683,7 +699,19 @@ async def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a component template + Creates or updates a component template.
Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. + You can include comments anywhere in the request body except before the opening + curly bracket. ``_ @@ -828,7 +856,9 @@ async def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the information about configured remote clusters. + The cluster remote info API allows you to retrieve all of the configured remote + cluster information. It returns connection and endpoint information keyed by + the configured remote cluster alias. ``_ """ @@ -1050,7 +1080,9 @@ async def stats( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns high-level overview of cluster statistics. + Returns cluster statistics. It returns basic index metrics (shard numbers, store + size, memory usage) and information about the current nodes that form the cluster + (number, roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index ad2020353..63c7e8ed9 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -121,7 +121,7 @@ async def get_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets information about an enrich policy. + Returns information about an enrich policy. ``_ @@ -171,7 +171,7 @@ async def put_policy( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new enrich policy. + Creates an enrich policy. ``_ @@ -224,7 +224,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets enrich coordinator statistics and information about enrich policies that + Returns enrich coordinator statistics and information about enrich policies that are currently executing. ``_ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index f02811fa7..8e032a273 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -36,8 +36,8 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search by ID. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async EQL search or a stored synchronous EQL search. The API also + deletes results for the search. 
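Stepping back to the component template workflow documented above: a component template only takes effect once an index template lists it in `composed_of`. A minimal sketch under that assumption, with hypothetical template names:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # A component template holds a reusable chunk of settings and/or mappings.
    await client.cluster.put_component_template(
        name="my-shard-settings",  # hypothetical template name
        template={"settings": {"index.number_of_shards": 1}},
    )
    # It is applied to new indices only via a matching index template's composed_of.
    await client.indices.put_index_template(
        name="my-logs-template",
        index_patterns=["my-logs-*"],
        composed_of=["my-shard-settings"],
    )
    await client.close()

asyncio.run(main())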
``_ @@ -83,7 +86,8 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns async results from previously executed Event Query Language (EQL) search + Returns the current status and available results for an async EQL search or a + stored synchronous EQL search. ``_ @@ -133,8 +134,8 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the status of a previously submitted async or stored Event Query Language - (EQL) search + Returns the current status for an async EQL search or a stored synchronous EQL + search without returning results. ``_ diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index d39a86f28..99739035e 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -46,7 +46,7 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ESQL request + Executes an ES|QL request. ``_ diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index 85e6834d4..6c1f41034 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -125,9 +125,10 @@ async def msearch( wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ - Multi Search API where the search will only be executed after specified checkpoints - are available due to a refresh. This API is designed for internal use by the - fleet server project. + Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) + with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) + API. However, similar to the fleet search API, it supports the wait_for_checkpoints + parameter. :param searches: :param index: A single target to search. If the target is an index alias, it @@ -369,9 +370,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Search API where the search will only be executed after specified checkpoints - are available due to a refresh. This API is designed for internal use by the - fleet server project. + The purpose of the fleet search API is to provide an API where the search will + only be executed after the provided checkpoint has been processed and is visible + for searches inside of Elasticsearch. :param index: A single target to search. If the target is an index alias, it must resolve to a single index. diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index ce85020c5..998b990b9 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -45,8 +45,8 @@ async def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explore extracted and summarized information about the documents and terms in - an index. + Extracts and summarizes information about the documents and terms in an Elasticsearch + data stream or index.
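For the ES|QL change above, a minimal usage sketch: the response carries `columns` (name/type pairs) and `values` (rows). The index name and query are placeholders.

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # Run a small ES|QL query; hypothetical index name.
    resp = await client.esql.query(query="FROM my-index | LIMIT 5")
    for row in resp["values"]:
        print(row)
    await client.close()

asyncio.run(main())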
``_ diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index f1c383ef0..394b7f98c 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -40,8 +40,9 @@ async def delete_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. A currently used policy cannot - be deleted. + Deletes the specified lifecycle policy definition. You cannot delete policies + that are currently in use. If the policy is being used to manage any indices, + the request fails and returns an error. ``_ @@ -96,8 +97,9 @@ async def explain_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index's current lifecycle state, such as the - currently executing phase, action, and step. + Retrieves information about the index’s current lifecycle state, such as the + currently executing phase, action, and step. Shows when the index entered each + one, the definition of the running phase, and information about any failures. ``_ @@ -161,8 +163,7 @@ async def get_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the specified policy definition. Includes the policy version and last - modified date. + Retrieves a lifecycle policy. ``_ @@ -254,8 +255,10 @@ async def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrates the indices and ILM policies away from custom node attribute allocation - routing to data tiers routing + Switches the indices, ILM policies, and legacy, composable and component templates + from using custom node attributes and attribute-based allocation filters to using + data tiers, and optionally deletes one legacy index template. Using node roles + enables ILM to automatically move the indices between data tiers. ``_ @@ -376,7 +379,8 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy + Creates a lifecycle policy. If the specified policy exists, the policy is replaced + and the policy version is incremented. ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index b73b03dff..392b054ab 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -138,8 +138,7 @@ async def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the analysis process on a text and return the tokens breakdown of the - text. + Performs analysis on a text string and returns the resulting tokens. ``_ @@ -240,7 +239,8 @@ async def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears all or specific caches for one or more indices. + Clears the caches of one or more indices. For data streams, the API clears the + caches of the stream’s backing indices. ``_ @@ -327,7 +327,7 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an index + Clones an existing index. ``_ @@ -500,7 +500,7 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an index with optional settings and mappings. + Creates a new index.
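To illustrate the put lifecycle change above (replacing an existing policy bumps its version), a minimal sketch of creating a policy with a hot rollover phase and a delete phase; the policy name and thresholds are placeholders:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # Re-running this call replaces the policy and increments its version.
    await client.ilm.put_lifecycle(
        name="my-policy",  # hypothetical policy name
        policy={
            "phases": {
                "hot": {"actions": {"rollover": {"max_age": "7d"}}},
                "delete": {"min_age": "30d", "actions": {"delete": {}}},
            }
        },
    )
    await client.close()

asyncio.run(main())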
``_ @@ -571,7 +571,8 @@ async def create_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a data stream + Creates a data stream. You must have a matching index template with data stream + enabled. ``_ @@ -623,7 +624,7 @@ async def data_streams_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides statistics on operations happening in a data stream. + Retrieves statistics for one or more data streams. ``_ @@ -686,7 +687,7 @@ async def delete( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index. + Deletes one or more indices. ``_ @@ -758,7 +759,7 @@ async def delete_alias( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an alias. + Removes a data stream or index from an alias. ``_ @@ -824,7 +825,8 @@ async def delete_data_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the data stream lifecycle of the selected data streams. + Removes the data lifecycle from a data stream, rendering it not managed by the + data stream lifecycle. ``_ @@ -883,7 +885,7 @@ async def delete_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a data stream. + Deletes one or more data streams and their backing indices. ``_ @@ -932,7 +934,9 @@ async def delete_index_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index template. + Deletes one or more index templates. The provided name may contain multiple + template names separated by a comma. If multiple template names are specified, + there is no wildcard support and the provided names should match completely with + existing templates. ``_ @@ -986,7 +990,7 @@ async def delete_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index template. + Deletes a legacy index template. ``_ @@ -1048,7 +1052,7 @@ async def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream + Analyzes the disk usage of each field of an index or data stream. ``_ @@ -1121,7 +1125,9 @@ async def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Downsample an index + Aggregates a time series (TSDS) index and stores pre-computed statistical summaries + (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped + by a configured time interval. ``_ @@ -1189,7 +1195,7 @@ async def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index exists. + Checks if a data stream, index, or alias exists. ``_ @@ -1267,7 +1273,7 @@ async def exists_alias( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular alias exists. + Checks if an alias exists. ``_ @@ -1510,7 +1516,7 @@ async def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the field usage stats for each field of an index + Returns field usage information for each shard and field of an index. ``_ @@ -1598,7 +1604,7 @@ async def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the flush operation on one or more indices.
+ Flushes one or more data streams or indices. ``_ @@ -1778,7 +1784,8 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more indices. + Returns information about one or more indices. For data streams, the API returns + information about the stream’s backing indices. ``_ @@ -1867,7 +1874,7 @@ async def get_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an alias. + Retrieves information for one or more aliases. ``_ @@ -1948,7 +1955,7 @@ async def get_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the data stream lifecycle of the selected data streams. + Retrieves the data stream lifecycle configuration of one or more data streams. ``_ @@ -2007,7 +2014,7 @@ async def get_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns data streams. + Retrieves information about one or more data streams. ``_ @@ -2073,7 +2080,8 @@ async def get_field_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns mapping for one or more fields. + Retrieves mapping definitions for one or more fields. For data streams, the API + retrieves field mappings for the stream’s backing indices. ``_ @@ -2152,7 +2160,7 @@ async def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an index template. + Returns information about one or more index templates. ``_ @@ -2227,7 +2235,8 @@ async def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns mappings for one or more indices. + Retrieves mapping definitions for one or more indices. For data streams, the + API retrieves mappings for the stream’s backing indices. ``_ @@ -2313,7 +2322,8 @@ async def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns settings for one or more indices. + Returns setting information for one or more indices. For data streams, returns + setting information for the stream’s backing indices. ``_ @@ -2402,7 +2412,7 @@ async def get_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an index template. + Retrieves information about one or more index templates. ``_ @@ -2459,7 +2469,14 @@ async def migrate_to_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrates an alias to a data stream + Converts an index alias to a data stream. You must have a matching index template + that is data stream enabled. The alias must meet the following criteria: The + alias must have a write index; All indices for the alias must have a `@timestamp` + field mapping of a `date` or `date_nanos` field type; The alias must not have + any filters; The alias must not use custom routing. If successful, the request + removes the alias and creates a data stream with the same name. The indices for + the alias become hidden backing indices for the stream. The write index for the + alias becomes the write index for the stream. ``_ @@ -2502,7 +2519,7 @@ async def modify_data_stream( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Modifies a data stream + Performs one or more data stream modification actions in a single atomic operation. ``_ @@ -2564,7 +2581,7 @@ async def open( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens an index. + Opens a closed index. For data streams, the API opens any closed backing indices. 
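The migrate to data stream docstring above lists the criteria an alias must meet before conversion. A minimal sketch under the assumption that a hypothetical alias "my-alias" already satisfies them (a write index, @timestamp date mappings, no filters, no custom routing):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # Converts the alias into a data stream; its indices become hidden backing indices.
    await client.indices.migrate_to_data_stream(name="my-alias")
    # Confirm the new data stream exists under the same name.
    print(await client.indices.get_data_stream(name="my-alias"))
    await client.close()

asyncio.run(main())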
``_ @@ -2698,7 +2715,7 @@ async def put_alias( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an alias. + Adds a data stream or index to an alias. ``_ @@ -2804,7 +2821,7 @@ async def put_data_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the data stream lifecycle of the selected data streams. + Updates the data stream lifecycle of the specified data streams. ``_ @@ -2907,7 +2924,8 @@ async def put_index_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices. ``_ @@ -3068,7 +3086,9 @@ async def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index mappings. + Adds new fields to an existing data stream or index. You can also use this API + to change the search settings of existing fields. For data streams, these changes + are applied to all backing indices by default. ``_ @@ -3199,7 +3219,8 @@ async def put_settings( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index settings. + Changes a dynamic index setting in real time. For data streams, index setting + changes are applied to all backing indices by default. ``_ @@ -3305,7 +3326,8 @@ async def put_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices. ``_ @@ -3385,7 +3407,9 @@ async def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing index shard recoveries. + Returns information about ongoing and completed shard recoveries for one or more + indices. For data streams, the API returns information for the stream’s backing + indices. ``_ @@ -3447,7 +3471,9 @@ async def refresh( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the refresh operation in one or more indices. + A refresh makes recent operations performed on one or more indices available + for search. For data streams, the API runs the refresh operation on the stream’s + backing indices. ``_ @@ -3582,7 +3608,7 @@ async def resolve_cluster( ) -> ObjectApiResponse[t.Any]: """ Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. + including the local cluster, if included. Multiple patterns and remote clusters + are supported. ``_ @@ -3653,7 +3680,8 @@ async def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about any matching indices, aliases, and data streams + Resolves the specified name(s) and/or index patterns for indices, aliases, and + data streams. Multiple patterns and remote clusters are supported. ``_ @@ -3717,8 +3745,7 @@ async def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an alias to point to a new index when the existing index is considered - to be too large or too old. + Creates a new index for a data stream or index alias.
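For the rollover change above, a minimal sketch: roll over when at least one condition is met, or omit `conditions` to roll over unconditionally. The alias name and thresholds are placeholders.

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    resp = await client.indices.rollover(
        alias="my-alias",  # hypothetical data stream or alias name
        conditions={"max_age": "7d", "max_primary_shard_size": "50gb"},
    )
    # rolled_over is False when no condition was met; new_index names the target.
    print(resp["rolled_over"], resp.get("new_index"))
    await client.close()

asyncio.run(main())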
``_ @@ -3823,7 +3850,8 @@ async def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides low-level information about segments in a Lucene index. + Returns low-level information about the Lucene segments in index shards. For + data streams, the API returns information about the stream’s backing indices. ``_ @@ -3902,7 +3930,8 @@ async def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides store information for shard copies of indices. + Retrieves store information about replica shards in one or more indices. For + data streams, the API retrieves store information for the stream’s backing indices. ``_ @@ -3975,7 +4004,7 @@ async def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allow to shrink an existing index into a new index with fewer primary shards. + Shrinks an existing index into a new index with fewer primary shards. ``_ @@ -4052,7 +4081,7 @@ async def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate matching the given index name against the index templates in the system + Returns the index configuration that would be applied to the specified index + by matching index templates. ``_ @@ -4131,7 +4160,7 @@ async def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate resolving the given template name or body + Returns the index configuration that would be applied by a particular index template. ``_ @@ -4264,7 +4293,7 @@ async def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows you to split an existing index into a new index with more primary shards. + Splits an existing index into a new index with more primary shards. ``_ @@ -4356,7 +4385,8 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides statistics on operations happening in an index. + Returns statistics for one or more indices. For data streams, the API retrieves + statistics for the stream’s backing indices. ``_ @@ -4461,8 +4491,7 @@ async def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. When a frozen index is unfrozen, the index goes through the - normal recovery process and becomes writeable again. + Unfreezes an index. ``_ @@ -4538,7 +4567,7 @@ async def update_aliases( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates index aliases. + Performs one or more alias actions, such as adding a data stream or index to + an alias, in a single atomic operation. ``_ @@ -4613,7 +4642,7 @@ async def validate_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a user to validate a potentially expensive query without executing it. + Validates a potentially expensive query without executing it.
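The update aliases docstring above hinges on atomicity, which is what makes the API suitable for zero-downtime index swaps. A minimal sketch with hypothetical index and alias names:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # Both actions apply atomically, so readers of "my-alias" never observe a
    # state where the alias points at zero or two indices.
    await client.indices.update_aliases(
        actions=[
            {"remove": {"index": "my-index-v1", "alias": "my-alias"}},
            {"add": {"index": "my-index-v2", "alias": "my-alias"}},
        ]
    )
    await client.close()

asyncio.run(main())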
``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index c7669d1e6..89df532ee 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -36,8 +36,10 @@ async def delete( str, ] ] = None, + dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -48,6 +50,10 @@ :param inference_id: The inference Id :param task_type: The task type + :param dry_run: When true, the endpoint is not deleted, and a list of ingest + processors which reference this endpoint is returned + :param force: When true, the inference endpoint is forcefully deleted even if + it is still being used by ingest processors or semantic text fields """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -64,10 +70,14 @@ else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} + if dry_run is not None: + __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if force is not None: + __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: @@ -162,7 +172,7 @@ async def inference( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform inference + Performs inference on the service. ``_ @@ -245,7 +255,7 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Configure an inference endpoint for use in the Inference API + Creates an inference endpoint. ``_ diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 57af35b2c..321bd6181 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -40,7 +40,7 @@ async def delete_pipeline( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline. + Deletes one or more existing ingest pipelines. ``_ @@ -89,7 +89,7 @@ async def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistical information about geoip databases + Gets download statistics for GeoIP2 databases used with the geoip processor. ``_ """ @@ -129,7 +129,8 @@ async def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a pipeline. + Returns information about one or more ingest pipelines. This API returns a local + reference of the pipeline. ``_ @@ -180,7 +181,10 @@ async def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of the built-in patterns. + Returns a list of the built-in grok patterns. Grok extracts structured fields + out of a single text field within a document. You choose which field to extract + matched fields from, as well as the grok pattern you expect will match. A grok + pattern is like a regular expression that supports aliased expressions that can + be reused. ``_ """ @@ -230,7 +234,8 @@ async def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline. + Creates or updates an ingest pipeline. Changes made using this API take effect + immediately.
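Since the only signature change in this diff is the new `dry_run` and `force` query parameters on the inference delete method above, a brief sketch of how a caller might use them; the endpoint ID is hypothetical:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    # dry_run=True only reports the ingest processors that still reference the
    # endpoint; nothing is deleted.
    report = await client.inference.delete(inference_id="my-endpoint", dry_run=True)
    print(report)
    # force=True deletes the endpoint even if it is still in use.
    await client.inference.delete(inference_id="my-endpoint", force=True)
    await client.close()

asyncio.run(main())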
``_ @@ -316,7 +321,7 @@ async def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to simulate a pipeline with example documents. + Executes an ingest pipeline against a set of provided documents. ``_ diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index 576aadefe..6a2265446 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -72,7 +72,9 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves licensing information for the cluster + This API returns information about the type of license, when it was issued, and + when it expires, for example. For more information about the different types + of licenses, see https://www.elastic.co/subscriptions. ``_ @@ -248,7 +250,12 @@ async def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an indefinite basic license. + The start basic API enables you to initiate an indefinite basic license, which + gives access to all the basic features. If the basic license does not support + all of the features that are available with your current license, however, you + are notified in the response. You must then re-submit the API request with the + acknowledge parameter set to true. To check the status of your basic license, + use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). ``_ @@ -290,7 +297,8 @@ async def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - starts a limited time trial license. + The start trial API enables you to start a 30-day trial, which gives access to + all subscription features. ``_ diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index 63a9c37b2..882a1f633 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -36,7 +36,7 @@ async def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes Logstash Pipelines used by Central Management + Deletes a pipeline used for Logstash Central Management. ``_ @@ -76,7 +76,7 @@ async def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves Logstash Pipelines used by Central Management + Retrieves pipelines used for Logstash Central Management. ``_ @@ -123,7 +123,7 @@ async def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates Logstash Pipelines used for Central Management + Creates or updates a pipeline used for Logstash Central Management. ``_ diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index ff8cd0ba7..42cf6df00 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -36,7 +36,11 @@ async def clear_trained_model_deployment_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cached results from a trained model deployment + Clears a trained model deployment cache on all nodes where the trained model + is assigned. A trained model deployment may have an inference cache enabled. + As requests are handled by each allocated node, their responses may be cached + on that individual node. Calling this API clears the caches without restarting + the deployment. 
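As a concrete illustration of the simulate pipeline API documented at the start of this hunk, a sketch that runs an inline pipeline definition against test documents without touching any index; field names and values are placeholders:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint
    resp = await client.ingest.simulate(
        pipeline={"processors": [{"set": {"field": "env", "value": "prod"}}]},
        docs=[{"_source": {"message": "hello"}}],
    )
    # Each entry shows the document as it would look after the pipeline ran.
    print(resp["docs"][0]["doc"]["_source"])
    await client.close()

asyncio.run(main())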
``_ @@ -84,8 +88,19 @@ async def close_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes one or more anomaly detection jobs. A job can be opened and closed multiple - times throughout its lifecycle. + Close anomaly detection jobs. A job can be opened and closed multiple times throughout + its lifecycle. A closed job cannot receive data or perform analysis operations, + but you can still explore and navigate results. When you close a job, it runs + housekeeping tasks such as pruning the model history, flushing buffers, calculating + final results and persisting the model snapshots. Depending upon the size of + the job, it could take several minutes to close and the equivalent time to re-open. + After it is closed, the job has a minimal overhead on the cluster except for + maintaining its metadata. Therefore it is a best practice to close jobs that + are no longer required to process data. If you close an anomaly detection job + whose datafeed is running, the request first tries to stop the datafeed. This + behavior is equivalent to calling stop datafeed API with the same timeout and + force parameters as the close job request. When a datafeed that has a specified + end date stops, it automatically closes its associated job. ``_ @@ -146,7 +161,7 @@ async def delete_calendar( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a calendar. + Removes all scheduled events from a calendar, then deletes it. ``_ @@ -284,7 +299,7 @@ async def delete_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing data frame analytics job. + Deletes a data frame analytics job. ``_ @@ -384,7 +399,13 @@ async def delete_expired_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes expired and unused machine learning data. + Deletes expired and unused machine learning data. Deletes all job results, model + snapshots and forecast data that have exceeded their retention days period. Machine + learning state documents that are not associated with any job are also deleted. + You can limit the request to a single or set of anomaly detection jobs by using + a job identifier, a group name, a comma-separated list of jobs, or a wildcard + expression. You can delete expired data for all anomaly detection jobs by using + `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. ``_ @@ -443,7 +464,9 @@ async def delete_filter( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a filter. + Deletes a filter. If an anomaly detection job references the filter, you cannot + delete the filter. You must update or delete the job before you can delete the + filter. ``_ @@ -486,7 +509,10 @@ async def delete_forecast( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes forecasts from a machine learning job. + Deletes forecasts from a machine learning job. By default, forecasts are retained + for 14 days. You can specify a different retention period with the `expires_in` + parameter in the forecast jobs API. The delete forecast API enables you to delete + one or more forecasts before they expire. ``_ @@ -553,7 +579,12 @@ async def delete_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing anomaly detection job. + Deletes an anomaly detection job.
All job configuration, model state and results + are deleted. It is not currently possible to delete multiple jobs using wildcards + or a comma separated list. If you delete a job that has a datafeed, the request + first tries to delete the datafeed. This behavior is equivalent to calling the + delete datafeed API with the same timeout and force parameters as the delete + job request. ``_ @@ -607,7 +638,10 @@ async def delete_model_snapshot( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing model snapshot. + Deletes an existing model snapshot. You cannot delete the active model snapshot. + To delete that snapshot, first revert to a different one. To identify the active + model snapshot, refer to the `model_snapshot_id` in the results from the get + jobs API. ``_ @@ -700,7 +734,9 @@ async def delete_trained_model_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a model alias that refers to the trained model + Deletes a trained model alias. This API deletes an existing model alias that + refers to a trained model. If the model alias is missing or refers to a model + other than the one identified by the `model_id`, this API returns an error. ``_ @@ -755,7 +791,9 @@ async def estimate_model_memory( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Estimates the model memory + Makes an estimation of the memory usage for an anomaly detection job model. It + is based on analysis configuration details for the job and cardinality estimates + for the fields it references. ``_ @@ -820,7 +858,10 @@ async def evaluate_data_frame( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates the data frame analytics for an annotated index. + Evaluates the data frame analytics for an annotated index. The API packages together + commonly used evaluation metrics for various types of machine learning features. + This has been designed for use on indexes created by data frame analytics. Evaluation + requires both a ground truth field and an analytics result field to be present. ``_ @@ -894,7 +935,13 @@ async def explain_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explains a data frame analytics config. + Explains a data frame analytics config. This API provides explanations for a + data frame analytics config that either exists already or one that has not been + created yet. The following explanations are provided: * which fields are included + or not in the analysis and why, * how much memory is estimated to be required. + The estimate can be used when deciding the appropriate value for model_memory_limit + setting later on. If you have object fields or fields that are excluded via source + filtering, they are not included in the explanation. ``_ @@ -994,7 +1041,14 @@ async def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. + Forces any buffered data to be processed by the job. The flush jobs API is only + applicable when sending data for analysis using the post data API. Depending + on the content of the buffer, then it might additionally calculate new results. + Both flush and close operations are similar, however the flush is more efficient + if you are expecting to send more data for analysis. When flushing, the job remains + open and is available to continue analyzing data. 
A close operation additionally + prunes and persists the model state to disk and the job must be opened again + before analyzing further data. ``_ @@ -1063,6 +1117,9 @@ async def forecast( ) -> ObjectApiResponse[t.Any]: """ Predicts the future behavior of a time series by using its historical behavior. + Forecasts are not supported for jobs that perform population analysis; an error + occurs if you try to create a forecast for a job that has an `over_field_name` + in its configuration. ``_ @@ -1144,7 +1201,8 @@ async def get_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more buckets. + Retrieves anomaly detection job results for one or more buckets. The API presents + a chronological view of the records, grouped by bucket. ``_ @@ -1460,7 +1518,9 @@ async def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. + Retrieves configuration information for data frame analytics jobs. You can get + information for multiple data frame analytics jobs in a single API request by + using a comma-separated list of data frame analytics jobs or a wildcard expression. ``_ @@ -1597,7 +1657,12 @@ async def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. + Retrieves usage information for datafeeds. You can get statistics for multiple + datafeeds in a single API request by using a comma-separated list of datafeeds + or a wildcard expression. You can get statistics for all datafeeds by using `_all`, + by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the + datafeed is stopped, the only information you receive is the `datafeed_id` and + the `state`. This API returns a maximum of 10,000 datafeeds. ``_ @@ -1653,7 +1718,11 @@ async def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. + Retrieves configuration information for datafeeds. You can get information for + multiple datafeeds in a single API request by using a comma-separated list of + datafeeds or a wildcard expression. You can get information for all datafeeds + by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. + This API returns a maximum of 10,000 datafeeds. ``_ @@ -1716,7 +1785,7 @@ async def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. + Retrieves filters. You can get a single filter or all filters. ``_ @@ -1778,7 +1847,10 @@ async def get_influencers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more influencers. + Retrieves anomaly detection job results for one or more influencers. Influencers + are the entities that have contributed to, or are to blame for, the anomalies. + Influencer results are available only if an `influencer_field_name` is specified + in the job configuration. ``_ @@ -1916,7 +1988,11 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. + Retrieves configuration information for anomaly detection jobs. You can get information + for multiple anomaly detection jobs in a single API request by using a group + name, a comma-separated list of jobs, or a wildcard expression.
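In client terms, the addressing rules just described look like this. A minimal sketch, assuming an `AsyncElasticsearch` client and invented datafeed and job names:

```python
from elasticsearch import AsyncElasticsearch

async def show_ml_inventory(client: AsyncElasticsearch) -> None:
    # Wildcard addressing; the "sales-*" names are hypothetical.
    stats = await client.ml.get_datafeed_stats(datafeed_id="datafeed-sales-*")
    for feed in stats["datafeeds"]:
        # A stopped datafeed reports little more than its id and state.
        print(feed["datafeed_id"], feed["state"])

    # `_all` (or omitting job_id entirely) addresses every anomaly detection job.
    jobs = await client.ml.get_jobs(job_id="_all")
    print("jobs:", jobs["count"])
```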
You can get information + for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, + or by omitting the `<job_id>`. ``_ @@ -1979,7 +2055,9 @@ async def get_memory_stats( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on how ML is using memory. + Get information about how machine learning jobs and trained models are using + memory, on each node, both within the JVM heap, and natively, outside of the + JVM. ``_ @@ -2034,7 +2112,7 @@ async def get_model_snapshot_upgrade_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets stats for anomaly detection job model snapshot upgrades that are in progress. + Retrieves usage information for anomaly detection job model snapshot upgrades. ``_ @@ -2207,7 +2285,18 @@ async def get_overall_buckets( ) -> ObjectApiResponse[t.Any]: """ Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. + anomaly detection jobs. The `overall_score` is calculated by combining the scores + of all the buckets within the overall bucket span. First, the maximum `anomaly_score` + per anomaly detection job in the overall bucket is calculated. Then the `top_n` + of those scores are averaged to result in the `overall_score`. This means that + you can fine-tune the `overall_score` so that it is more or less sensitive to + the number of jobs that detect an anomaly at the same time. For example, if you + set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall + bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` + is high only when all jobs detect anomalies in that overall bucket. If you set + the `bucket_span` parameter (to a value greater than its default), the `overall_score` + is the maximum `overall_score` of the overall buckets that have a span equal + to the jobs' largest bucket span. ``_ @@ -2304,7 +2393,15 @@ async def get_records( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly records for an anomaly detection job. + Retrieves anomaly records for an anomaly detection job. Records contain the detailed + analytical results. They describe the anomalous activity that has been identified + in the input data based on the detector configuration. There can be many anomaly + records depending on the characteristics and size of the input data. In practice, + there are often too many to be able to manually process them. The machine learning + features therefore perform a sophisticated aggregation of the anomaly records + into buckets. The number of record results depends on the number of anomalies + found in each bucket, which relates to the number of time series being modeled + and the number of detectors. ``_ @@ -2374,7 +2471,7 @@ async def get_records( async def get_trained_models( self, *, - model_id: t.Optional[str] = None, + model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, decompress_definition: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -2390,14 +2487,16 @@ async def get_trained_models( ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, - tags: t.Optional[str] = None, + tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained inference model. + Retrieves configuration information for a trained model.
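The `top_n`/`bucket_span` interplay described for `get_overall_buckets` above is easier to see in a call. A sketch, assuming two cooperating jobs whose names are invented:

```python
from elasticsearch import AsyncElasticsearch

async def correlated_anomalies(client: AsyncElasticsearch) -> None:
    # With top_n equal to the number of jobs, overall_score is high only when
    # both (hypothetical) jobs detect an anomaly in the same overall bucket.
    resp = await client.ml.get_overall_buckets(
        job_id="web-latency,web-errors",
        top_n=2,
        overall_score=75.0,  # return only strongly anomalous overall buckets
        bucket_span="1h",
    )
    for bucket in resp["overall_buckets"]:
        print(bucket["timestamp"], bucket["overall_score"])
```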
``_ - :param model_id: The unique identifier of the trained model. + :param model_id: The unique identifier of the trained model or a model alias. + You can get information for multiple trained models in a single API request + by using a comma-separated list of model IDs or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions @@ -2473,7 +2572,9 @@ async def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained inference models. + Retrieves usage information for trained models. You can get usage information + for multiple trained models in a single API request by using a comma-separated + list of model IDs or a wildcard expression. ``_ @@ -2536,7 +2637,7 @@ async def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate a trained model. + Evaluates a trained model. ``_ @@ -2593,7 +2694,12 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns defaults and limits used by machine learning. + Returns defaults and limits used by machine learning. This endpoint is designed + to be used by a user interface that needs to fully understand machine learning + configurations where some options are not specified, meaning that the defaults + should be used. This endpoint may be used to find out what those defaults are. + It also provides information about the maximum size of machine learning jobs + that could run in the current cluster configuration. ``_ """ @@ -2633,7 +2739,12 @@ async def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens one or more anomaly detection jobs. + Opens one or more anomaly detection jobs. An anomaly detection job must be opened + in order for it to be ready to receive and analyze data. It can be opened and + closed multiple times throughout its lifecycle. When you open a new job, it starts + with an empty model. When you open an existing job, the most recent model state + is automatically loaded. The job is ready to resume its analysis from where it + left off, once new data is received. ``_ @@ -2687,7 +2798,7 @@ async def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Posts scheduled events in a calendar. + Adds scheduled events to a calendar. ``_ @@ -2743,7 +2854,9 @@ async def post_data( reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Sends data to an anomaly detection job for analysis. + Sends data to an anomaly detection job for analysis. IMPORTANT: For each job, + data can be accepted from only a single connection at a time. It is not currently + possible to post data to multiple jobs using wildcards or a comma-separated list. ``_ @@ -2806,7 +2919,7 @@ async def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews that will be analyzed given a data frame analytics config. + Previews the extracted features used by a data frame analytics config. ``_ @@ -2868,7 +2981,15 @@ async def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. + Previews a datafeed. 
This API returns the first "page" of search results from + a datafeed. You can preview an existing datafeed or provide configuration details + for a datafeed and anomaly detection job in the API. The preview shows the structure + of the data that will be passed to the anomaly detection engine. IMPORTANT: When + Elasticsearch security features are enabled, the preview uses the credentials + of the user that called the API. However, when the datafeed starts it uses the + roles of the last user that created or updated the datafeed. To get a preview + that accurately reflects the behavior of the datafeed, use the appropriate credentials. + You can also use secondary authorization headers to supply the credentials. ``_ @@ -2944,7 +3065,7 @@ async def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a calendar. + Creates a calendar. ``_ @@ -2991,7 +3112,7 @@ async def put_calendar_job( self, *, calendar_id: str, - job_id: str, + job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -3070,7 +3191,9 @@ async def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. + Instantiates a data frame analytics job. This API creates a data frame analytics + job that performs an analysis on the source indices and stores the outcome in + a destination index. ``_ @@ -3238,7 +3361,17 @@ async def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. + Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis + by an anomaly detection job. You can associate only one datafeed with each anomaly + detection job. The datafeed contains a query that runs at a defined interval + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) + at each interval. When Elasticsearch security features are enabled, your datafeed + remembers which roles the user who created it had at the time of creation and + runs the query using those same roles. If you provide secondary authorization + headers, those credentials are used instead. You must use Kibana, this API, or + the create anomaly detection jobs API to create a datafeed. Do not add a datafeed + directly to the `.ml-config` index. Do not give users `write` privileges on the + `.ml-config` index. ``_ @@ -3391,7 +3524,9 @@ async def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. + Instantiates a filter. A filter contains a list of strings. It can be used by + one or more anomaly detection jobs. Specifically, filters are referenced in the + `custom_rules` property of detector configuration objects. ``_ @@ -3477,7 +3612,8 @@ async def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates an anomaly detection job. + Instantiates an anomaly detection job. If you include a `datafeed_config`, you + must have read index privileges on the source index. ``_ @@ -3657,7 +3793,7 @@ async def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an inference trained model. + Enables you to supply a trained model that is not created by data frame analytics.
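A compact sketch of the datafeed creation flow described above; the job, index, and interval values are illustrative assumptions:

```python
from elasticsearch import AsyncElasticsearch

async def create_datafeed(client: AsyncElasticsearch) -> None:
    # One datafeed per anomaly detection job; all names here are invented.
    await client.ml.put_datafeed(
        datafeed_id="datafeed-txn-volume",
        job_id="txn-volume",
        indices=["transactions"],
        query={"match_all": {}},  # runs once per `frequency` interval
        frequency="150s",
        query_delay="90s",  # tolerate documents that arrive a little late
    )
```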
``_ @@ -3759,8 +3895,19 @@ async def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new model alias (or reassigns an existing one) to refer to the trained - model + Creates or updates a trained model alias. A trained model alias is a logical + name used to reference a single trained model. You can use aliases instead of + trained model identifiers to make it easier to reference your models. For example, + you can use aliases in inference aggregations and processors. An alias must be + unique and refer to only a single trained model. However, you can have multiple + aliases for each trained model. If you use this API to update an alias such that + it references a different trained model ID and the model uses a different type + of data frame analytics, an error occurs. For example, this situation occurs + if you have a trained model for regression analysis and a trained model for classification + analysis; you cannot reassign an alias from one type of trained model to another. + If you use this API to update an alias and there are very few input fields in + common between the old and new trained models for the model alias, the API returns + a warning. ``_ @@ -3818,7 +3965,7 @@ async def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition + Creates part of a trained model definition. ``_ @@ -3895,7 +4042,9 @@ async def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary + Creates a trained model vocabulary. This API is supported only for natural language + processing (NLP) models. The vocabulary is stored in the index as described in + `inference_config.*.vocabulary` of the trained model definition. ``_ @@ -3951,7 +4100,9 @@ async def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing anomaly detection job. + Resets an anomaly detection job. All model state and results are deleted. The + job is ready to start over as if it had just been created. It is not currently + possible to reset multiple jobs using wildcards or a comma separated list. ``_ @@ -4005,7 +4156,13 @@ async def revert_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reverts to a specific snapshot. + Reverts to a specific snapshot. The machine learning features react quickly to + anomalous input, learning new behaviors in data. Highly anomalous input increases + the variance in the models whilst the system learns whether this is a new step-change + in behavior or a one-off event. In the case where this anomalous input is known + to be a one-off, then it might be appropriate to reset the model state to a time + before this event. For example, you might consider reverting to a saved snapshot + after Black Friday or a critical system failure. ``_ @@ -4066,7 +4223,15 @@ async def set_upgrade_mode( ) -> ObjectApiResponse[t.Any]: """ Sets a cluster wide upgrade_mode setting that prepares machine learning indices - for an upgrade. + for an upgrade. When upgrading your cluster, in some circumstances you must restart + your nodes and reindex your machine learning indices. In those circumstances, + there must be no machine learning jobs running. You can close the machine learning + jobs, do the upgrade, then open all the jobs again. 
Alternatively, you can use + this API to temporarily halt tasks associated with the jobs and datafeeds and + prevent new jobs from opening. You can also use this API during upgrades that + do not require you to reindex your machine learning indices, though stopping + jobs is not a requirement in that case. You can see the current value for the + upgrade_mode setting by using the get machine learning info API. ``_ @@ -4112,7 +4277,16 @@ async def start_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. + Starts a data frame analytics job. A data frame analytics job can be started + and stopped multiple times throughout its lifecycle. If the destination index + does not exist, it is created automatically the first time you start the data + frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` + settings for the destination index are copied from the source index. If there + are multiple source indices, the destination index copies the highest setting + values. The mappings for the destination index are also copied from the source + indices. If there are any mapping conflicts, the job fails to start. If the destination + index exists, it is used as is. You can therefore set up the destination index + in advance with custom settings and mappings. ``_ @@ -4164,7 +4338,17 @@ async def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. + Starts one or more datafeeds. A datafeed must be started in order to retrieve + data from Elasticsearch. A datafeed can be started and stopped multiple times + throughout its lifecycle. Before you can start a datafeed, the anomaly detection + job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, + it continues processing input data from the next millisecond after it was stopped. + If new data was indexed for that exact millisecond between stopping and starting, + it will be ignored. When Elasticsearch security features are enabled, your datafeed + remembers which roles the last user to create or update it had at the time of + creation or update and runs the query using those same roles. If you provided + secondary authorization headers when you created or updated the datafeed, those + credentials are used instead. ``_ @@ -4233,7 +4417,8 @@ async def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trained model deployment. + Starts a trained model deployment, which allocates the model to every machine + learning node. ``_ @@ -4316,7 +4501,8 @@ async def stop_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. + Stops one or more data frame analytics jobs. A data frame analytics job can be + started and stopped multiple times throughout its lifecycle. ``_ @@ -4381,7 +4567,9 @@ async def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. + Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data + from Elasticsearch. A datafeed can be started and stopped multiple times throughout + its lifecycle. ``_ @@ -4443,7 +4631,7 @@ async def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop a trained model deployment. 
+ Stops a trained model deployment. ``_ @@ -4508,7 +4696,7 @@ async def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a data frame analytics job. + Updates an existing data frame analytics job. ``_ @@ -4616,7 +4804,11 @@ async def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a datafeed. + Updates the properties of a datafeed. You must stop and start the datafeed for + the changes to be applied. When Elasticsearch security features are enabled, + your datafeed remembers which roles the user who updated it had at the time of + the update and runs the query using those same roles. If you provide secondary + authorization headers, those credentials are used instead. ``_ @@ -4779,7 +4971,7 @@ async def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items. + Updates the description of a filter, adds items, or removes items from the list. ``_ @@ -5058,7 +5250,8 @@ async def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of trained model deployment. + Updates certain properties of an existing trained model deployment, such as + the number of allocations. ``_ @@ -5088,7 +5281,11 @@ async def update_trained_model_deployment( if not __body: if number_of_allocations is not None: __body["number_of_allocations"] = number_of_allocations - __headers = {"accept": "application/json", "content-type": "application/json"} + if not __body: + __body = None  # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return await self.perform_request(  # type: ignore[return-value] "POST", __path, @@ -5113,7 +5310,13 @@ async def upgrade_job_snapshot( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades a given job snapshot to the current major version. + Upgrades an anomaly detection model snapshot to the latest major version. Over + time, older snapshot formats are deprecated and removed. Anomaly detection jobs + support only snapshots that are from the current or previous major version. This + API provides a means to upgrade a snapshot to the current major version. This + aids in preparing the cluster for an upgrade to the next major version. Only + one snapshot per anomaly detection job can be upgraded at a time and the upgraded + snapshot cannot be the current snapshot of the anomaly detection job. ``_ diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 3ec6ebd32..3a9e0da74 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -37,7 +37,8 @@ async def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the archived repositories metering information present in the cluster. + You can use this API to clear the archived repositories metering information + in the cluster. ``_ @@ -85,7 +86,11 @@ async def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster repositories metering information. + You can use the cluster repositories metering API to retrieve repositories metering + information in a cluster.
This API exposes monotonically non-decreasing counters + and it’s expected that clients would durably store the information needed to + compute aggregations over a period of time. Additionally, the information exposed + by this API is volatile, meaning that it won’t be present after node restarts. ``_ @@ -140,7 +145,8 @@ async def hot_threads( ] = None, ) -> TextApiResponse: """ - Returns information about hot threads on each node in the cluster. + This API yields a breakdown of the hot threads on each selected node in the cluster. + The output is plain text with a breakdown of each node’s top hot threads. ``_ @@ -217,7 +223,7 @@ async def info( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about nodes in the cluster. + Returns cluster nodes information. ``_ @@ -286,7 +292,7 @@ async def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads secure settings. + Reloads the keystore on nodes in the cluster. ``_ @@ -359,7 +365,7 @@ async def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistical information about nodes in the cluster. + Returns cluster nodes statistics. ``_ @@ -476,7 +482,7 @@ async def usage( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about REST actions usage on nodes. + Returns information on the usage of features. ``_ diff --git a/elasticsearch/_async/client/query_ruleset.py b/elasticsearch/_async/client/query_ruleset.py index 497f1bcbf..685a4ff36 100644 --- a/elasticsearch/_async/client/query_ruleset.py +++ b/elasticsearch/_async/client/query_ruleset.py @@ -76,7 +76,7 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset. + Returns the details about a query ruleset ``_ @@ -119,7 +119,7 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists query rulesets. + Returns summarized information about existing query rulesets. ``_ diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 6421a083d..42717380e 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -165,8 +165,8 @@ async def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the - index where rollup data is stored). + Returns the rollup capabilities of all jobs inside of a rollup index (for example, + the index where rollup data is stored). ``_ @@ -344,7 +344,7 @@ async def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard query DSL. + Enables searching rolled-up data using the standard Query DSL. ``_ diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index 1b03b796b..73fc36897 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -116,7 +116,7 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application. 
+ Returns the details about a search application. ``_ @@ -353,7 +353,7 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application + Perform a search against a search application. ``_ diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index 1fecf18e8..bb032362c 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -44,7 +44,7 @@ async def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates the user profile on behalf of another user. + Creates or updates a user profile on behalf of another user. ``_ @@ -97,8 +97,12 @@ async def authenticate( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables authentication as a user and retrieve information about the authenticated - user. + Enables you to submit a request with a basic auth header to authenticate a user + and retrieve information about the authenticated user. A successful call returns + a JSON structure that shows user information such as their username, the roles + that are assigned to the user, any assigned metadata, and information about the + realms that authenticated and authorized the user. If the user cannot be authenticated, + this API returns a 401 status code. ``_ """ @@ -204,7 +208,8 @@ async def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear a subset or all entries from the API key cache. + Evicts a subset of all entries from the API key cache. The cache is also automatically + cleared on state changes of the security index. ``_ @@ -431,7 +436,11 @@ async def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key for access without requiring basic authentication. + Creates an API key for access without requiring basic authentication. A successful + request returns a JSON structure that contains the API key, its unique id, and + its name. If applicable, it also returns expiration information for the API key + in milliseconds. NOTE: By default, API keys never expire. You can specify expiration + information when you create the API keys. ``_ @@ -503,7 +512,7 @@ async def create_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a service account token for access without requiring basic authentication. + Creates a service account token for access without requiring basic authentication. ``_ @@ -1021,8 +1030,8 @@ async def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a kibana instance to configure itself to communicate with a secured elasticsearch - cluster. + Enables a Kibana instance to configure itself for communication with a secured + Elasticsearch cluster. ``_ """ @@ -1057,7 +1066,7 @@ async def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a new node to enroll to an existing cluster with security enabled. + Allows a new node to join an existing cluster with security features enabled. ``_ """ @@ -1100,7 +1109,10 @@ async def get_api_key( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for one or more API keys. + Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` + privilege, this API returns only the API keys that you own.
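A minimal sketch of that ownership restriction from the client side, assuming an `AsyncElasticsearch` client:

```python
from elasticsearch import AsyncElasticsearch

async def list_own_keys(client: AsyncElasticsearch) -> None:
    # With only `manage_own_api_key`, restrict the call to keys you own.
    resp = await client.security.get_api_key(owner=True)
    for key in resp["api_keys"]:
        print(key["id"], key["name"], key["invalidated"])
```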
If you have `read_security`, + `manage_api_key` or greater privileges (including `manage_security`), this API + returns all API keys regardless of ownership. ``_ @@ -1259,7 +1271,9 @@ async def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather + than using file-based role management. The get roles API cannot retrieve roles + that are defined in roles files. ``_ @@ -1352,7 +1366,7 @@ async def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about service accounts. + This API returns a list of service accounts that match the provided path parameter(s). ``_ @@ -1629,7 +1643,7 @@ async def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves user profiles for the given unique ID(s). + Retrieves a user's profile using the unique profile ID. ``_ @@ -1693,7 +1707,21 @@ async def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key on behalf of another user. + Creates an API key on behalf of another user. This API is similar to Create API + keys, however it creates the API key for a user that is different than the user + that runs the API. The caller must have authentication credentials (either an + access token, or a username and password) for the user on whose behalf the API + key will be created. It is not possible to use this API to create an API key + without that user’s credentials. The user, for whom the authentication credentials + are provided, can optionally "run as" (impersonate) another user. In this case, + the API key will be created on behalf of the impersonated user. This API is intended + to be used by applications that need to create and manage API keys for end users, + but cannot guarantee that those users have permission to create API keys on their + own behalf. A successful grant API key API call returns a JSON structure that + contains the API key, its unique id, and its name. If applicable, it also returns + expiration information for the API key in milliseconds. By default, API keys + never expire. You can specify expiration information when you create the API + keys. ``_ @@ -1893,7 +1921,13 @@ async def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more API keys. + Invalidates one or more API keys. The `manage_api_key` privilege allows deleting + any API keys. The `manage_own_api_key` only allows deleting API keys that are + owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation + request must be issued in one of the three formats: - Set the parameter `owner=true`. + - Or, set both `username` and `realm_name` to match the user’s identity. - Or, + if the request is issued by an API key, i.e. an API key invalidates itself, specify + its ID in the `ids` field. ``_ @@ -2104,7 +2138,9 @@ async def put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather + than using file-based role management. The create or update roles API cannot + update roles that are defined in roles files.
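The grant flow described above, creating a key on behalf of another user, in sketch form; every credential shown is an illustrative placeholder:

```python
from elasticsearch import AsyncElasticsearch

async def grant_key_for_user(client: AsyncElasticsearch) -> None:
    # The client holds the caller's credentials; the end user's
    # username/password below are placeholders, not real values.
    granted = await client.security.grant_api_key(
        grant_type="password",
        username="end-user",
        password="end-user-password",
        api_key={"name": "end-user-search-key", "expiration": "7d"},
    )
    # The response carries the key id, name, and the generated secret.
    print(granted["id"], granted["api_key"])
```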
``_ @@ -2122,6 +2158,9 @@ async def put_role( this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param run_as: A list of users that the owners of this role can impersonate. + *Note*: in Serverless, the run-as feature is disabled. For API compatibility, + you can still specify an empty `run_as` field, but a non-empty list will + be rejected. :param transient_metadata: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given @@ -2386,7 +2425,8 @@ async def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for API keys using a subset of query DSL + Retrieves information for API keys in a paginated manner. You can optionally + filter the results with a query. ``_ @@ -2502,8 +2542,7 @@ async def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Exchanges a SAML Response message for an Elasticsearch access token and refresh - token pair + Submits a SAML Response message to Elasticsearch for consumption. ``_ @@ -2565,7 +2604,7 @@ async def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the logout response sent from the SAML IdP + Verifies the logout response sent from the SAML IdP. ``_ @@ -2631,7 +2670,7 @@ async def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Consumes a SAML LogoutRequest + Submits a SAML LogoutRequest message to Elasticsearch for consumption. ``_ @@ -2698,8 +2737,7 @@ async def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates an access token and a refresh token that were generated via the SAML - Authenticate API + Submits a request to invalidate an access token and refresh token. ``_ @@ -2756,7 +2794,8 @@ async def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a SAML authentication request + Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based + on the configuration of the respective SAML realm in Elasticsearch. ``_ @@ -2811,7 +2850,7 @@ async def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider + Generate SAML metadata for a SAML 2.0 Service Provider. ``_ @@ -2926,7 +2965,22 @@ async def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates attributes of an existing API key. + Updates attributes of an existing API key. Users can only update API keys that + they created or that were granted to them. Use this API to update API keys created + by the create API Key or grant API Key APIs. If you need to apply the same update + to many API keys, you can use bulk update API Keys to reduce overhead. It’s not + possible to update expired API keys, or API keys that have been invalidated by + invalidate API Key. This API supports updates to an API key’s access scope and + metadata. The access scope of an API key is derived from the `role_descriptors` + you specify in the request, and a snapshot of the owner user’s permissions at + the time of the request.
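A sketch of such a scope update, assuming an `AsyncElasticsearch` client; the key ID, role name, and index pattern are all invented placeholders:

```python
from elasticsearch import AsyncElasticsearch

async def narrow_key_scope(client: AsyncElasticsearch) -> None:
    # Restrict the (hypothetical) key to read-only access on logs-* indices
    # and attach some bookkeeping metadata.
    await client.security.update_api_key(
        id="VuaCfGcBCdbkQm-e5aOx",
        role_descriptors={
            "logs-reader": {
                "indices": [{"names": ["logs-*"], "privileges": ["read"]}]
            }
        },
        metadata={"environment": "staging"},
    )
```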
The snapshot of the owner’s permissions is updated automatically + on every call. If you don’t specify `role_descriptors` in the request, a call + to this API might still change the API key’s access scope. This change can occur + if the owner user’s permissions have changed since the API key was created or + last modified. To update another user’s API key, use the `run_as` feature to + submit a request on behalf of another user. IMPORTANT: It’s not possible to use + an API key as the authentication credential for this API. To update an API key, + the owner user’s credentials are required. ``_ @@ -3001,7 +3055,8 @@ async def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update application specific data for the user profile of the given unique ID. + Updates specific data for the user profile that's associated with the specified + unique ID. ``_ diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index b0d2f9374..6d2746a85 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -40,7 +40,8 @@ async def cleanup_repository( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes stale data from repository. + Triggers the review of a snapshot repository’s contents and deletes any stale + data not referenced by existing snapshots. ``_ diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index 84b0baa3e..0f6ec9137 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -270,7 +270,7 @@ async def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonyms set + Creates or updates a synonym set. ``_ diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index 9744a8e32..cf5ebd978 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -158,7 +158,8 @@ async def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of tasks. + The task management API returns information about tasks currently executing on + one or more nodes in the cluster. ``_ diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 5966c14ac..90807b9cf 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -39,7 +39,7 @@ async def delete_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing transform. + Deletes a transform. ``_ @@ -249,7 +249,10 @@ async def preview_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a transform. + Previews a transform. It returns a maximum of 100 results. The calculations are + based on all the current data in the source index. It also generates a list of + mappings and settings for the destination index. These values are determined + based on the field types of the source index and the transform aggregations. ``_ @@ -366,7 +369,26 @@ async def put_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a transform. + Creates a transform. 
A transform copies data from source indices, transforms + it, and persists it into an entity-centric destination index. You can also think + of the destination index as a two-dimensional tabular data structure (known as + a data frame). The ID for each document in the data frame is generated from a + hash of the entity, so there is a unique row per entity. You must choose either + the latest or pivot method for your transform; you cannot use both in a single + transform. If you choose to use the pivot method for your transform, the entities + are defined by the set of `group_by` fields in the pivot object. If you choose + to use the latest method, the entities are defined by the `unique_key` field + values in the latest object. You must have `create_index`, `index`, and `read` + privileges on the destination index and `read` and `view_index_metadata` privileges + on the source indices. When Elasticsearch security features are enabled, the + transform remembers which roles the user that created it had at the time of creation + and uses those same roles. If those roles do not have the required privileges + on the source and destination indices, the transform fails when it attempts unauthorized + operations. NOTE: You must use Kibana or this API to create a transform. Do not + add a transform directly into any `.transform-internal*` indices using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + any privileges on `.transform-internal*` indices. If you used transforms prior + to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. ``_ @@ -466,7 +488,9 @@ async def reset_transform( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing transform. + Resets a transform. Before you can reset it, you must stop it; alternatively, + use the `force` query parameter. If the destination index was created by the + transform, it is deleted. ``_ @@ -514,7 +538,10 @@ async def schedule_now_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Schedules now a transform. + Schedules now a transform. If you _schedule_now a transform, it will process + the new data instantly, without waiting for the configured frequency interval. + After _schedule_now API is called, the transform will be processed again at now + + frequency unless _schedule_now API is called again in the meantime. ``_ @@ -561,7 +588,23 @@ async def start_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more transforms. + Starts a transform. When you start a transform, it creates the destination index + if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` + is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions + for the destination index from the source indices and the transform aggregations. + If fields in the destination index are derived from scripts (as in the case of + `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic + mappings unless an index template exists. If it is a latest transform, it does + not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, + create the destination index before you start the transform. Alternatively, you + can create an index template, though it does not affect the deduced mappings + in a pivot transform. 
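A sketch of the pivot-method flow just described, assuming an `AsyncElasticsearch` client; the index names, group_by field, and aggregation are illustrative:

```python
from elasticsearch import AsyncElasticsearch

async def build_entity_index(client: AsyncElasticsearch) -> None:
    # Pivot-method transform: one row per customer_id in the destination index.
    await client.transform.put_transform(
        transform_id="orders-by-customer",
        source={"index": ["orders"]},
        dest={"index": "orders-by-customer"},  # created when the transform starts
        pivot={
            "group_by": {"customer": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spend": {"sum": {"field": "price"}}},
        },
        frequency="5m",
    )
    await client.transform.start_transform(transform_id="orders-by-customer")
```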
When the transform starts, a series of validations occur + to ensure its success. If you deferred validation when you created the transform, + they occur when you start the transform, with the exception of privilege checks. + When Elasticsearch security features are enabled, the transform remembers which + roles the user that created it had at the time of creation and uses those same + roles. If those roles do not have the required privileges on the source and destination + indices, the transform fails when it attempts unauthorized operations. ``_ @@ -708,7 +751,13 @@ async def update_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a transform. + Updates certain properties of a transform. All updated properties except `description` + do not take effect until after the transform starts the next checkpoint, thus + there is data consistency in each checkpoint. To use this API, you must have + `read` and `view_index_metadata` privileges for the source indices. You must + also have `index` and `read` privileges for the destination index. When Elasticsearch + security features are enabled, the transform remembers which roles the user who + updated it had at the time of update and runs with those privileges. ``_ @@ -789,7 +838,13 @@ async def upgrade_transforms( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades all transforms. + Upgrades all transforms. This API identifies transforms that have a legacy configuration + format and upgrades them to the latest version. It also cleans up the internal + data structures that store the transform state and checkpoints. The upgrade does + not affect the source and destination indices. The upgrade also does not affect + the roles that transforms use when Elasticsearch security features are enabled; + the role used to read source data and write to the destination index remains + unchanged. ``_ diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index 40fdc4e76..590f3231c 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -235,7 +235,13 @@ async def execute_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces the execution of a stored watch. + This API can be used to force execution of the watch outside of its triggering + logic or to simulate the watch execution for debugging purposes. For testing + and debugging purposes, you also have fine-grained control on how the watch runs. + You can execute the watch without executing all of its actions or alternatively + by simulating them. You can also force execution by ignoring the watch condition + and control whether a watch record would be written to the watch history after + execution. ``_ diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index 4db677e0b..680d831e7 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -41,7 +41,7 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the installed X-Pack features. + Provides general information about the installed X-Pack features. ``_ @@ -87,7 +87,8 @@ async def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information about the installed X-Pack features.
+ This API provides information about which features are currently enabled and + available under the current license and some usage statistics. ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 97ca512cb..cc4112ec9 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -636,7 +636,8 @@ def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to perform multiple index/update/delete operations in a single request. + Performs multiple indexing or delete operations in a single API call. This reduces + overhead and can greatly increase indexing speed. ``_ @@ -735,7 +736,7 @@ def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explicitly clears the search context for a scroll. + Clears the search context and results for a scrolling search. ``_ @@ -785,7 +786,7 @@ def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close a point in time + Closes a point-in-time. ``_ @@ -992,8 +993,9 @@ def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new document in the index. Returns a 409 response when a document with - a same ID already exists in the index. + Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and the document already exists, the request updates + the document and increments its version. ``_ @@ -1097,7 +1099,7 @@ def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a document from the index. + Removes a JSON document from the specified index. ``_ @@ -1221,7 +1223,7 @@ def delete_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes documents matching the provided query. + Deletes documents that match the specified query. ``_ @@ -1447,7 +1449,7 @@ def delete_script( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a script. + Deletes a stored script or search template. ``_ @@ -1515,7 +1517,7 @@ def exists( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document exists in an index. + Checks if a document in an index exists. ``_ @@ -1616,7 +1618,7 @@ def exists_source( ] = None, ) -> HeadApiResponse: """ - Returns information about whether a document source exists in an index. + Checks if a document's `_source` is stored. ``_ @@ -1716,7 +1718,8 @@ def explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about why a specific matches (or doesn't match) a query. + Returns information about why a specific document matches (or doesn’t match) + a query. ``_ @@ -1835,7 +1838,10 @@ def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the information about the capabilities of fields among multiple indices. + The field capabilities API returns the information about the capabilities of + fields among multiple indices. The field capabilities API returns runtime fields + like any other field. For example, a runtime field with a type of keyword is + returned as any other field that belongs to the `keyword` family. ``_ @@ -2042,7 +2048,7 @@ def get_script( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a script. + Retrieves a stored script or search template. 
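From here on the hunks mirror the same docstring changes into the synchronous client, so a stored-script example uses the blocking API. A minimal sketch, assuming a local cluster; the script ID and Painless source are made up:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Store a Painless script, then read it back; "my-discount" is a made-up ID.
client.put_script(
    id="my-discount",
    script={"lang": "painless", "source": "doc['price'].value * params.rate"},
)
stored = client.get_script(id="my-discount")
print(stored["script"]["source"])
```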
``_ @@ -2332,7 +2338,9 @@ def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a document in an index. + Adds a JSON document to the specified data stream or index and makes it searchable. + If the target is an index and the document already exists, the request updates + the document and increments its version. ``_ @@ -2841,7 +2849,7 @@ def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search template operations in one request. + Runs multiple templated searches with a single request. ``_ @@ -3043,7 +3051,13 @@ def open_point_in_time( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a point in time that can be used in subsequent searches + A search request by default executes against the most recent visible data of + the target indices, which is called point in time. Elasticsearch pit (point in + time) is a lightweight view into the state of the data as it existed when initiated. + In some cases, it’s preferred to perform multiple search requests using the same + point in time. For example, if refreshes happen between `search_after` requests, + then the results of those requests might not be consistent as changes happening + between searches are only visible to the more recent point in time. ``_ @@ -3115,7 +3129,7 @@ def put_script( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a script. + Creates or updates a stored script or search template. ``_ @@ -3200,8 +3214,8 @@ def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to evaluate the quality of ranked search results over a set of typical - search queries + Enables you to evaluate the quality of ranked search results over a set of typical + search queries. ``_ @@ -3393,7 +3407,7 @@ def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Reindex operation. + Copies documents from a source to a destination. ``_ @@ -3444,7 +3458,7 @@ def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Renders a search template as a search request body. ``_ @@ -3512,7 +3526,7 @@ def scripts_painless_execute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows an arbitrary script to be executed and a result to be returned + Runs a script and returns a result. ``_ @@ -3759,7 +3773,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query. + Returns search hits that match the query defined in the request. You can provide + search queries using the `q` query string parameter or the request body. If both + are specified, only the query parameter is used. ``_ @@ -4437,7 +4453,7 @@ def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to use the Mustache language to pre-render a search definition. + Runs a search with a search template. ``_ @@ -4978,8 +4994,8 @@ def update_by_query( ) -> ObjectApiResponse[t.Any]: """ Updates documents that match the specified query. If no query is specified, performs - an update on every document in the index without changing the source, for example - to pick up a mapping change. 
+ an update on every document in the data stream or index without modifying the + source, which is useful for picking up mapping changes. ``_ diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 542d3dff2..8f53c4042 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -36,8 +36,11 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by ID. If the search is still running, the search request - will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async search by identifier. If the search is still running, the search + request will be cancelled. Otherwise, the saved search results are deleted. If + the Elasticsearch security features are enabled, the deletion of a specific async + search is restricted to: the authenticated user that submitted the original search + request; users that have the `cancel_task` cluster privilege. ``_ @@ -83,7 +86,9 @@ def get( ) -> ObjectApiResponse[t.Any]: """ Retrieves the results of a previously submitted async search request given its - ID. + identifier. If the Elasticsearch security features are enabled, access to the + results of a specific async search is restricted to the user or API key that + submitted it. ``_ @@ -143,8 +148,10 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of a previously submitted async search request given its - ID. + Get async search status. Retrieves the status of a previously submitted async + search request given its identifier, without retrieving search results. If the + Elasticsearch security features are enabled, use of this API is restricted to + the `monitoring_user` role. ``_ @@ -316,7 +323,15 @@ def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a search request asynchronously. + Runs a search request asynchronously. When the primary sort of the results is + an indexed field, shards get sorted based on minimum and maximum value that they + hold for that field, hence partial results become available following the sort + criteria that was requested. Warning: Async search does not support scroll nor + search requests that only include the suggest section. By default, Elasticsearch + doesn’t allow you to store an async search response larger than 10Mb and an attempt + to do this results in an error. The maximum allowed size for a stored async search + response can be set by changing the `search.max_async_search_response_size` cluster + level setting. ``_ diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 5165d8b53..39d64c8fc 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -53,8 +53,11 @@ def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Shows information about currently configured aliases to indices including filter - and routing infos. + Retrieves the cluster’s index aliases, including filter and routing information. + The API does not return data stream aliases. IMPORTANT: cat APIs are only intended + for human consumption using the command line or the Kibana console. They are + not intended for use by applications. For application consumption, use the aliases + API.
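The human-versus-application split stated for cat aliases above looks roughly like this in client code; a sketch, assuming a local client and default settings:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Human-oriented: tabular text, handy on the command line or in Kibana.
print(client.cat.aliases(v=True))

# Application-oriented: the aliases API returns stable, structured JSON.
for index, data in client.indices.get_alias().items():
    print(index, list(data["aliases"]))
```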
``_ @@ -142,8 +145,9 @@ def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a snapshot of how many shards are allocated to each data node and how - much disk space they are using. + Provides a snapshot of the number of shards allocated to each data node and their + disk space. IMPORTANT: cat APIs are only intended for human consumption using + the command line or Kibana console. They are not intended for use by applications. ``_ @@ -227,7 +231,11 @@ def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about existing component_templates templates. + Returns information about component templates in a cluster. Component templates + are building blocks for constructing index templates that specify index mappings, + settings, and aliases. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get component template API. ``_ @@ -308,8 +316,12 @@ def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides quick access to the document count of the entire cluster, or individual - indices. + Provides quick access to a document count for a data stream, an index, or an + entire cluster. NOTE: The document count only includes live documents, not deleted + documents which have not yet been removed by the merge process. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the count API. ``_ @@ -394,8 +406,10 @@ def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Shows how much heap memory is currently being used by fielddata on every data - node in the cluster. + Returns the amount of heap memory currently used by the field data cache on every + data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the nodes stats API. ``_ @@ -482,7 +496,17 @@ def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a concise representation of the cluster health. + Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the cluster health API. This API is often used to check malfunctioning clusters. + To help you track cluster health alongside log files and alerting systems, the + API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but + includes no date information; `Unix epoch time`, which is machine-sortable and + includes date information. The latter format is useful for cluster recoveries + that take multiple days. You can use the cat health API to verify cluster health + across multiple nodes. You also can use the API to track the recovery of a large + cluster over a longer period of time. 
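The two timestamp formats called out for cat health can be toggled from the client; a sketch (the `ts` flag is an assumption about this client's generated signature, mirroring the REST parameter of the same name):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Includes both the epoch (machine-sortable) and HH:MM:SS columns by default.
print(client.cat.health(v=True))

# Drop the timestamp columns entirely when they are just noise.
print(client.cat.health(v=True, ts=False))
```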
``_ @@ -652,8 +676,16 @@ def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about indices: number of primaries and replicas, document - counts, disk size, ... + Returns high-level information about indices in a cluster, including backing + indices for data streams. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index API. Use the cat indices API to + get the following information for each index in a cluster: shard count; document + count; deleted document count; primary store size; total store size of all shards, + including shard replicas. These metrics are retrieved directly from Lucene, which + Elasticsearch uses internally to power indexing and search. As a result, all + document counts include hidden nested documents. To get an accurate count of + Elasticsearch documents, use the cat count or count APIs. ``_ @@ -754,7 +786,10 @@ def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the master node. + Returns information about the master node, including the ID, bound IP address, + and name. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the nodes info API. ``_ @@ -859,7 +894,10 @@ def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about data frame analytics jobs. + Returns configuration and usage information about data frame analytics jobs. + IMPORTANT: cat APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For application + consumption, use the get data frame analytics jobs statistics API. ``_ @@ -978,7 +1016,12 @@ def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about datafeeds. + Returns configuration and usage information about datafeeds. This API returns + a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, + you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges + to use this API. IMPORTANT: cat APIs are only intended for human consumption + using the Kibana console or command line. They are not intended for use by applications. + For application consumption, use the get datafeed statistics API. ``_ @@ -1103,7 +1146,13 @@ def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about anomaly detection jobs. + Returns configuration and usage information for anomaly detection jobs. This + API returns a maximum of 10,000 jobs. If the Elasticsearch security features + are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` + cluster privileges to use this API. IMPORTANT: cat APIs are only intended for + human consumption using the Kibana console or command line. They are not intended + for use by applications. For application consumption, use the get anomaly detection + job statistics API. 
``_ @@ -1231,7 +1280,10 @@ def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about inference trained models. + Returns configuration and usage information about inference trained models. IMPORTANT: + cat APIs are only intended for human consumption using the Kibana console or + command line. They are not intended for use by applications. For application + consumption, use the get trained models statistics API. ``_ @@ -1327,7 +1379,10 @@ def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about custom node attributes. + Returns information about custom node attributes. IMPORTANT: cat APIs are only + intended for human consumption using the command line or Kibana console. They + are not intended for use by applications. For application consumption, use the + nodes info API. ``_ @@ -1405,7 +1460,10 @@ def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns basic statistics about performance of cluster nodes. + Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only + intended for human consumption using the command line or Kibana console. They + are not intended for use by applications. For application consumption, use the + nodes info API. ``_ @@ -1489,7 +1547,10 @@ def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a concise representation of the cluster pending tasks. + Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the pending cluster tasks API. ``_ @@ -1562,7 +1623,10 @@ def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about installed plugins across nodes node. + Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes info API. ``_ @@ -1641,7 +1705,14 @@ def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about index shard recoveries, both on-going completed. + Returns information about ongoing and completed shard recoveries. Shard recovery + is the process of initializing a shard copy, such as restoring a primary shard + from a snapshot or syncing a replica shard from a primary shard. When a shard + recovery completes, the recovered shard is available for search and indexing. + For data streams, the API returns information about the stream’s backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the index recovery API. ``_ @@ -1732,7 +1803,10 @@ def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about snapshot repositories registered in the cluster. + Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only + intended for human consumption using the command line or Kibana console. 
They + are not intended for use by applications. For application consumption, use the + get snapshot repository API. ``_ @@ -1809,7 +1883,11 @@ def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides low-level information about the segments in the shards of an index. + Returns low-level information about the Lucene segments in index shards. For + data streams, the API returns information about the backing indices. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the index segments API. ``_ @@ -1897,7 +1975,10 @@ def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a detailed view of shard allocation on nodes. + Returns information about the shards in a cluster. For data streams, the API + returns information about the backing indices. IMPORTANT: cat APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. ``_ @@ -1983,7 +2064,11 @@ def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns all snapshots in a specific repository. + Returns information about the snapshots stored in one or more repositories. A + snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the get snapshot API. ``_ @@ -2072,8 +2157,10 @@ def tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the tasks currently executing on one or more nodes - in the cluster. + Returns information about tasks currently executing in the cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the task management API. ``_ @@ -2161,7 +2248,11 @@ def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about existing templates. + Returns information about index templates in a cluster. You can use index templates + to apply index settings and field mappings to new indices at creation. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the get index template API. ``_ @@ -2245,8 +2336,11 @@ def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns cluster-wide thread pool statistics per node. By default the active, - queue and rejected statistics are returned for all thread pools. + Returns thread pool statistics for each node in a cluster. Returned information + includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes info API. 
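A sketch of pulling only the thread pool columns the summary above mentions (active, queue, rejected), assuming a local client; `h` selects columns and `v` adds the header row:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# One row per node and thread pool, restricted to the interesting columns.
print(client.cat.thread_pool(h="node_name,name,active,queue,rejected", v=True))
```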
``_ @@ -2364,7 +2458,10 @@ def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Gets configuration and usage information about transforms. + Returns configuration and usage information about transforms. IMPORTANT: cat + APIs are only intended for human consumption using the Kibana console or command + line. They are not intended for use by applications. For application consumption, + use the get transform statistics API. ``_ diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 3537ae559..961939fc9 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -115,7 +115,8 @@ def delete_component_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a component template + Deletes component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. ``_ @@ -271,7 +272,7 @@ def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns one or more component templates + Retrieves information about component templates. ``_ @@ -336,7 +337,8 @@ def get_settings( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster settings. + Returns cluster-wide settings. By default, it returns only settings that have + been explicitly defined. ``_ @@ -420,7 +422,15 @@ def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns basic information about the health of the cluster. + The cluster health API returns a simple status on the health of the cluster. + You can also use the API to get the health status of only specified data streams + and indices. For data streams, the API retrieves the health status of the stream’s + backing indices. The cluster health status is: green, yellow or red. On the shard + level, a red status indicates that the specific shard is not allocated in the + cluster, yellow means that the primary shard is allocated but replicas are not, + and green means that all shards are allocated. The index level status is controlled + by the worst shard status. The cluster status is controlled by the worst index + status. ``_ @@ -570,8 +580,14 @@ def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of any cluster-level changes (e.g. create index, update mapping, - allocate or fail shard) which have not yet been executed. + Returns cluster-level changes (such as create index, update mapping, allocate + or fail shard) that have not yet been executed. NOTE: This API returns a list + of any pending updates to the cluster state. These are distinct from the tasks + reported by the Task Management API which include periodic tasks and tasks initiated + by the user, such as node stats, search queries, or create index requests. However, + if a user-initiated task such as a create index command causes a cluster state + update, the activity of this task might be reported by both task api and pending + cluster tasks API. ``_ @@ -683,7 +699,19 @@ def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a component template + Creates or updates a component template. 
Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. + You can include comments anywhere in the request body except before the opening + curly bracket. ``_ @@ -828,7 +856,9 @@ def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the information about configured remote clusters. + The cluster remote info API allows you to retrieve all of the configured remote + cluster information. It returns connection and endpoint information keyed by + the configured remote cluster alias. ``_ """ @@ -1050,7 +1080,9 @@ def stats( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns high-level overview of cluster statistics. + Returns cluster statistics. It returns basic index metrics (shard numbers, store + size, memory usage) and information about the current nodes that form the cluster + (number, roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 3596cb243..c6bf5f24b 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -121,7 +121,7 @@ def get_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets information about an enrich policy. + Returns information about an enrich policy. ``_ @@ -171,7 +171,7 @@ def put_policy( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new enrich policy. + Creates an enrich policy. ``_ @@ -224,7 +224,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets enrich coordinator statistics and information about enrich policies that + Returns enrich coordinator statistics and information about enrich policies that are currently executing. ``_ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 713e143f1..4c57f4318 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -36,8 +36,8 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search by ID. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. + Deletes an async EQL search or a stored synchronous EQL search. The API also + deletes results for the search. ``_ @@ -83,7 +83,8 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns async results from previously executed Event Query Language (EQL) search + Returns the current status and available results for an async EQL search or a + stored synchronous EQL search. 
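The `composed_of` flow described in the component template notes above looks roughly like this in client code; a sketch with hypothetical template names, not taken from this diff:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# A component template holding a reusable mapping block.
client.cluster.put_component_template(
    name="timestamp-mappings",
    template={"mappings": {"properties": {"@timestamp": {"type": "date"}}}},
)

# An index template that picks it up via `composed_of`; it applies only
# to indices created after this point that match the pattern.
client.indices.put_index_template(
    name="logs-template",
    index_patterns=["logs-*"],
    composed_of=["timestamp-mappings"],
)
```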
``_ @@ -133,8 +134,8 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the status of a previously submitted async or stored Event Query Language - (EQL) search + Returns the current status for an async EQL search or a stored synchronous EQL + search without returning results. ``_ diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 1dee2e934..f3acd3879 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -46,7 +46,7 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ESQL request + Executes an ES|QL request. ``_ diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index 10d3d8166..bb6c11b3b 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -125,9 +125,10 @@ def msearch( wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ - Multi Search API where the search will only be executed after specified checkpoints - are available due to a refresh. This API is designed for internal use by the - fleet server project. + Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) + with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) + API. However, similar to the fleet search API, it supports the wait_for_checkpoints + parameter. :param searches: :param index: A single target to search. If the target is an index alias, it must resolve to a single index. @@ -369,9 +370,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Search API where the search will only be executed after specified checkpoints - are available due to a refresh. This API is designed for internal use by the - fleet server project. + The purpose of the fleet search API is to provide a search API where the search + is only executed after the provided checkpoint has been processed and is visible + for searches inside of Elasticsearch. :param index: A single target to search. If the target is an index alias, it must resolve to a single index. diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index aba6718c4..45f4965e6 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -45,8 +45,8 @@ def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explore extracted and summarized information about the documents and terms in - an index. + Extracts and summarizes information about the documents and terms in an Elasticsearch + data stream or index. ``_ diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index f3f16aac4..6884269de 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -40,8 +40,9 @@ def delete_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. A currently used policy cannot - be deleted. + Deletes the specified lifecycle policy definition. You cannot delete policies + that are currently in use. If the policy is being used to manage any indices, + the request fails and returns an error.
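The in-use restriction on deleting lifecycle policies, per the `delete_lifecycle` summary just above, surfaces as an API error; a hedged sketch of handling it with a hypothetical policy name:

```python
from elasticsearch import ApiError, Elasticsearch

client = Elasticsearch("http://localhost:9200")

client.ilm.put_lifecycle(
    name="my-policy",
    policy={"phases": {"delete": {"min_age": "30d", "actions": {"delete": {}}}}},
)

try:
    client.ilm.delete_lifecycle(name="my-policy")
except ApiError as err:
    # Fails if any index is still managed by the policy.
    print("could not delete policy:", err)
```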
``_ @@ -96,8 +97,9 @@ def explain_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index's current lifecycle state, such as the - currently executing phase, action, and step. + Retrieves information about the index’s current lifecycle state, such as the + currently executing phase, action, and step. Shows when the index entered each + one, the definition of the running phase, and information about any failures. ``_ @@ -161,8 +163,7 @@ def get_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the specified policy definition. Includes the policy version and last - modified date. + Retrieves a lifecycle policy. ``_ @@ -254,8 +255,10 @@ def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrates the indices and ILM policies away from custom node attribute allocation - routing to data tiers routing + Switches the indices, ILM policies, and legacy, composable and component templates + from using custom node attributes and attribute-based allocation filters to using + data tiers, and optionally deletes one legacy index template. Using node roles + enables ILM to automatically move the indices between data tiers. ``_ @@ -376,7 +379,8 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy + Creates a lifecycle policy. If the specified policy exists, the policy is replaced + and the policy version is incremented. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index aeca66804..d2e5b08d3 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -138,8 +138,7 @@ def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the analysis process on a text and return the tokens breakdown of the - text. + Performs analysis on a text string and returns the resulting tokens. ``_ @@ -240,7 +239,8 @@ def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears all or specific caches for one or more indices. + Clears the caches of one or more indices. For data streams, the API clears the + caches of the stream’s backing indices. ``_ @@ -327,7 +327,7 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an index + Clones an existing index. ``_ @@ -500,7 +500,7 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an index with optional settings and mappings. + Creates a new index. ``_ @@ -571,7 +571,8 @@ def create_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a data stream + Creates a data stream. You must have a matching index template with data stream + enabled. ``_ @@ -623,7 +624,7 @@ def data_streams_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides statistics on operations happening in a data stream. + Retrieves statistics for one or more data streams. ``_ @@ -686,7 +687,7 @@ def delete( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index. + Deletes one or more indices.
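The create/delete summaries above correspond to the most basic index round trip; a minimal sketch with hypothetical names:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Create a new index with explicit settings and mappings.
client.indices.create(
    index="my-index",
    settings={"number_of_shards": 1},
    mappings={"properties": {"title": {"type": "text"}}},
)

# Delete one or more indices; ignore_unavailable avoids a 404 on missing names.
client.indices.delete(index="my-index", ignore_unavailable=True)
```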
``_ @@ -758,7 +759,7 @@ def delete_alias( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an alias. + Removes a data stream or index from an alias. ``_ @@ -824,7 +825,8 @@ def delete_data_lifecycle( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the data stream lifecycle of the selected data streams. + Removes the data lifecycle from a data stream, rendering it not managed by the + data stream lifecycle. ``_ @@ -883,7 +885,7 @@ def delete_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a data stream. + Deletes one or more data streams and their backing indices. ``_ @@ -932,7 +934,9 @@ def delete_index_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index template. + Deletes one or more index templates. The provided <index-template> may contain multiple template names separated by + a comma. If multiple template names are specified, there is no wildcard support + and the provided names should match completely with existing templates. ``_ @@ -986,7 +990,7 @@ def delete_template( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an index template. + Deletes a legacy index template. ``_ @@ -1048,7 +1052,7 @@ def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream + Analyzes the disk usage of each field of an index or data stream. ``_ @@ -1121,7 +1125,9 @@ def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Downsample an index + Aggregates a time series (TSDS) index and stores pre-computed statistical summaries + (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped + by a configured time interval. ``_ @@ -1189,7 +1195,7 @@ def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index exists. + Checks if a data stream, index, or alias exists. ``_ @@ -1267,7 +1273,7 @@ def exists_alias( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular alias exists. + Checks if an alias exists. ``_ @@ -1510,7 +1516,7 @@ def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the field usage stats for each field of an index + Returns field usage information for each shard and field of an index. ``_ @@ -1598,7 +1604,7 @@ def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the flush operation on one or more indices. + Flushes one or more data streams or indices. ``_ @@ -1778,7 +1784,8 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more indices. + Returns information about one or more indices. For data streams, the API returns + information about the stream’s backing indices. ``_ @@ -1867,7 +1874,7 @@ def get_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an alias. + Retrieves information for one or more aliases. ``_ @@ -1948,7 +1955,7 @@ def get_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the data stream lifecycle of the selected data streams. + Retrieves the data stream lifecycle configuration of one or more data streams.
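The comma-separated, no-wildcard contract noted for `delete_index_template` above reads like this in practice; a sketch with hypothetical template names:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Both names must match existing templates exactly; wildcards are not
# supported when more than one name is given.
client.indices.delete_index_template(name="logs-template,metrics-template")
```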
``_ @@ -2007,7 +2014,7 @@ def get_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns data streams. + Retrieves information about one or more data streams. ``_ @@ -2073,7 +2080,8 @@ def get_field_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns mapping for one or more fields. + Retrieves mapping definitions for one or more fields. For data streams, the API + retrieves field mappings for the stream’s backing indices. ``_ @@ -2152,7 +2160,7 @@ def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an index template. + Returns information about one or more index templates. ``_ @@ -2227,7 +2235,8 @@ def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns mappings for one or more indices. + Retrieves mapping definitions for one or more indices. For data streams, the + API retrieves mappings for the stream’s backing indices. ``_ @@ -2313,7 +2322,8 @@ def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns settings for one or more indices. + Returns setting information for one or more indices. For data streams, returns + setting information for the stream’s backing indices. ``_ @@ -2402,7 +2412,7 @@ def get_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns an index template. + Retrieves information about one or more index templates. ``_ @@ -2459,7 +2469,14 @@ def migrate_to_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrates an alias to a data stream + Converts an index alias to a data stream. You must have a matching index template + that is data stream enabled. The alias must meet the following criteria: The + alias must have a write index; All indices for the alias must have a `@timestamp` + field mapping of a `date` or `date_nanos` field type; The alias must not have + any filters; The alias must not use custom routing. If successful, the request + removes the alias and creates a data stream with the same name. The indices for + the alias become hidden backing indices for the stream. The write index for the + alias becomes the write index for the stream. ``_ @@ -2502,7 +2519,7 @@ def modify_data_stream( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Modifies a data stream + Performs one or more data stream modification actions in a single atomic operation. ``_ @@ -2564,7 +2581,7 @@ def open( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens an index. + Opens a closed index. For data streams, the API opens any closed backing indices. ``_ @@ -2698,7 +2715,7 @@ def put_alias( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an alias. + Adds a data stream or index to an alias. ``_ @@ -2804,7 +2821,7 @@ def put_data_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the data stream lifecycle of the selected data streams. + Updates the data lifecycle of the specified data streams. ``_ @@ -2907,7 +2924,8 @@ def put_index_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices.
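Assuming an alias that meets the criteria listed for `migrate_to_data_stream` above (a write index, a `@timestamp` mapping, no filters, no custom routing), the conversion itself is a single call; a sketch with a hypothetical alias name:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# The alias is removed and a data stream of the same name is created;
# the alias's indices become its hidden backing indices.
client.indices.migrate_to_data_stream(name="my-logs-alias")
```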
``_ @@ -3068,7 +3086,9 @@ def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index mappings. + Adds new fields to an existing data stream or index. You can also use this API + to change the search settings of existing fields. For data streams, these changes + are applied to all backing indices by default. ``_ @@ -3199,7 +3219,8 @@ def put_settings( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index settings. + Changes a dynamic index setting in real time. For data streams, index setting + changes are applied to all backing indices by default. ``_ @@ -3305,7 +3326,8 @@ def put_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an index template. + Creates or updates an index template. Index templates define settings, mappings, + and aliases that can be applied automatically to new indices. ``_ @@ -3385,7 +3407,9 @@ def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing index shard recoveries. + Returns information about ongoing and completed shard recoveries for one or more + indices. For data streams, the API returns information for the stream’s backing + indices. ``_ @@ -3447,7 +3471,9 @@ def refresh( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the refresh operation in one or more indices. + A refresh makes recent operations performed on one or more indices available + for search. For data streams, the API runs the refresh operation on the stream’s + backing indices. ``_ @@ -3582,7 +3608,8 @@ def resolve_cluster( ) -> ObjectApiResponse[t.Any]: """ Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. + including the local cluster, if included. Multiple patterns and remote clusters + are supported. ``_ @@ -3653,7 +3680,8 @@ def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about any matching indices, aliases, and data streams + Resolves the specified name(s) and/or index patterns for indices, aliases, and + data streams. Multiple patterns and remote clusters are supported. ``_ @@ -3717,8 +3745,7 @@ def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an alias to point to a new index when the existing index is considered - to be too large or too old. + Creates a new index for a data stream or index alias. ``_ @@ -3823,7 +3850,8 @@ def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides low-level information about segments in a Lucene index. + Returns low-level information about the Lucene segments in index shards. For + data streams, the API returns information about the stream’s backing indices. ``_ @@ -3902,7 +3930,8 @@ def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides store information for shard copies of indices. + Retrieves store information about replica shards in one or more indices. For + data streams, the API retrieves store information for the stream’s backing indices. ``_ @@ -3975,7 +4004,7 @@ def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allow to shrink an existing index into a new index with fewer primary shards. + Shrinks an existing index into a new index with fewer primary shards. 
``_ @@ -4052,7 +4081,7 @@ def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate matching the given index name against the index templates in the system + Returns the index configuration that would be applied to the specified index + from an existing index template. ``_ @@ -4131,7 +4160,7 @@ def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate resolving the given template name or body + Returns the index configuration that would be applied by a particular index template. ``_ @@ -4264,7 +4293,7 @@ def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows you to split an existing index into a new index with more primary shards. + Splits an existing index into a new index with more primary shards. ``_ @@ -4356,7 +4385,8 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides statistics on operations happening in an index. + Returns statistics for one or more indices. For data streams, the API retrieves + statistics for the stream’s backing indices. ``_ @@ -4461,8 +4491,7 @@ def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. When a frozen index is unfrozen, the index goes through the - normal recovery process and becomes writeable again. + Unfreezes an index. ``_ @@ -4538,7 +4567,7 @@ def update_aliases( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates index aliases. + Adds a data stream or index to an alias. ``_ @@ -4613,7 +4642,7 @@ def validate_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a user to validate a potentially expensive query without executing it. + Validates a potentially expensive query without executing it.
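A sketch of the `validate_query` summary above, using `explain` to surface the reason when a query is rejected (hypothetical index and a deliberately malformed query):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

resp = client.indices.validate_query(
    index="my-index",
    query={"query_string": {"query": "created:[2024 TO"}},  # malformed on purpose
    explain=True,
)
if not resp["valid"]:
    for explanation in resp.get("explanations", []):
        print(explanation.get("error"))
```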
``_ diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 7427833b0..be4a9cfb5 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -36,8 +36,10 @@ def delete( str, ] ] = None, + dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -48,6 +50,10 @@ def delete( :param inference_id: The inference Id :param task_type: The task type + :param dry_run: When true, the endpoint is not deleted, and a list of ingest + processors which reference this endpoint is returned + :param force: When true, the inference endpoint is forcefully deleted even if + it is still being used by ingest processors or semantic text fields """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -64,10 +70,14 @@ def delete( else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} + if dry_run is not None: + __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if force is not None: + __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: @@ -162,7 +172,7 @@ def inference( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform inference + Perform inference on the service ``_ @@ -245,7 +255,7 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Configure an inference endpoint for use in the Inference API + Create an inference endpoint ``_ diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index a286a8e1d..ec1d17e4f 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -40,7 +40,7 @@ def delete_pipeline( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline. + Deletes one or more existing ingest pipelines. ``_ @@ -89,7 +89,7 @@ def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistical information about geoip databases + Gets download statistics for GeoIP2 databases used with the geoip processor. ``_ """ @@ -129,7 +129,8 @@ def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a pipeline. + Returns information about one or more ingest pipelines. This API returns a local + reference of the pipeline. ``_ @@ -180,7 +181,10 @@ def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of the built-in patterns. + Extracts structured fields out of a single text field within a document. You + choose which field to extract matched fields from, as well as the grok pattern + you expect will match. A grok pattern is like a regular expression that supports + aliased expressions that can be reused. ``_ """ @@ -230,7 +234,8 @@ def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline. + Creates or updates an ingest pipeline. Changes made using this API take effect + immediately.
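The pipeline create/simulate pair described above fits together like this; a minimal sketch with a hypothetical pipeline id:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Changes take effect immediately, so simulate before routing real traffic.
client.ingest.put_pipeline(
    id="lowercase-user",
    description="Lowercase the user field",
    processors=[{"lowercase": {"field": "user"}}],
)

resp = client.ingest.simulate(id="lowercase-user", docs=[{"_source": {"user": "ADMIN"}}])
print(resp["docs"][0]["doc"]["_source"]["user"])  # "admin"
```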
``_ @@ -316,7 +321,7 @@ def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to simulate a pipeline with example documents. + Executes an ingest pipeline against a set of provided documents. ``_ diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 19885503a..dead68945 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -72,7 +72,9 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves licensing information for the cluster + This API returns information about the type of license, when it was issued, and + when it expires, for example. For more information about the different types + of licenses, see https://www.elastic.co/subscriptions. ``_ @@ -248,7 +250,12 @@ def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an indefinite basic license. + The start basic API enables you to initiate an indefinite basic license, which + gives access to all the basic features. If the basic license does not support + all of the features that are available with your current license, however, you + are notified in the response. You must then re-submit the API request with the + acknowledge parameter set to true. To check the status of your basic license, + use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). ``_ @@ -290,7 +297,8 @@ def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - starts a limited time trial license. + The start trial API enables you to start a 30-day trial, which gives access to + all subscription features. ``_ diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index 323a43f35..040d5e030 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -36,7 +36,7 @@ def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes Logstash Pipelines used by Central Management + Deletes a pipeline used for Logstash Central Management. ``_ @@ -76,7 +76,7 @@ def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves Logstash Pipelines used by Central Management + Retrieves pipelines used for Logstash Central Management. ``_ @@ -123,7 +123,7 @@ def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates Logstash Pipelines used for Central Management + Creates or updates a pipeline used for Logstash Central Management. ``_ diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 90bfaf154..1338e1381 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -36,7 +36,11 @@ def clear_trained_model_deployment_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cached results from a trained model deployment + Clears a trained model deployment cache on all nodes where the trained model + is assigned. A trained model deployment may have an inference cache enabled. + As requests are handled by each allocated node, their responses may be cached + on that individual node. Calling this API clears the caches without restarting + the deployment. 
``_ @@ -84,8 +88,19 @@ def close_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes one or more anomaly detection jobs. A job can be opened and closed multiple - times throughout its lifecycle. + Close anomaly detection jobs. A job can be opened and closed multiple times throughout + its lifecycle. A closed job cannot receive data or perform analysis operations, + but you can still explore and navigate results. When you close a job, it runs + housekeeping tasks such as pruning the model history, flushing buffers, calculating + final results and persisting the model snapshots. Depending upon the size of + the job, it could take several minutes to close and the equivalent time to re-open. + After it is closed, the job has a minimal overhead on the cluster except for + maintaining its meta data. Therefore it is a best practice to close jobs that + are no longer required to process data. If you close an anomaly detection job + whose datafeed is running, the request first tries to stop the datafeed. This + behavior is equivalent to calling the stop datafeed API with the same timeout and + force parameters as the close job request. When a datafeed that has a specified + end date stops, it automatically closes its associated job. ``_ @@ -146,7 +161,7 @@ def delete_calendar( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a calendar. + Removes all scheduled events from a calendar, then deletes it. ``_ @@ -284,7 +299,7 @@ def delete_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing data frame analytics job. + Deletes a data frame analytics job. ``_ @@ -384,7 +399,13 @@ def delete_expired_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes expired and unused machine learning data. + Deletes expired and unused machine learning data. Deletes all job results, model + snapshots and forecast data that have exceeded their retention days period. Machine + learning state documents that are not associated with any job are also deleted. + You can limit the request to a single or set of anomaly detection jobs by using + a job identifier, a group name, a comma-separated list of jobs, or a wildcard + expression. You can delete expired data for all anomaly detection jobs by using + _all, by specifying * as the <job_id>, or by omitting the <job_id>. ``_ @@ -443,7 +464,9 @@ def delete_filter( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a filter. + Deletes a filter. If an anomaly detection job references the filter, you cannot + delete the filter. You must update or delete the job before you can delete the + filter. ``_ @@ -486,7 +509,10 @@ def delete_forecast( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes forecasts from a machine learning job. + Deletes forecasts from a machine learning job. By default, forecasts are retained + for 14 days. You can specify a different retention period with the `expires_in` + parameter in the forecast jobs API. The delete forecast API enables you to delete + one or more forecasts before they expire. ``_ @@ -553,7 +579,12 @@ def delete_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing anomaly detection job. + Deletes an anomaly detection job. All job configuration, model state and results + are deleted.
It is not currently possible to delete multiple jobs using wildcards + or a comma-separated list. If you delete a job that has a datafeed, the request + first tries to delete the datafeed. This behavior is equivalent to calling the + delete datafeed API with the same timeout and force parameters as the delete + job request. ``_ @@ -607,7 +638,10 @@ def delete_model_snapshot( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing model snapshot. + Deletes an existing model snapshot. You cannot delete the active model snapshot. + To delete that snapshot, first revert to a different one. To identify the active + model snapshot, refer to the `model_snapshot_id` in the results from the get + jobs API. ``_ @@ -700,7 +734,9 @@ def delete_trained_model_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a model alias that refers to the trained model + Deletes a trained model alias. This API deletes an existing model alias that + refers to a trained model. If the model alias is missing or refers to a model + other than the one identified by the `model_id`, this API returns an error. ``_ @@ -755,7 +791,9 @@ def estimate_model_memory( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Estimates the model memory + Makes an estimation of the memory usage for an anomaly detection job model. It + is based on analysis configuration details for the job and cardinality estimates + for the fields it references. ``_ @@ -820,7 +858,10 @@ def evaluate_data_frame( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates the data frame analytics for an annotated index. + Evaluates the data frame analytics for an annotated index. The API packages together + commonly used evaluation metrics for various types of machine learning features. + This has been designed for use on indexes created by data frame analytics. Evaluation + requires both a ground truth field and an analytics result field to be present. ``_ @@ -894,7 +935,13 @@ def explain_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explains a data frame analytics config. + Explains a data frame analytics config. This API provides explanations for a + data frame analytics config that either exists already or one that has not been + created yet. The following explanations are provided: * which fields are included + or not in the analysis and why, * how much memory is estimated to be required. + The estimate can be used when deciding the appropriate value for the `model_memory_limit` + setting later on. If you have object fields or fields that are excluded via source + filtering, they are not included in the explanation. ``_ @@ -994,7 +1041,14 @@ def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. + Forces any buffered data to be processed by the job. The flush jobs API is only + applicable when sending data for analysis using the post data API. Depending + on the content of the buffer, it might additionally calculate new results. + Both flush and close operations are similar; however, the flush is more efficient + if you are expecting to send more data for analysis. When flushing, the job remains + open and is available to continue analyzing data.
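A small sketch contrasting flush with close, per the flush jobs description above (job ID hypothetical):

```python
# Flush instead of close when more data is expected: the job stays open.
# calc_interim additionally calculates interim results for the latest buckets.
client.ml.flush_job(job_id="my-job", calc_interim=True)
```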
A close operation additionally + prunes and persists the model state to disk and the job must be opened again + before analyzing further data. ``_ @@ -1063,6 +1117,9 @@ def forecast( ) -> ObjectApiResponse[t.Any]: """ Predicts the future behavior of a time series by using its historical behavior. + Forecasts are not supported for jobs that perform population analysis; an error + occurs if you try to create a forecast for a job that has an `over_field_name` + in its configuration. ``_ @@ -1144,7 +1201,8 @@ def get_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more buckets. + Retrieves anomaly detection job results for one or more buckets. The API presents + a chronological view of the records, grouped by bucket. ``_ @@ -1460,7 +1518,9 @@ def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. + Retrieves configuration information for data frame analytics jobs. You can get + information for multiple data frame analytics jobs in a single API request by + using a comma-separated list of data frame analytics jobs or a wildcard expression. ``_ @@ -1597,7 +1657,12 @@ def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. + Retrieves usage information for datafeeds. You can get statistics for multiple + datafeeds in a single API request by using a comma-separated list of datafeeds + or a wildcard expression. You can get statistics for all datafeeds by using `_all`, + by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the + datafeed is stopped, the only information you receive is the `datafeed_id` and + the `state`. This API returns a maximum of 10,000 datafeeds. ``_ @@ -1653,7 +1718,11 @@ def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. + Retrieves configuration information for datafeeds. You can get information for + multiple datafeeds in a single API request by using a comma-separated list of + datafeeds or a wildcard expression. You can get information for all datafeeds + by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. + This API returns a maximum of 10,000 datafeeds. ``_ @@ -1716,7 +1785,7 @@ def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. + Retrieves filters. You can get a single filter or all filters. ``_ @@ -1778,7 +1847,10 @@ def get_influencers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more influencers. + Retrieves anomaly detection job results for one or more influencers. Influencers + are the entities that have contributed to, or are to blame for, the anomalies. + Influencer results are available only if an `influencer_field_name` is specified + in the job configuration. ``_ @@ -1916,7 +1988,11 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. + Retrieves configuration information for anomaly detection jobs. You can get information + for multiple anomaly detection jobs in a single API request by using a group + name, a comma-separated list of jobs, or a wildcard expression.
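A sketch of the multi-datafeed addressing described above, assuming hypothetical datafeed IDs that share a `datafeed-` prefix:

```python
# `_all`, a wildcard, or a comma-separated list all work as the <feed_id>.
stats = client.ml.get_datafeed_stats(datafeed_id="datafeed-*")
for feed in stats["datafeeds"]:
    # A stopped datafeed reports only its ID and state.
    print(feed["datafeed_id"], feed["state"])
```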
You can get information + for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, + or by omitting the `<job_id>`. ``_ @@ -1979,7 +2055,9 @@ def get_memory_stats( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on how ML is using memory. + Get information about how machine learning jobs and trained models are using + memory on each node, both within the JVM heap and natively, outside of the + JVM. ``_ @@ -2034,7 +2112,7 @@ def get_model_snapshot_upgrade_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets stats for anomaly detection job model snapshot upgrades that are in progress. + Retrieves usage information for anomaly detection job model snapshot upgrades. ``_ @@ -2207,7 +2285,18 @@ def get_overall_buckets( ) -> ObjectApiResponse[t.Any]: """ Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. + anomaly detection jobs. The `overall_score` is calculated by combining the scores + of all the buckets within the overall bucket span. First, the maximum `anomaly_score` + per anomaly detection job in the overall bucket is calculated. Then the `top_n` + of those scores are averaged to result in the `overall_score`. This means that + you can fine-tune the `overall_score` so that it is more or less sensitive to + the number of jobs that detect an anomaly at the same time. For example, if you + set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall + bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` + is high only when all jobs detect anomalies in that overall bucket. If you set + the `bucket_span` parameter (to a value greater than its default), the `overall_score` + is the maximum `overall_score` of the overall buckets that have a span equal + to the jobs' largest bucket span. ``_ @@ -2304,7 +2393,15 @@ def get_records( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly records for an anomaly detection job. + Retrieves anomaly records for an anomaly detection job. Records contain the detailed + analytical results. They describe the anomalous activity that has been identified + in the input data based on the detector configuration. There can be many anomaly + records depending on the characteristics and size of the input data. In practice, + there are often too many to be able to manually process them. The machine learning + features therefore perform a sophisticated aggregation of the anomaly records + into buckets. The number of record results depends on the number of anomalies + found in each bucket, which relates to the number of time series being modeled + and the number of detectors. ``_ @@ -2374,7 +2471,7 @@ def get_records( def get_trained_models( self, *, - model_id: t.Optional[str] = None, + model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, decompress_definition: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -2390,14 +2487,16 @@ def get_trained_models( ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, - tags: t.Optional[str] = None, + tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained inference model. + Retrieves configuration information for a trained model.
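To make the `top_n`/`overall_score` interaction concrete, a sketch with three hypothetical jobs:

```python
# With top_n=2, each overall bucket's score is the average of the two highest
# per-job maximum anomaly scores in that bucket.
resp = client.ml.get_overall_buckets(
    job_id="job-1,job-2,job-3",
    top_n=2,
    overall_score=60.0,  # keep only overall buckets scoring at least 60
)
print(resp["count"])
```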
``_ - :param model_id: The unique identifier of the trained model. + :param model_id: The unique identifier of the trained model or a model alias. + You can get information for multiple trained models in a single API request + by using a comma-separated list of model IDs or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions @@ -2473,7 +2572,9 @@ def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained inference models. + Retrieves usage information for trained models. You can get usage information + for multiple trained models in a single API request by using a comma-separated + list of model IDs or a wildcard expression. ``_ @@ -2536,7 +2637,7 @@ def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate a trained model. + Evaluates a trained model. ``_ @@ -2593,7 +2694,12 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns defaults and limits used by machine learning. + Returns defaults and limits used by machine learning. This endpoint is designed + to be used by a user interface that needs to fully understand machine learning + configurations where some options are not specified, meaning that the defaults + should be used. This endpoint may be used to find out what those defaults are. + It also provides information about the maximum size of machine learning jobs + that could run in the current cluster configuration. ``_ """ @@ -2633,7 +2739,12 @@ def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens one or more anomaly detection jobs. + Opens one or more anomaly detection jobs. An anomaly detection job must be opened + in order for it to be ready to receive and analyze data. It can be opened and + closed multiple times throughout its lifecycle. When you open a new job, it starts + with an empty model. When you open an existing job, the most recent model state + is automatically loaded. The job is ready to resume its analysis from where it + left off, once new data is received. ``_ @@ -2687,7 +2798,7 @@ def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Posts scheduled events in a calendar. + Adds scheduled events to a calendar. ``_ @@ -2743,7 +2854,9 @@ def post_data( reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Sends data to an anomaly detection job for analysis. + Sends data to an anomaly detection job for analysis. IMPORTANT: For each job, + data can be accepted from only a single connection at a time. It is not currently + possible to post data to multiple jobs using wildcards or a comma-separated list. ``_ @@ -2806,7 +2919,7 @@ def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews that will be analyzed given a data frame analytics config. + Previews the extracted features used by a data frame analytics config. ``_ @@ -2868,7 +2981,15 @@ def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. + Previews a datafeed. This API returns the first "page" of search results from + a datafeed. 
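A sketch of the datafeed preview described above (datafeed ID hypothetical); the response body is the first "page" of extracted documents:

```python
# Preview the structure of the data the anomaly detection engine would receive.
docs = client.ml.preview_datafeed(datafeed_id="datafeed-my-job")
print(docs.body[:3])  # first few documents of the preview page
```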
You can preview an existing datafeed or provide configuration details + for a datafeed and anomaly detection job in the API. The preview shows the structure + of the data that will be passed to the anomaly detection engine. IMPORTANT: When + Elasticsearch security features are enabled, the preview uses the credentials + of the user that called the API. However, when the datafeed starts, it uses the + roles of the last user that created or updated the datafeed. To get a preview + that accurately reflects the behavior of the datafeed, use the appropriate credentials. + You can also use secondary authorization headers to supply the credentials. ``_ @@ -2944,7 +3065,7 @@ def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a calendar. + Creates a calendar. ``_ @@ -2991,7 +3112,7 @@ def put_calendar_job( self, *, calendar_id: str, - job_id: str, + job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -3070,7 +3191,9 @@ def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. + Instantiates a data frame analytics job. This API creates a data frame analytics + job that performs an analysis on the source indices and stores the outcome in + a destination index. ``_ @@ -3238,7 +3361,17 @@ def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. + Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis + by an anomaly detection job. You can associate only one datafeed with each anomaly + detection job. The datafeed contains a query that runs at a defined interval + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) + at each interval. When Elasticsearch security features are enabled, your datafeed + remembers which roles the user who created it had at the time of creation and + runs the query using those same roles. If you provide secondary authorization + headers, those credentials are used instead. You must use Kibana, this API, or + the create anomaly detection jobs API to create a datafeed. Do not add a datafeed + directly to the `.ml-config` index. Do not give users `write` privileges on the + `.ml-config` index. ``_ @@ -3391,7 +3524,9 @@ def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. + Instantiates a filter. A filter contains a list of strings. It can be used by + one or more anomaly detection jobs. Specifically, filters are referenced in the + `custom_rules` property of detector configuration objects. ``_ @@ -3477,7 +3612,8 @@ def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates an anomaly detection job. + Instantiates an anomaly detection job. If you include a `datafeed_config`, you + must have read index privileges on the source index. ``_ @@ -3657,7 +3793,7 @@ def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an inference trained model. + Enables you to supply a trained model that is not created by data frame analytics.
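A minimal put_datafeed sketch showing the `frequency` and `query_delay` knobs mentioned above; the index pattern, job ID, and interval values are placeholders:

```python
client.ml.put_datafeed(
    datafeed_id="datafeed-my-job",
    job_id="my-job",
    indices=["my-metrics-*"],
    query={"match_all": {}},
    frequency="150s",   # how often the datafeed's query runs
    query_delay="90s",  # tolerate delayed data at each interval
)
```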
``_ @@ -3759,8 +3895,19 @@ def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new model alias (or reassigns an existing one) to refer to the trained - model + Creates or updates a trained model alias. A trained model alias is a logical + name used to reference a single trained model. You can use aliases instead of + trained model identifiers to make it easier to reference your models. For example, + you can use aliases in inference aggregations and processors. An alias must be + unique and refer to only a single trained model. However, you can have multiple + aliases for each trained model. If you use this API to update an alias such that + it references a different trained model ID and the model uses a different type + of data frame analytics, an error occurs. For example, this situation occurs + if you have a trained model for regression analysis and a trained model for classification + analysis; you cannot reassign an alias from one type of trained model to another. + If you use this API to update an alias and there are very few input fields in + common between the old and new trained models for the model alias, the API returns + a warning. ``_ @@ -3818,7 +3965,7 @@ def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition + Creates part of a trained model definition. ``_ @@ -3895,7 +4042,9 @@ def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary + Creates a trained model vocabulary. This API is supported only for natural language + processing (NLP) models. The vocabulary is stored in the index as described in + `inference_config.*.vocabulary` of the trained model definition. ``_ @@ -3951,7 +4100,9 @@ def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing anomaly detection job. + Resets an anomaly detection job. All model state and results are deleted. The + job is ready to start over as if it had just been created. It is not currently + possible to reset multiple jobs using wildcards or a comma-separated list. ``_ @@ -4005,7 +4156,13 @@ def revert_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reverts to a specific snapshot. + Reverts to a specific snapshot. The machine learning features react quickly to + anomalous input, learning new behaviors in data. Highly anomalous input increases + the variance in the models whilst the system learns whether this is a new step-change + in behavior or a one-off event. In the case where this anomalous input is known + to be a one-off, then it might be appropriate to reset the model state to a time + before this event. For example, you might consider reverting to a saved snapshot + after Black Friday or a critical system failure. ``_ @@ -4066,7 +4223,15 @@ def set_upgrade_mode( ) -> ObjectApiResponse[t.Any]: """ Sets a cluster wide upgrade_mode setting that prepares machine learning indices - for an upgrade. + for an upgrade. When upgrading your cluster, in some circumstances you must restart + your nodes and reindex your machine learning indices. In those circumstances, + there must be no machine learning jobs running. You can close the machine learning + jobs, do the upgrade, then open all the jobs again.
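A sketch of the alias workflow described above, with hypothetical model IDs; `reassign=True` moves an alias that already points at another model:

```python
# Promote a newly trained model behind a stable alias.
client.ml.put_trained_model_alias(
    model_id="my-model-v2",
    model_alias="my-model",
    reassign=True,
)
```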
Alternatively, you can use + this API to temporarily halt tasks associated with the jobs and datafeeds and + prevent new jobs from opening. You can also use this API during upgrades that + do not require you to reindex your machine learning indices, though stopping + jobs is not a requirement in that case. You can see the current value for the + upgrade_mode setting by using the get machine learning info API. ``_ @@ -4112,7 +4277,16 @@ def start_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. + Starts a data frame analytics job. A data frame analytics job can be started + and stopped multiple times throughout its lifecycle. If the destination index + does not exist, it is created automatically the first time you start the data + frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` + settings for the destination index are copied from the source index. If there + are multiple source indices, the destination index copies the highest setting + values. The mappings for the destination index are also copied from the source + indices. If there are any mapping conflicts, the job fails to start. If the destination + index exists, it is used as is. You can therefore set up the destination index + in advance with custom settings and mappings. ``_ @@ -4164,7 +4338,17 @@ def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. + Starts one or more datafeeds. A datafeed must be started in order to retrieve + data from Elasticsearch. A datafeed can be started and stopped multiple times + throughout its lifecycle. Before you can start a datafeed, the anomaly detection + job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, + it continues processing input data from the next millisecond after it was stopped. + If new data was indexed for that exact millisecond between stopping and starting, + it will be ignored. When Elasticsearch security features are enabled, your datafeed + remembers which roles the last user to create or update it had at the time of + creation or update and runs the query using those same roles. If you provided + secondary authorization headers when you created or updated the datafeed, those + credentials are used instead. ``_ @@ -4233,7 +4417,8 @@ def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trained model deployment. + Starts a trained model deployment, which allocates the model to every machine + learning node. ``_ @@ -4316,7 +4501,8 @@ def stop_data_frame_analytics( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. + Stops one or more data frame analytics jobs. A data frame analytics job can be + started and stopped multiple times throughout its lifecycle. ``_ @@ -4381,7 +4567,9 @@ def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. + Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data + from Elasticsearch. A datafeed can be started and stopped multiple times throughout + its lifecycle. ``_ @@ -4443,7 +4631,7 @@ def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop a trained model deployment. + Stops a trained model deployment. 
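A sketch of the upgrade_mode round trip described above:

```python
# Temporarily halt ML tasks, perform the upgrade, then resume.
client.ml.set_upgrade_mode(enabled=True, timeout="10m")
# ... restart nodes / reindex machine learning indices ...
client.ml.set_upgrade_mode(enabled=False)
print(client.ml.info()["upgrade_mode"])  # confirm the current setting
```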
``_ @@ -4508,7 +4696,7 @@ def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a data frame analytics job. + Updates an existing data frame analytics job. ``_ @@ -4616,7 +4804,11 @@ def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a datafeed. + Updates the properties of a datafeed. You must stop and start the datafeed for + the changes to be applied. When Elasticsearch security features are enabled, + your datafeed remembers which roles the user who updated it had at the time of + the update and runs the query using those same roles. If you provide secondary + authorization headers, those credentials are used instead. ``_ @@ -4779,7 +4971,7 @@ def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items. + Updates the description of a filter, adds items, or removes items from the list. ``_ @@ -5058,7 +5250,8 @@ def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of trained model deployment. + Updates certain properties of a trained model deployment, such as the number + of allocations. ``_ @@ -5088,7 +5281,11 @@ def update_trained_model_deployment( if not __body: if number_of_allocations is not None: __body["number_of_allocations"] = number_of_allocations - __headers = {"accept": "application/json", "content-type": "application/json"} + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, @@ -5113,7 +5310,13 @@ def upgrade_job_snapshot( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades a given job snapshot to the current major version. + Upgrades an anomaly detection model snapshot to the latest major version. Over + time, older snapshot formats are deprecated and removed. Anomaly detection jobs + support only snapshots that are from the current or previous major version. This + API provides a means to upgrade a snapshot to the current major version. This + aids in preparing the cluster for an upgrade to the next major version. Only + one snapshot per anomaly detection job can be upgraded at a time and the upgraded + snapshot cannot be the current snapshot of the anomaly detection job. ``_ diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 56ae5a34a..c55d30c30 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -37,7 +37,8 @@ def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the archived repositories metering information present in the cluster. + You can use this API to clear the archived repositories metering information + in the cluster. ``_ @@ -85,7 +86,11 @@ def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster repositories metering information. + You can use the cluster repositories metering API to retrieve repositories metering + information in a cluster.
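Since datafeed updates only apply after a stop/start cycle (as described above), a sketch with a hypothetical datafeed:

```python
client.ml.stop_datafeed(datafeed_id="datafeed-my-job")
client.ml.update_datafeed(datafeed_id="datafeed-my-job", query_delay="120s")
client.ml.start_datafeed(datafeed_id="datafeed-my-job")
```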
This API exposes monotonically non-decreasing counters + and it’s expected that clients would durably store the information needed to + compute aggregations over a period of time. Additionally, the information exposed + by this API is volatile, meaning that it won’t be present after node restarts. ``_ @@ -140,7 +145,8 @@ def hot_threads( ] = None, ) -> TextApiResponse: """ - Returns information about hot threads on each node in the cluster. + This API yields a breakdown of the hot threads on each selected node in the cluster. + The output is plain text with a breakdown of each node’s top hot threads. ``_ @@ -217,7 +223,7 @@ def info( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about nodes in the cluster. + Returns cluster nodes information. ``_ @@ -286,7 +292,7 @@ def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads secure settings. + Reloads the keystore on nodes in the cluster. ``_ @@ -359,7 +365,7 @@ def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistical information about nodes in the cluster. + Returns cluster nodes statistics. ``_ @@ -476,7 +482,7 @@ def usage( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about REST actions usage on nodes. + Returns information on the usage of features. ``_ diff --git a/elasticsearch/_sync/client/query_ruleset.py b/elasticsearch/_sync/client/query_ruleset.py index d2923992b..f4f7b59fc 100644 --- a/elasticsearch/_sync/client/query_ruleset.py +++ b/elasticsearch/_sync/client/query_ruleset.py @@ -76,7 +76,7 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset. + Returns the details about a query ruleset. ``_ @@ -119,7 +119,7 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists query rulesets. + Returns summarized information about existing query rulesets. ``_ diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 850228997..470fd23b3 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -165,8 +165,8 @@ def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the - index where rollup data is stored). + Returns the rollup capabilities of all jobs inside of a rollup index (for example, + the index where rollup data is stored). ``_ @@ -344,7 +344,7 @@ def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard query DSL. + Enables searching rolled-up data using the standard Query DSL. ``_ diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index b4502e3c9..e6483fe8e 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -116,7 +116,7 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application.
+ Returns the details about a search application. ``_ @@ -353,7 +353,7 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application + Perform a search against a search application. ``_ diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index e4cf338c8..b4ce94f75 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -44,7 +44,7 @@ def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates the user profile on behalf of another user. + Creates or updates a user profile on behalf of another user. ``_ @@ -97,8 +97,12 @@ def authenticate( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables authentication as a user and retrieve information about the authenticated - user. + Enables you to submit a request with a basic auth header to authenticate a user + and retrieve information about the authenticated user. A successful call returns + a JSON structure that shows user information such as their username, the roles + that are assigned to the user, any assigned metadata, and information about the + realms that authenticated and authorized the user. If the user cannot be authenticated, + this API returns a 401 status code. ``_ """ @@ -204,7 +208,8 @@ def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear a subset or all entries from the API key cache. + Evicts a subset of all entries from the API key cache. The cache is also automatically + cleared on state changes of the security index. ``_ @@ -431,7 +436,11 @@ def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key for access without requiring basic authentication. + Creates an API key for access without requiring basic authentication. A successful + request returns a JSON structure that contains the API key, its unique id, and + its name. If applicable, it also returns expiration information for the API key + in milliseconds. NOTE: By default, API keys never expire. You can specify expiration + information when you create the API keys. ``_ @@ -503,7 +512,7 @@ def create_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a service account token for access without requiring basic authentication. + Creates a service account token for access without requiring basic authentication. ``_ @@ -1021,8 +1030,8 @@ def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a kibana instance to configure itself to communicate with a secured elasticsearch - cluster. + Enables a Kibana instance to configure itself for communication with a secured + Elasticsearch cluster. ``_ """ @@ -1057,7 +1066,7 @@ def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a new node to enroll to an existing cluster with security enabled. + Allows a new node to join an existing cluster with security features enabled. ``_ """ @@ -1100,7 +1109,10 @@ def get_api_key( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for one or more API keys. + Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` + privilege, this API returns only the API keys that you own. If you have `read_security`,
If you have `read_security`, + `manage_api_key` or greater privileges (including `manage_security`), this API + returns all API keys regardless of ownership. ``_ @@ -1259,7 +1271,9 @@ def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather + than using file-based role management. The get roles API cannot retrieve roles + that are defined in roles files. ``_ @@ -1352,7 +1366,7 @@ def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about service accounts. + This API returns a list of service accounts that match the provided path parameter(s). ``_ @@ -1629,7 +1643,7 @@ def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves user profiles for the given unique ID(s). + Retrieves a user's profile using the unique profile ID. ``_ @@ -1693,7 +1707,21 @@ def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key on behalf of another user. + Creates an API key on behalf of another user. This API is similar to Create API + keys, however it creates the API key for a user that is different than the user + that runs the API. The caller must have authentication credentials (either an + access token, or a username and password) for the user on whose behalf the API + key will be created. It is not possible to use this API to create an API key + without that user’s credentials. The user, for whom the authentication credentials + is provided, can optionally "run as" (impersonate) another user. In this case, + the API key will be created on behalf of the impersonated user. This API is intended + be used by applications that need to create and manage API keys for end users, + but cannot guarantee that those users have permission to create API keys on their + own behalf. A successful grant API key API call returns a JSON structure that + contains the API key, its unique id, and its name. If applicable, it also returns + expiration information for the API key in milliseconds. By default, API keys + never expire. You can specify expiration information when you create the API + keys. ``_ @@ -1893,7 +1921,13 @@ def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more API keys. + Invalidates one or more API keys. The `manage_api_key` privilege allows deleting + any API keys. The `manage_own_api_key` only allows deleting API keys that are + owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation + request must be issued in one of the three formats: - Set the parameter `owner=true`. + - Or, set both `username` and `realm_name` to match the user’s identity. - Or, + if the request is issued by an API key, i.e. an API key invalidates itself, specify + its ID in the `ids` field. ``_ @@ -2104,7 +2138,9 @@ def put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather + than using file-based role management. The create or update roles API cannot + update roles that are defined in roles files. 
``_ @@ -2122,6 +2158,9 @@ def put_role( this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param run_as: A list of users that the owners of this role can impersonate. + *Note*: in Serverless, the run-as feature is disabled. For API compatibility, + you can still specify an empty `run_as` field, but a non-empty list will + be rejected. :param transient_metadata: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given @@ -2386,7 +2425,8 @@ def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for API keys using a subset of query DSL + Retrieves information for API keys in a paginated manner. You can optionally + filter the results with a query. ``_ @@ -2502,8 +2542,7 @@ def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Exchanges a SAML Response message for an Elasticsearch access token and refresh - token pair + Submits a SAML Response message to Elasticsearch for consumption. ``_ @@ -2565,7 +2604,7 @@ def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the logout response sent from the SAML IdP + Verifies the logout response sent from the SAML IdP. ``_ @@ -2631,7 +2670,7 @@ def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Consumes a SAML LogoutRequest + Submits a SAML LogoutRequest message to Elasticsearch for consumption. ``_ @@ -2698,8 +2737,7 @@ def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates an access token and a refresh token that were generated via the SAML - Authenticate API + Submits a request to invalidate an access token and refresh token. ``_ @@ -2756,7 +2794,8 @@ def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a SAML authentication request + Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based + on the configuration of the respective SAML realm in Elasticsearch. ``_ @@ -2811,7 +2850,7 @@ def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider + Generates SAML metadata for a SAML 2.0 Service Provider. ``_ @@ -2926,7 +2965,22 @@ def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates attributes of an existing API key. + Updates attributes of an existing API key. Users can only update API keys that + they created or that were granted to them. Use this API to update API keys created + by the create API key or grant API key APIs. If you need to apply the same update + to many API keys, you can use bulk update API keys to reduce overhead. It’s not + possible to update expired API keys or API keys that have been invalidated by + the invalidate API key API. This API supports updates to an API key’s access scope and + metadata. The access scope of an API key is derived from the `role_descriptors` + you specify in the request, and a snapshot of the owner user’s permissions at + the time of the request. The snapshot of the owner’s permissions is updated automatically + on every call.
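A hedged sketch of an update_api_key call narrowing a key's scope; the key ID and role descriptor are hypothetical:

```python
client.security.update_api_key(
    id="VuaCfGcBCdbkQm-e5aOx",  # placeholder API key ID
    role_descriptors={
        "read-logs": {"indices": [{"names": ["logs-*"], "privileges": ["read"]}]}
    },
)
```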
If you don’t specify `role_descriptors` in the request, a call + to this API might still change the API key’s access scope. This change can occur + if the owner user’s permissions have changed since the API key was created or + last modified. To update another user’s API key, use the `run_as` feature to + submit a request on behalf of another user. IMPORTANT: It’s not possible to use + an API key as the authentication credential for this API. To update an API key, + the owner user’s credentials are required. ``_ @@ -3001,7 +3055,8 @@ def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update application specific data for the user profile of the given unique ID. + Updates specific data for the user profile that's associated with the specified + unique ID. ``_ diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index ce374356e..80793e366 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -40,7 +40,8 @@ def cleanup_repository( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes stale data from repository. + Triggers the review of a snapshot repository’s contents and deletes any stale + data not referenced by existing snapshots. ``_ diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index be3b30e95..2455c3f3d 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -270,7 +270,7 @@ def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonyms set + Creates or updates a synonym set. ``_ diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index b6dad4097..4db15d817 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -158,7 +158,8 @@ def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a list of tasks. + The task management API returns information about tasks currently executing on + one or more nodes in the cluster. ``_ diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 74aa65f0c..6eac80a30 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -39,7 +39,7 @@ def delete_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing transform. + Deletes a transform. ``_ @@ -249,7 +249,10 @@ def preview_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a transform. + Previews a transform. It returns a maximum of 100 results. The calculations are + based on all the current data in the source index. It also generates a list of + mappings and settings for the destination index. These values are determined + based on the field types of the source index and the transform aggregations. ``_ @@ -366,7 +369,26 @@ def put_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a transform. + Creates a transform. A transform copies data from source indices, transforms + it, and persists it into an entity-centric destination index. You can also think + of the destination index as a two-dimensional tabular data structure (known as + a data frame). 
The ID for each document in the data frame is generated from a + hash of the entity, so there is a unique row per entity. You must choose either + the latest or pivot method for your transform; you cannot use both in a single + transform. If you choose to use the pivot method for your transform, the entities + are defined by the set of `group_by` fields in the pivot object. If you choose + to use the latest method, the entities are defined by the `unique_key` field + values in the latest object. You must have `create_index`, `index`, and `read` + privileges on the destination index and `read` and `view_index_metadata` privileges + on the source indices. When Elasticsearch security features are enabled, the + transform remembers which roles the user that created it had at the time of creation + and uses those same roles. If those roles do not have the required privileges + on the source and destination indices, the transform fails when it attempts unauthorized + operations. NOTE: You must use Kibana or this API to create a transform. Do not + add a transform directly into any `.transform-internal*` indices using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + any privileges on `.transform-internal*` indices. If you used transforms prior + to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. ``_ @@ -466,7 +488,9 @@ def reset_transform( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an existing transform. + Resets a transform. Before you can reset it, you must stop it; alternatively, + use the `force` query parameter. If the destination index was created by the + transform, it is deleted. ``_ @@ -514,7 +538,10 @@ def schedule_now_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Schedules now a transform. + Schedules a transform to run now. If you run _schedule_now on a transform, it processes + the new data instantly, without waiting for the configured frequency interval. + After the _schedule_now API is called, the transform will be processed again at + `now + frequency` unless the _schedule_now API is called again in the meantime. ``_ @@ -561,7 +588,23 @@ def start_transform( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more transforms. + Starts a transform. When you start a transform, it creates the destination index + if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` + is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions + for the destination index from the source indices and the transform aggregations. + If fields in the destination index are derived from scripts (as in the case of + `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic + mappings unless an index template exists. If it is a latest transform, it does + not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, + create the destination index before you start the transform. Alternatively, you + can create an index template, though it does not affect the deduced mappings + in a pivot transform. When the transform starts, a series of validations occur + to ensure its success. If you deferred validation when you created the transform, + they occur when you start the transform, with the exception of privilege checks.
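A minimal pivot transform sketch matching the description above (index names and fields are placeholders); the entities come from `group_by`, producing one row per customer:

```python
client.transform.put_transform(
    transform_id="customer-spend",
    source={"index": ["orders-*"]},
    dest={"index": "customer-spend"},
    pivot={
        "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
        "aggregations": {"total": {"sum": {"field": "price"}}},
    },
)
client.transform.start_transform(transform_id="customer-spend")
```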
When Elasticsearch security features are enabled, the transform remembers which + roles the user that created it had at the time of creation and uses those same + roles. If those roles do not have the required privileges on the source and destination + indices, the transform fails when it attempts unauthorized operations. ``_ @@ -708,7 +751,13 @@ def update_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a transform. + Updates certain properties of a transform. All updated properties except `description` + do not take effect until after the transform starts the next checkpoint; thus, + there is data consistency in each checkpoint. To use this API, you must have + `read` and `view_index_metadata` privileges for the source indices. You must + also have `index` and `read` privileges for the destination index. When Elasticsearch + security features are enabled, the transform remembers which roles the user who + updated it had at the time of update and runs with those privileges. ``_ @@ -789,7 +838,13 @@ def upgrade_transforms( timeout: t.Optional[t.Union["t.Literal[-1]", "t.Literal[0]", str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades all transforms. + Upgrades all transforms. This API identifies transforms that have a legacy configuration + format and upgrades them to the latest version. It also cleans up the internal + data structures that store the transform state and checkpoints. The upgrade does + not affect the source and destination indices. The upgrade also does not affect + the roles that transforms use when Elasticsearch security features are enabled; + the role used to read source data and write to the destination index remains + unchanged. ``_ diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index 5f7291dbd..7fa2f2603 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -235,7 +235,13 @@ def execute_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces the execution of a stored watch. + This API can be used to force execution of the watch outside of its triggering + logic or to simulate the watch execution for debugging purposes. For testing + and debugging, you also have fine-grained control over how the watch runs. + You can execute the watch without executing all of its actions or alternatively + by simulating them. You can also force execution by ignoring the watch condition + and control whether a watch record would be written to the watch history after + execution. ``_ diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index 9ab1989a1..29bd2a040 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -41,7 +41,7 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the installed X-Pack features. + Provides general information about the installed X-Pack features. ``_ @@ -87,7 +87,8 @@ def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information about the installed X-Pack features. + This API provides information about which features are currently enabled and + available under the current license and some usage statistics. ``_
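Finally, a sketch of the usage call; the exact response keys vary by deployment and license, so treat the `transform` lookup as an assumption:

```python
usage = client.xpack.usage()
print(usage["transform"])  # e.g. availability, enabled flag, usage counters
```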