Auto-generated code for 8.x #2728

Merged
merged 1 commit on Jan 7, 2025
10 changes: 10 additions & 0 deletions elasticsearch/_async/client/__init__.py
@@ -626,12 +626,14 @@ async def bulk(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
list_executed_pipelines: t.Optional[bool] = None,
pipeline: t.Optional[str] = None,
pretty: t.Optional[bool] = None,
refresh: t.Optional[
t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
] = None,
require_alias: t.Optional[bool] = None,
require_data_stream: t.Optional[bool] = None,
routing: t.Optional[str] = None,
source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -651,6 +653,8 @@ async def bulk(
:param operations:
:param index: Name of the data stream, index, or index alias to perform bulk
actions on.
:param list_executed_pipelines: If `true`, the response will include the ingest
pipelines that were executed for each index or create.
:param pipeline: ID of the pipeline to use to preprocess incoming documents.
If the index has a default ingest pipeline specified, then setting the value
to `_none` disables the default ingest pipeline for this request. If a final
@@ -661,6 +665,8 @@
make this operation visible to search, if `false` do nothing with refreshes.
Valid values: `true`, `false`, `wait_for`.
:param require_alias: If `true`, the request’s actions must target an index alias.
:param require_data_stream: If `true`, the request's actions must target a data
stream (existing or to-be-created).
:param routing: Custom value used to route operations to a specific shard.
:param source: `true` or `false` to return the `_source` field or not, or a list
of fields to return.
@@ -694,6 +700,8 @@ async def bulk(
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if list_executed_pipelines is not None:
__query["list_executed_pipelines"] = list_executed_pipelines
if pipeline is not None:
__query["pipeline"] = pipeline
if pretty is not None:
@@ -702,6 +710,8 @@
__query["refresh"] = refresh
if require_alias is not None:
__query["require_alias"] = require_alias
if require_data_stream is not None:
__query["require_data_stream"] = require_data_stream
if routing is not None:
__query["routing"] = routing
if source is not None:
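
A minimal sketch of the two new `bulk` flags in use, assuming a local cluster at `http://localhost:9200` and an existing `logs-app-default` data stream (endpoint, index name, and documents are illustrative, not part of this diff):

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint
    resp = await es.bulk(
        operations=[
            {"create": {"_index": "logs-app-default"}},
            {"@timestamp": "2025-01-07T00:00:00Z", "message": "hello"},
        ],
        list_executed_pipelines=True,  # echo the ingest pipelines run per action
        require_data_stream=True,      # reject actions that don't target a data stream
    )
    print(resp["items"])
    await es.close()


asyncio.run(main())
```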
259 changes: 64 additions & 195 deletions elasticsearch/_async/client/cat.py

Large diffs are not rendered by default.

173 changes: 128 additions & 45 deletions elasticsearch/_async/client/ccr.py

Large diffs are not rendered by default.

237 changes: 237 additions & 0 deletions elasticsearch/_async/client/connector.py
@@ -589,6 +589,125 @@ async def sync_job_cancel(
path_parts=__path_parts,
)

@_rewrite_parameters()
@_stability_warning(Stability.EXPERIMENTAL)
async def sync_job_check_in(
self,
*,
connector_sync_job_id: str,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
Check in a connector sync job. Check in a connector sync job and set the `last_seen`
field to the current time before updating it in the internal index. To sync data
using self-managed connectors, you need to deploy the Elastic connector service
on your own infrastructure. This service runs automatically on Elastic Cloud
for Elastic managed connectors.

`<https://www.elastic.co/guide/en/elasticsearch/reference/8.16/check-in-connector-sync-job-api.html>`_

:param connector_sync_job_id: The unique identifier of the connector sync job
to be checked in.
"""
if connector_sync_job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'connector_sync_job_id'")
__path_parts: t.Dict[str, str] = {
"connector_sync_job_id": _quote(connector_sync_job_id)
}
__path = (
f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in'
)
__query: t.Dict[str, t.Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
__headers = {"accept": "application/json"}
return await self.perform_request( # type: ignore[return-value]
"PUT",
__path,
params=__query,
headers=__headers,
endpoint_id="connector.sync_job_check_in",
path_parts=__path_parts,
)
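
A minimal sketch of how a connector-service worker might call the new check-in endpoint, assuming an `AsyncElasticsearch` client and a hypothetical sync job ID (neither is part of this diff):

```python
from elasticsearch import AsyncElasticsearch

es = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint


async def heartbeat(sync_job_id: str) -> None:
    # Refreshes the job's `last_seen` timestamp in the internal index.
    await es.connector.sync_job_check_in(connector_sync_job_id=sync_job_id)
```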

@_rewrite_parameters(
body_fields=("worker_hostname", "sync_cursor"),
)
@_stability_warning(Stability.EXPERIMENTAL)
async def sync_job_claim(
self,
*,
connector_sync_job_id: str,
worker_hostname: t.Optional[str] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
sync_cursor: t.Optional[t.Any] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
Claim a connector sync job. This action updates the job status to `in_progress`
and sets the `last_seen` and `started_at` timestamps to the current time. Additionally,
it can set the `sync_cursor` property for the sync job. This API is not intended
for direct connector management by users. It supports the implementation of services
that utilize the connector protocol to communicate with Elasticsearch. To sync
data using self-managed connectors, you need to deploy the Elastic connector
service on your own infrastructure. This service runs automatically on Elastic
Cloud for Elastic managed connectors.

`<https://www.elastic.co/guide/en/elasticsearch/reference/8.16/claim-connector-sync-job-api.html>`_

:param connector_sync_job_id: The unique identifier of the connector sync job.
:param worker_hostname: The host name of the current system that will run the
job.
:param sync_cursor: The cursor object from the last incremental sync job. This
should reference the `sync_cursor` field in the connector state for which
the job runs.
"""
if connector_sync_job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'connector_sync_job_id'")
if worker_hostname is None and body is None:
raise ValueError("Empty value passed for parameter 'worker_hostname'")
__path_parts: t.Dict[str, str] = {
"connector_sync_job_id": _quote(connector_sync_job_id)
}
__path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim'
__query: t.Dict[str, t.Any] = {}
__body: t.Dict[str, t.Any] = body if body is not None else {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if not __body:
if worker_hostname is not None:
__body["worker_hostname"] = worker_hostname
if sync_cursor is not None:
__body["sync_cursor"] = sync_cursor
__headers = {"accept": "application/json", "content-type": "application/json"}
return await self.perform_request( # type: ignore[return-value]
"PUT",
__path,
params=__query,
headers=__headers,
body=__body,
endpoint_id="connector.sync_job_claim",
path_parts=__path_parts,
)
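
A companion sketch for claiming a job, again with a hypothetical client, job ID, hostname, and cursor; `worker_hostname` is required, `sync_cursor` is optional:

```python
async def claim_job(es, sync_job_id: str) -> None:
    # Marks the job `in_progress` and records which worker picked it up.
    await es.connector.sync_job_claim(
        connector_sync_job_id=sync_job_id,
        worker_hostname="worker-01",          # required: host that will run the job
        sync_cursor={"last_doc_id": "1042"},  # optional: cursor from the last incremental sync
    )
```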

@_rewrite_parameters()
@_stability_warning(Stability.BETA)
async def sync_job_delete(
@@ -634,6 +753,64 @@ async def sync_job_delete(
path_parts=__path_parts,
)

@_rewrite_parameters(
body_fields=("error",),
)
@_stability_warning(Stability.EXPERIMENTAL)
async def sync_job_error(
self,
*,
connector_sync_job_id: str,
error: t.Optional[str] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
Set a connector sync job error. Set the `error` field for a connector sync job
and set its `status` to `error`. To sync data using self-managed connectors,
you need to deploy the Elastic connector service on your own infrastructure.
This service runs automatically on Elastic Cloud for Elastic managed connectors.

`<https://www.elastic.co/guide/en/elasticsearch/reference/8.16/set-connector-sync-job-error-api.html>`_

:param connector_sync_job_id: The unique identifier for the connector sync job.
:param error: The error for the connector sync job error field.
"""
if connector_sync_job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'connector_sync_job_id'")
if error is None and body is None:
raise ValueError("Empty value passed for parameter 'error'")
__path_parts: t.Dict[str, str] = {
"connector_sync_job_id": _quote(connector_sync_job_id)
}
__path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error'
__query: t.Dict[str, t.Any] = {}
__body: t.Dict[str, t.Any] = body if body is not None else {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if not __body:
if error is not None:
__body["error"] = error
__headers = {"accept": "application/json", "content-type": "application/json"}
return await self.perform_request( # type: ignore[return-value]
"PUT",
__path,
params=__query,
headers=__headers,
body=__body,
endpoint_id="connector.sync_job_error",
path_parts=__path_parts,
)
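
A short sketch of reporting a failure through the new error endpoint (client, job ID, and error message are illustrative):

```python
async def fail_job(es, sync_job_id: str, reason: str) -> None:
    # Stores the reason in the job's `error` field and flips its `status` to `error`.
    await es.connector.sync_job_error(
        connector_sync_job_id=sync_job_id,
        error=reason,
    )
```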

@_rewrite_parameters()
@_stability_warning(Stability.BETA)
async def sync_job_get(
@@ -1032,6 +1209,66 @@ async def update_error(
path_parts=__path_parts,
)

@_rewrite_parameters(
body_fields=("features",),
)
@_stability_warning(Stability.EXPERIMENTAL)
async def update_features(
self,
*,
connector_id: str,
features: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
Update the connector features. Update the connector features in the connector
document. This API can be used to control the following aspects of a connector:
        * document-level security
        * incremental syncs
        * advanced sync rules
        * basic sync rules
        Normally, the running connector service automatically manages these features.
However, you can use this API to override the default behavior. To sync data
using self-managed connectors, you need to deploy the Elastic connector service
on your own infrastructure. This service runs automatically on Elastic Cloud
for Elastic managed connectors.

`<https://www.elastic.co/guide/en/elasticsearch/reference/8.16/update-connector-features-api.html>`_

:param connector_id: The unique identifier of the connector to be updated.
:param features:
"""
if connector_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'connector_id'")
if features is None and body is None:
raise ValueError("Empty value passed for parameter 'features'")
__path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)}
__path = f'/_connector/{__path_parts["connector_id"]}/_features'
__query: t.Dict[str, t.Any] = {}
__body: t.Dict[str, t.Any] = body if body is not None else {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if not __body:
if features is not None:
__body["features"] = features
__headers = {"accept": "application/json", "content-type": "application/json"}
return await self.perform_request( # type: ignore[return-value]
"PUT",
__path,
params=__query,
headers=__headers,
body=__body,
endpoint_id="connector.update_features",
path_parts=__path_parts,
)
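
A sketch of overriding feature flags with the new endpoint. The field names below follow the connector `features` schema as I understand it; treat both them and the connector ID as illustrative assumptions:

```python
async def enable_dls(es, connector_id: str) -> None:
    # Assumed feature-flag layout; values here are illustrative.
    await es.connector.update_features(
        connector_id=connector_id,
        features={
            "document_level_security": {"enabled": True},
            "incremental_sync": {"enabled": True},
            "sync_rules": {
                "basic": {"enabled": True},
                "advanced": {"enabled": False},
            },
        },
    )
```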

@_rewrite_parameters(
body_fields=("advanced_snippet", "filtering", "rules"),
)
21 changes: 21 additions & 0 deletions elasticsearch/_async/client/eql.py
@@ -167,13 +167,16 @@ async def get_status(
@_rewrite_parameters(
body_fields=(
"query",
"allow_partial_search_results",
"allow_partial_sequence_results",
"case_sensitive",
"event_category_field",
"fetch_size",
"fields",
"filter",
"keep_alive",
"keep_on_completion",
"max_samples_per_key",
"result_position",
"runtime_mappings",
"size",
@@ -188,6 +191,8 @@ async def search(
index: t.Union[str, t.Sequence[str]],
query: t.Optional[str] = None,
allow_no_indices: t.Optional[bool] = None,
allow_partial_search_results: t.Optional[bool] = None,
allow_partial_sequence_results: t.Optional[bool] = None,
case_sensitive: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
event_category_field: t.Optional[str] = None,
@@ -211,6 +216,7 @@ async def search(
ignore_unavailable: t.Optional[bool] = None,
keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
keep_on_completion: t.Optional[bool] = None,
max_samples_per_key: t.Optional[int] = None,
pretty: t.Optional[bool] = None,
result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None,
runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
@@ -232,6 +238,8 @@ async def search(
:param index: The name of the index to scope the operation
:param query: EQL query you wish to run.
:param allow_no_indices:
:param allow_partial_search_results:
:param allow_partial_sequence_results:
:param case_sensitive:
:param event_category_field: Field containing the event classification, such
as process, file, or network.
@@ -246,6 +254,11 @@
in the response.
:param keep_alive:
:param keep_on_completion:
:param max_samples_per_key: By default, the response of a sample query contains
up to `10` samples, with one sample per unique set of join keys. Use the
`size` parameter to get a smaller or larger set of samples. To retrieve more
than one sample per set of join keys, use the `max_samples_per_key` parameter.
Pipes are not supported for sample queries.
:param result_position:
:param runtime_mappings:
:param size: For basic queries, the maximum number of matching events to return.
@@ -280,6 +293,12 @@ async def search(
if not __body:
if query is not None:
__body["query"] = query
if allow_partial_search_results is not None:
__body["allow_partial_search_results"] = allow_partial_search_results
if allow_partial_sequence_results is not None:
__body["allow_partial_sequence_results"] = (
allow_partial_sequence_results
)
if case_sensitive is not None:
__body["case_sensitive"] = case_sensitive
if event_category_field is not None:
Expand All @@ -294,6 +313,8 @@ async def search(
__body["keep_alive"] = keep_alive
if keep_on_completion is not None:
__body["keep_on_completion"] = keep_on_completion
if max_samples_per_key is not None:
__body["max_samples_per_key"] = max_samples_per_key
if result_position is not None:
__body["result_position"] = result_position
if runtime_mappings is not None:
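
A minimal sketch of an EQL sample query exercising the new parameters, assuming an async client `es` and an illustrative `my-logs-*` index pattern:

```python
async def sample_search(es):
    # Sample queries group events by join key (`host.name` here); the index
    # pattern and query are illustrative.
    return await es.eql.search(
        index="my-logs-*",
        query="""
            sample by host.name
              [any where event.category == "process"]
              [any where event.category == "network"]
        """,
        max_samples_per_key=3,              # more than one sample per join key
        allow_partial_search_results=True,  # tolerate shard failures or timeouts
        allow_partial_sequence_results=False,
    )
```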