From d95fc105d6956fbd2d36ec65c06ad71bd6aa0c13 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Fri, 17 Nov 2023 06:03:43 +0000
Subject: [PATCH] Auto-generated API code

---
 elasticsearch/_async/client/ml.py | 4 ++++
 elasticsearch/_sync/client/ml.py  | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py
index 3116a71c7..dbe218dfb 100644
--- a/elasticsearch/_async/client/ml.py
+++ b/elasticsearch/_async/client/ml.py
@@ -3582,6 +3582,7 @@ async def start_trained_model_deployment(
         *,
         model_id: str,
         cache_size: t.Optional[t.Union[int, str]] = None,
+        deployment_id: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -3605,6 +3606,7 @@ async def start_trained_model_deployment(
         :param cache_size: The inference cache size (in memory outside the JVM heap)
             per node for the model. The default value is the same size as the `model_size_bytes`.
             To disable the cache, `0b` can be provided.
+        :param deployment_id: A unique identifier for the deployment of the model.
         :param number_of_allocations: The number of model allocations on each node where
             the model is deployed. All allocations on a node share the same copy of the
             model in memory but use a separate set of threads to evaluate the model.
@@ -3631,6 +3633,8 @@ async def start_trained_model_deployment(
         __query: t.Dict[str, t.Any] = {}
         if cache_size is not None:
             __query["cache_size"] = cache_size
+        if deployment_id is not None:
+            __query["deployment_id"] = deployment_id
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py
index bb72ef725..4af429e71 100644
--- a/elasticsearch/_sync/client/ml.py
+++ b/elasticsearch/_sync/client/ml.py
@@ -3582,6 +3582,7 @@ def start_trained_model_deployment(
         *,
         model_id: str,
         cache_size: t.Optional[t.Union[int, str]] = None,
+        deployment_id: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -3605,6 +3606,7 @@ def start_trained_model_deployment(
         :param cache_size: The inference cache size (in memory outside the JVM heap)
             per node for the model. The default value is the same size as the `model_size_bytes`.
             To disable the cache, `0b` can be provided.
+        :param deployment_id: A unique identifier for the deployment of the model.
         :param number_of_allocations: The number of model allocations on each node where
             the model is deployed. All allocations on a node share the same copy of the
             model in memory but use a separate set of threads to evaluate the model.
@@ -3631,6 +3633,8 @@ def start_trained_model_deployment(
         __query: t.Dict[str, t.Any] = {}
         if cache_size is not None:
             __query["cache_size"] = cache_size
+        if deployment_id is not None:
+            __query["deployment_id"] = deployment_id
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
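
For context, a minimal usage sketch of the new optional `deployment_id` parameter on the sync client. This is not part of the patch; the cluster URL, model ID, and deployment ID below are hypothetical placeholders.

    from elasticsearch import Elasticsearch

    # Connect to a cluster (placeholder URL; real clusters typically need auth).
    client = Elasticsearch("http://localhost:9200")

    # Start a trained model deployment. deployment_id is the parameter added by
    # this patch; when provided, it is sent as a query-string argument.
    resp = client.ml.start_trained_model_deployment(
        model_id="my-trained-model",      # placeholder model ID
        deployment_id="my-deployment-a",  # new optional identifier for this deployment
        cache_size="1gb",                 # existing optional per-node inference cache size
    )
    print(resp)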