diff --git a/docs/dyn/aiplatform_v1.endpoints.html b/docs/dyn/aiplatform_v1.endpoints.html
index 3ec5b29b33..1b90f98784 100644
--- a/docs/dyn/aiplatform_v1.endpoints.html
+++ b/docs/dyn/aiplatform_v1.endpoints.html
@@ -148,6 +148,7 @@ Method Details
"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -231,6 +232,7 @@

Method Details

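What this change adds in practice: response parts can now carry a `thought` flag, so thinking-capable models can label reasoning parts separately from answer text. Below is a minimal sketch using the discovery-based Python client; the project, region, and model names are placeholders, and the regional api_endpoint override reflects the usual Vertex AI setup rather than anything in this diff.

from googleapiclient import discovery

# Vertex AI is served from regional endpoints; adjust the region to your own.
service = discovery.build(
    "aiplatform",
    "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

model = "projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash"  # placeholder
body = {"contents": [{"role": "user", "parts": [{"text": "Why is the sky blue?"}]}]}

response = service.endpoints().generateContent(model=model, body=body).execute()

for candidate in response.get("candidates", []):
    for part in candidate.get("content", {}).get("parts", []):
        if part.get("thought"):  # output only: a reasoning part, not answer text
            continue
        print(part.get("text", ""))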
"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -358,6 +360,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -466,7 +469,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -571,6 +620,7 @@

Method Details

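The new externalApi retrieval source lets grounding pull from a corpus the caller hosts. A minimal sketch of a tools entry for the Elasticsearch flavor follows; all resource names are placeholders, and the enum spellings (ELASTIC_SEARCH, API_KEY_AUTH, HTTP_IN_HEADER) are assumptions to verify against the API reference.

# Hypothetical external-API grounding tool (Elasticsearch-backed).
external_api_tool = {
    "retrieval": {
        "externalApi": {
            "apiSpec": "ELASTIC_SEARCH",
            "endpoint": "https://search.example.com:443/search",  # placeholder
            "authConfig": {
                "authType": "API_KEY_AUTH",
                "apiKeyConfig": {
                    # Secret Manager reference; takes precedence over apiKeyString.
                    "apiKeySecret": "projects/my-project/secrets/es-api-key/versions/1",
                    "httpElementLocation": "HTTP_IN_HEADER",
                    "name": "Authorization",
                },
            },
            "elasticSearchParams": {
                "index": "my-index",
                "searchTemplate": "my-search-template",
                "numHits": 10,
            },
        },
    },
}

body = {
    "contents": [{"role": "user", "parts": [{"text": "What does the handbook say about travel?"}]}],
    "tools": [external_api_tool],
}
# body can then be passed to endpoints().generateContent(model=..., body=body).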
"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -704,6 +754,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -827,7 +878,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -928,6 +1025,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1155,6 +1253,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1288,6 +1387,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1411,7 +1511,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1512,6 +1658,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. diff --git a/docs/dyn/aiplatform_v1.media.html b/docs/dyn/aiplatform_v1.media.html index 21928fc9d9..4cf6920c49 100644 --- a/docs/dyn/aiplatform_v1.media.html +++ b/docs/dyn/aiplatform_v1.media.html @@ -124,6 +124,7 @@

Method Details

    { # JiraQueries contains the Jira queries and corresponding authentication.
      "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/).
        "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+       "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
      },
      "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/
        "A String",
@@ -143,6 +144,7 @@ Method Details
"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -159,6 +161,7 @@

Method Details

    { # SlackChannels contains the Slack channels and corresponding access token.
      "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token.
        "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+       "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
      },
      "channels": [ # Required. The Slack channel IDs.
        { # SlackChannel contains the Slack channel ID and the time range to import.
@@ -232,6 +235,7 @@ Method Details
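The apiKeyString field added to these import sources offers an inline alternative to apiKeySecretVersion. A hypothetical SlackChannels entry for an importRagFiles request body is sketched below; the token and channel ID are placeholders, and since apiKeySecretVersion is still documented as required, the Secret Manager form remains the safer default outside quick experiments.

# Hypothetical SlackChannels entry (one element of slackSource.channels).
slack_channels = {
    "apiKeyConfig": {
        # New inline form; the secret-version form keeps tokens out of request payloads:
        # "apiKeySecretVersion": "projects/my-project/secrets/slack-token/versions/1",
        "apiKeyString": "xoxb-placeholder-token",
    },
    "channels": [
        {"channelId": "C0123456789"},  # placeholder channel ID
    ],
}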
    { # JiraQueries contains the Jira queries and corresponding authentication.
      "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/).
        "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+       "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
      },
      "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/
        "A String",
@@ -251,6 +255,7 @@ Method Details
"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -267,6 +272,7 @@

Method Details

    { # SlackChannels contains the Slack channels and corresponding access token.
      "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token.
        "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+       "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
      },
      "channels": [ # Required. The Slack channel IDs.
        { # SlackChannel contains the Slack channel ID and the time range to import.
diff --git a/docs/dyn/aiplatform_v1.projects.locations.cachedContents.html b/docs/dyn/aiplatform_v1.projects.locations.cachedContents.html
index d1f4e1fcac..bbd790674e 100644
--- a/docs/dyn/aiplatform_v1.projects.locations.cachedContents.html
+++ b/docs/dyn/aiplatform_v1.projects.locations.cachedContents.html
@@ -146,6 +146,7 @@ Method Details
"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -197,6 +198,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -320,7 +322,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -412,6 +460,7 @@

Method Details

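dataStoreSpecs is only honored for engines that front multiple data stores, and only when engine (not datastore) is set. A sketch of a tool carrying it into a cachedContents create call follows; the resource names and filter expression are placeholders.

# Hypothetical tool restricting a multi-store engine to one data store.
vertex_ai_search_tool = {
    "retrieval": {
        "vertexAiSearch": {
            "engine": "projects/my-project/locations/global/collections/default_collection/engines/my-engine",
            "dataStoreSpecs": [
                {
                    "dataStore": "projects/my-project/locations/global/collections/default_collection/dataStores/my-store",
                    "filter": 'category: ANY("handbook")',  # placeholder filter expression
                },
            ],
        },
    },
}

cached_content = {
    "model": "projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash",  # placeholder
    "tools": [vertex_ai_search_tool],
}
# cached_content would then be passed as the body of
# service.projects().locations().cachedContents().create(parent=..., body=cached_content).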
"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -463,6 +512,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -586,7 +636,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -703,6 +799,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -754,6 +851,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -877,7 +975,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -980,6 +1124,7 @@
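Taken together, the hunks above define a complete request shape. What follows is a minimal sketch, in Python, of calling generateContent with the new externalApi retrieval tool through this generated client; the project, model, endpoint, secret, and index values are placeholders, and the ELASTIC_SEARCH, API_KEY_AUTH, and HTTP_IN_HEADER literals are assumed enum spellings for apiSpec, authType, and httpElementLocation.

from google.auth import default
from googleapiclient import discovery

# Build the regional client; the api_endpoint override is an assumption for a
# us-central1 deployment.
credentials, _ = default()
service = discovery.build(
    "aiplatform",
    "v1",
    credentials=credentials,
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

body = {
    "contents": [{"role": "user", "parts": [{"text": "What is our refund policy?"}]}],
    "tools": [
        {
            "retrieval": {
                "externalApi": {
                    "apiSpec": "ELASTIC_SEARCH",
                    "endpoint": "https://acme.com:443/search",  # placeholder, as in the docs above
                    "authConfig": {
                        "authType": "API_KEY_AUTH",
                        "apiKeyConfig": {
                            # Placeholder Secret Manager version holding the key.
                            "apiKeySecret": "projects/my-project/secrets/es-api-key/versions/1",
                            "httpElementLocation": "HTTP_IN_HEADER",
                            "name": "Authorization",
                        },
                    },
                    "elasticSearchParams": {
                        "index": "support-docs",
                        "searchTemplate": "support-search-template",
                        "numHits": 10,
                    },
                },
            },
        },
    ],
}

response = (
    service.projects()
    .locations()
    .endpoints()
    .generateContent(
        model="projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash-001",
        body=body,
    )
    .execute()
)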

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1031,6 +1176,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1154,7 +1300,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1265,6 +1457,7 @@
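The dataStoreSpecs field above is honored only when an engine, rather than a standalone datastore, is queried. A hedged sketch of a tools entry that scopes an engine search to a single data store; the resource names and filter expression are illustrative placeholders:

engine_search_tool = {
    "retrieval": {
        "vertexAiSearch": {
            "engine": "projects/my-project/locations/global/collections/default_collection/engines/my-engine",
            "dataStoreSpecs": [
                {
                    "dataStore": "projects/my-project/locations/global/collections/default_collection/dataStores/faq-store",
                    "filter": 'category: ANY("billing")',  # assumed Vertex AI Search filter syntax
                },
            ],
        },
    },
}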

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1316,6 +1509,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1439,7 +1633,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1532,6 +1772,7 @@
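For contrast with the Elasticsearch variant, here is a sketch of the SIMPLE_SEARCH spec paired with HTTP Basic credentials from Secret Manager. simpleSearchParams carries no fields, so an empty object selects it; the secret version name is a placeholder:

simple_search_tool = {
    "retrieval": {
        "externalApi": {
            "apiSpec": "SIMPLE_SEARCH",
            "endpoint": "https://search.internal.example:443/query",  # placeholder
            "authConfig": {
                "authType": "HTTP_BASIC_AUTH",
                "httpBasicAuthConfig": {
                    # Placeholder secret holding base64-encoded "user:password".
                    "credentialSecret": "projects/my-project/secrets/search-creds/versions/latest",
                },
            },
            "simpleSearchParams": {},
        },
    },
}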

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1583,6 +1824,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1706,7 +1948,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. diff --git a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html index 101f4a06d0..4dc1cf9d27 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html @@ -207,6 +207,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -290,6 +291,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -417,6 +419,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -525,7 +528,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1551,6 +1600,7 @@
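The authConfig message repeated above behaves like a tagged union: authType names the scheme, and only the matching sibling config is read. Two hedged sketches of the token-minting variants, with a placeholder service-account email and assumed enum spellings:

oidc_auth = {
    "authType": "OIDC_AUTH",
    "oidcConfig": {
        # The extension service agent mints an ID token for this account; the
        # audience is taken from the API's server URL.
        "serviceAccount": "grounding-caller@my-project.iam.gserviceaccount.com",
    },
}
service_account_auth = {
    "authType": "GOOGLE_SERVICE_ACCOUNT_AUTH",
    "googleServiceAccountConfig": {
        # Omit serviceAccount to fall back to the Vertex AI Extension Service Agent.
        "serviceAccount": "grounding-caller@my-project.iam.gserviceaccount.com",
    },
}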

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1684,6 +1734,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1807,7 +1858,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1908,6 +2005,7 @@
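As the schema notes, apiAuth is deprecated in favor of authConfig. A sketch of the same Secret Manager secret expressed both ways, which may help when migrating older configs; the secret version is a placeholder:

secret_version = "projects/my-project/secrets/es-api-key/versions/1"  # placeholder

legacy_auth_fragment = {  # deprecated shape: apiAuth with apiKeySecretVersion
    "apiAuth": {"apiKeyConfig": {"apiKeySecretVersion": secret_version}},
}
preferred_auth_fragment = {  # recommended shape: authConfig with apiKeySecret
    "authConfig": {
        "authType": "API_KEY_AUTH",
        "apiKeyConfig": {"apiKeySecret": secret_version},
    },
}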

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -3599,6 +3697,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -3732,6 +3831,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -3855,7 +3955,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -3956,6 +4102,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. diff --git a/docs/dyn/aiplatform_v1.projects.locations.html b/docs/dyn/aiplatform_v1.projects.locations.html index df4e3f8729..50c0686105 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.html +++ b/docs/dyn/aiplatform_v1.projects.locations.html @@ -317,6 +317,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -404,6 +405,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -482,6 +484,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -763,6 +766,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -853,6 +857,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. diff --git a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html index 60448c3093..9b9bb366cc 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html @@ -158,6 +158,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -241,6 +242,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -368,6 +370,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -476,7 +479,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -623,6 +672,7 @@
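Read as one object, the added fields above form a single retrieval tool. A hedged sketch of a request-side tool for the ELASTIC_SEARCH spec; the endpoint, index, template, and Secret Manager names are placeholders, and the enum strings are assumptions inferred from the field comments above rather than a confirmed reference:

    # Hypothetical externalApi retrieval tool built only from fields shown in the hunk above.
    external_api_tool = {
        "retrieval": {
            "externalApi": {
                "apiSpec": "ELASTIC_SEARCH",  # assumed enum value, per "the ELASTIC_SEARCH spec"
                "endpoint": "https://acme.com:443/search",
                "authConfig": {
                    "authType": "API_KEY_AUTH",  # assumed enum value
                    "apiKeyConfig": {
                        "apiKeySecret": "projects/my-project/secrets/es-key/versions/1",  # placeholder
                        "httpElementLocation": "HTTP_IN_HEADER",  # assumed enum value
                        "name": "Authorization",
                    },
                },
                "elasticSearchParams": {
                    "index": "my-index",
                    "searchTemplate": "my-search-template",
                    "numHits": 10,
                },
            },
        },
    }
    # Passed alongside the prompt, e.g. body={"contents": [...], "tools": [external_api_tool]}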

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -756,6 +806,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -879,7 +930,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -980,6 +1077,7 @@
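The vertexAiSearch block repeated above now also accepts dataStoreSpecs when an engine with multiple data stores is used. A hedged sketch; the resource names and filter expression are placeholders:

    # Hypothetical vertexAiSearch retrieval tool using the new dataStoreSpecs list.
    vertex_ai_search_tool = {
        "retrieval": {
            "vertexAiSearch": {
                "engine": "projects/my-project/locations/global/collections/default_collection/engines/my-engine",  # placeholder
                "dataStoreSpecs": [
                    {
                        "dataStore": "projects/my-project/locations/global/collections/default_collection/dataStores/store-a",  # placeholder
                        "filter": 'category: ANY("faq")',  # placeholder filter; see the Filtering link above
                    },
                ],
            },
        },
    }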

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1480,6 +1578,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1613,6 +1712,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1736,7 +1836,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1837,6 +1983,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. diff --git a/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.html b/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.html index 5959ccde14..4eb8810397 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.html +++ b/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.html @@ -134,6 +134,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -147,6 +148,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexVectorSearch": { # The config for the Vertex Vector Search. # The config for the Vertex Vector Search. "index": "A String", # The resource name of the Index. Format: `projects/{project}/locations/{location}/indexes/{index}` @@ -251,6 +258,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -264,6 +272,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexVectorSearch": { # The config for the Vertex Vector Search. # The config for the Vertex Vector Search. "index": "A String", # The resource name of the Index. Format: `projects/{project}/locations/{location}/indexes/{index}` @@ -309,6 +323,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -322,6 +337,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexVectorSearch": { # The config for the Vertex Vector Search. # The config for the Vertex Vector Search. "index": "A String", # The resource name of the Index. Format: `projects/{project}/locations/{location}/indexes/{index}` @@ -373,6 +394,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -386,6 +408,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexVectorSearch": { # The config for the Vertex Vector Search. # The config for the Vertex Vector Search. "index": "A String", # The resource name of the Index. Format: `projects/{project}/locations/{location}/indexes/{index}` diff --git a/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.ragFiles.html b/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.ragFiles.html index 6b4ec87b56..d90db884aa 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.ragFiles.html +++ b/docs/dyn/aiplatform_v1.projects.locations.ragCorpora.ragFiles.html @@ -180,6 +180,7 @@

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String", @@ -199,6 +200,7 @@

Method Details

"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -215,6 +217,7 @@

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import. @@ -265,6 +268,7 @@

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String", @@ -289,7 +293,7 @@

Method Details

"maxParsingRequestsPerMin": 42, # The maximum number of requests the job is allowed to make to the Document AI processor per minute. Consult https://cloud.google.com/document-ai/quotas and the Quota page for your project to set an appropriate value here. If unspecified, a default value of 120 QPM would be used. "processorName": "A String", # The full resource name of a Document AI processor or processor version. The processor must have type `LAYOUT_PARSER_PROCESSOR`. If specified, the `additional_config.parse_as_scanned_pdf` field must be false. Format: * `projects/{project_id}/locations/{location}/processors/{processor_id}` * `projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}` }, - "llmParser": { # Specifies the advanced parsing for RagFiles. # The LLM Parser to use for RagFiles. + "llmParser": { # Specifies the LLM parsing for RagFiles. # The LLM Parser to use for RagFiles. "customParsingPrompt": "A String", # The prompt to use for parsing. If not specified, a default prompt will be used. "maxParsingRequestsPerMin": 42, # The maximum number of requests the job is allowed to make to the LLM model per minute. Consult https://cloud.google.com/vertex-ai/generative-ai/docs/quotas and your document size to set an appropriate value here. If unspecified, a default value of 5000 QPM would be used. "modelName": "A String", # The name of a LLM model used for parsing. Format: * `projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}` @@ -303,12 +307,14 @@

Method Details

}, }, }, + "rebuildAnnIndex": True or False, # Rebuilds the ANN index to optimize for recall on the imported data. Only applicable for RagCorpora running on RagManagedDb with `retrieval_strategy` set to `ANN`. The rebuild will be performed using the existing ANN config set on the RagCorpus. To change the ANN config, please use the UpdateRagCorpus API. Default is false, i.e., index is not rebuilt. "sharePointSources": { # The SharePointSources to pass to ImportRagFiles. # SharePoint sources. "sharePointSources": [ # The SharePoint sources. { # An individual SharePointSource. "clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -325,6 +331,7 @@

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import. @@ -415,6 +422,7 @@

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String", @@ -434,6 +442,7 @@

Method Details

"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -450,6 +459,7 @@

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import. diff --git a/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html b/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html index 457d28d9a5..aa80a2c5e5 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html @@ -236,6 +236,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -406,6 +407,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -583,6 +585,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -766,6 +769,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -964,6 +968,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. diff --git a/docs/dyn/aiplatform_v1.publishers.models.html b/docs/dyn/aiplatform_v1.publishers.models.html index f1349851a6..a0792506b3 100644 --- a/docs/dyn/aiplatform_v1.publishers.models.html +++ b/docs/dyn/aiplatform_v1.publishers.models.html @@ -146,6 +146,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -229,6 +230,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -356,6 +358,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -464,7 +467,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -569,6 +618,7 @@
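As an orientation aid for the `externalApi` block added above, here is a minimal request sketch in Python against the generated v1 client. It assumes a discovery client built with googleapiclient and an Elasticsearch-backed grounding service; all resource names are placeholders, and the enum strings for `apiSpec`, `authType`, and `httpElementLocation` are assumptions inferred from the field comments above, not values taken from this diff.

    from googleapiclient.discovery import build

    # Hypothetical regional client; every resource name below is a placeholder.
    client = build(
        "aiplatform", "v1",
        client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
    )

    external_api_tool = {
        "retrieval": {
            "externalApi": {
                "apiSpec": "ELASTIC_SEARCH",  # assumed enum; "SIMPLE_SEARCH" for the simple spec
                "endpoint": "https://acme.com:443/search",
                "authConfig": {
                    "authType": "API_KEY_AUTH",  # assumed enum for the API-key scheme
                    "apiKeyConfig": {
                        "apiKeySecret": "projects/my-project/secrets/es-key/versions/1",
                        "httpElementLocation": "HTTP_IN_HEADER",  # assumed enum
                        "name": "X-Api-Key",
                    },
                },
                "elasticSearchParams": {
                    "index": "my-index",
                    "searchTemplate": "my-template",
                    "numHits": 10,
                },
            },
        },
    }

    body = {
        "contents": [{"role": "user", "parts": [{"text": "What is our refund policy?"}]}],
        "tools": [external_api_tool],
    }
    response = client.endpoints().generateContent(
        model="projects/my-project/locations/us-central1/endpoints/1234567890",
        body=body,
    ).execute()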

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -702,6 +752,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -825,7 +876,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -926,6 +1023,7 @@
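The `dataStoreSpecs` addition to `vertexAiSearch` lets an engine-backed search be narrowed to specific data stores. A hedged sketch of such a tool payload follows; the resource names and the filter expression are placeholders, and, per the comments above, `dataStoreSpecs` is only considered when `engine` is set.

    vertex_ai_search_tool = {
        "retrieval": {
            "vertexAiSearch": {
                # engine and datastore are mutually exclusive; dataStoreSpecs needs engine.
                "engine": "projects/my-project/locations/global/collections/default_collection/engines/my-engine",
                "dataStoreSpecs": [
                    {
                        "dataStore": "projects/my-project/locations/global/collections/default_collection/dataStores/my-store",
                        "filter": 'category: ANY("faq")',  # placeholder metadata filter
                    },
                ],
            },
        },
    }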

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1704,6 +1802,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1837,6 +1936,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. @@ -1960,7 +2060,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -2061,6 +2207,7 @@

Method Details

"mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). + "thought": True or False, # Output only. Indicates if the part is thought from the model. "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. "endOffset": "A String", # Optional. The end offset of the video. "startOffset": "A String", # Optional. The start offset of the video. diff --git a/docs/dyn/aiplatform_v1beta1.endpoints.html b/docs/dyn/aiplatform_v1beta1.endpoints.html index 99d4a15bc8..4ee910a72e 100644 --- a/docs/dyn/aiplatform_v1beta1.endpoints.html +++ b/docs/dyn/aiplatform_v1beta1.endpoints.html @@ -478,7 +478,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -854,7 +900,53 @@
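The same hunk lands in the v1beta1 endpoints surface. To complement the API-key sketch given for v1, here are hedged `authConfig` payloads for the HTTP Basic and OIDC schemes defined above; the `authType` enum strings are assumptions inferred from the field names, and the secret and service-account names are placeholders.

    # Two alternative authConfig payloads for the externalApi tool (sketch only).
    http_basic_auth = {
        "authType": "HTTP_BASIC_AUTH",  # assumed enum value
        "httpBasicAuthConfig": {
            # Secret Manager version holding the base64-encoded credentials.
            "credentialSecret": "projects/my-project/secrets/basic-cred/versions/1",
        },
    }

    oidc_auth = {
        "authType": "OIDC_AUTH",  # assumed enum value
        "oidcConfig": {
            # Service account used to mint the Google-signed OIDC token.
            "serviceAccount": "grounding-caller@my-project.iam.gserviceaccount.com",
        },
    }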

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1459,7 +1551,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. diff --git a/docs/dyn/aiplatform_v1beta1.media.html b/docs/dyn/aiplatform_v1beta1.media.html index d83b4296a0..4b1d1c29ec 100644 --- a/docs/dyn/aiplatform_v1beta1.media.html +++ b/docs/dyn/aiplatform_v1beta1.media.html @@ -124,6 +124,7 @@

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+ "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
}, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String",
@@ -144,6 +145,7 @@
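With the new `apiKeyString` field, Jira import can authenticate with an inline token instead of a Secret Manager version. A minimal sketch of the queries payload above (the token and JQL string are placeholders; exactly one of the two key fields may be set, and the Secret Manager form is the safer default):

    jira_queries = {
        "apiKeyConfig": {
            # Either an inline token or a Secret Manager version, not both:
            "apiKeyString": "my-atlassian-api-token",  # placeholder
            # "apiKeySecretVersion": "projects/my-project/secrets/jira-key/versions/1",
        },
        "customQueries": ["project = DOC AND updated >= -30d"],  # placeholder JQL
    }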

Method Details

"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -161,6 +163,7 @@

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+ "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
}, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import.
@@ -244,6 +247,7 @@
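Slack import follows suit. A hedged sketch (the token is a placeholder, and the `channelId` field name inside each channel entry is an assumption; this hunk only shows the SlackChannel comment, not its fields):

    slack_channels = {
        "apiKeyConfig": {
            "apiKeyString": "xoxb-placeholder-token",  # inline token; secret version preferred
        },
        "channels": [
            {"channelId": "C0123456789"},  # assumed field name for the Slack channel ID
        ],
    }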

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+ "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
}, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String",
@@ -264,6 +268,7 @@

Method Details

"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -281,6 +286,7 @@

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
+ "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
}, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import.
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html b/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html
index 251106ea65..deb3ec5261 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html
@@ -326,7 +326,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -604,7 +650,53 @@
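Because cached contents also carry a `tools` list, the new `externalApi` retrieval tool can be baked into a context cache. A hedged sketch against the generated v1beta1 client; the model and resource names are placeholders, and the tool dict is a pared-down version of the earlier v1 example.

    from googleapiclient.discovery import build

    client = build(
        "aiplatform", "v1beta1",
        client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
    )

    external_api_tool = {
        "retrieval": {
            "externalApi": {
                "apiSpec": "SIMPLE_SEARCH",  # assumed enum value
                "endpoint": "https://acme.com:443/search",
                "simpleSearchParams": {},
            },
        },
    }

    cached = client.projects().locations().cachedContents().create(
        parent="projects/my-project/locations/us-central1",
        body={
            "model": "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-pro-002",
            "tools": [external_api_tool],
        },
    ).execute()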

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -907,7 +999,53 @@
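For illustration, a minimal sketch of how the schema above maps onto a plain Python dict when using the discovery-based client: a `tools` entry that grounds on an external API authenticated with an API key held in Secret Manager. The project, secret, and endpoint names are hypothetical, and the enum strings (`SIMPLE_SEARCH`, `API_KEY_AUTH`, `HTTP_IN_QUERY`) are assumptions to verify against the current API surface.

# Hypothetical sketch: a retrieval tool grounding on an external API.
external_api_tool = {
    "retrieval": {
        "externalApi": {
            "apiSpec": "SIMPLE_SEARCH",  # assumed enum value
            "endpoint": "https://acme.example.com:443/search",  # hypothetical endpoint
            "simpleSearchParams": {},
            "authConfig": {
                "authType": "API_KEY_AUTH",  # assumed enum value
                "apiKeyConfig": {
                    # Secret Manager version holding the key; the Vertex AI
                    # Extension Service Agent needs secretmanager.versions.access
                    # on this resource (see the reference text above).
                    "apiKeySecret": "projects/my-project/secrets/search-key/versions/1",
                    "httpElementLocation": "HTTP_IN_QUERY",  # assumed enum value
                    "name": "api_key",  # query parameter that carries the key
                },
            },
        },
    },
}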

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1196,7 +1334,53 @@
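A companion sketch for the ELASTIC_SEARCH flavor of the same tool, which swaps `simpleSearchParams` for `elasticSearchParams`. The cluster URL, index, and template names are hypothetical placeholders.

# Hypothetical sketch: grounding on an Elasticsearch-backed external API.
elastic_tool = {
    "retrieval": {
        "externalApi": {
            "apiSpec": "ELASTIC_SEARCH",  # assumed enum value
            "endpoint": "https://es.example.com:9243/_search",  # hypothetical cluster
            "elasticSearchParams": {
                "index": "support-articles",       # hypothetical index
                "searchTemplate": "grounding-v1",  # hypothetical search template
                "numHits": 10,  # forwarded to Elasticsearch as num_hits
            },
        },
    },
}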

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1493,7 +1677,53 @@
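A hedged sketch of the `vertexAiSearch` variant, restricting an engine with multiple data stores to one of them via `dataStoreSpecs`. All resource names and the filter expression are hypothetical; the filter syntax is documented in the Filtering guide linked above.

# Hypothetical sketch: grounding on a Vertex AI Search engine, scoped to
# a single data store. engine and datastore are mutually exclusive; the
# engine form is used here.
vertex_search_tool = {
    "retrieval": {
        "vertexAiSearch": {
            "engine": ("projects/my-project/locations/global/collections/"
                       "default_collection/engines/my-engine"),
            "dataStoreSpecs": [
                {
                    "dataStore": ("projects/my-project/locations/global/collections/"
                                  "default_collection/dataStores/policies"),
                    "filter": 'category: ANY("refunds")',  # hypothetical filter
                },
            ],
        },
    },
}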

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1772,7 +2002,53 @@
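To show where such a tool dict plugs in, a minimal end-to-end sketch with the discovery-based client follows. It assumes Application Default Credentials, `pip install google-api-python-client`, and hypothetical project and model names; the regional api_endpoint and the exact response shape should be verified against the reference above.

from googleapiclient.discovery import build

# Build the Vertex AI discovery client; the regional endpoint is assumed
# to match the model's location.
service = build(
    "aiplatform", "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

model = ("projects/my-project/locations/us-central1/"
         "publishers/google/models/gemini-1.5-pro")  # hypothetical model name
tool = {"retrieval": {"vertexAiSearch": {
    "datastore": ("projects/my-project/locations/global/collections/"
                  "default_collection/dataStores/policies"),  # hypothetical
}}}
body = {
    "contents": [{"role": "user", "parts": [{"text": "What is our refund policy?"}]}],
    "tools": [tool],
}
response = service.projects().locations().publishers().models().generateContent(
    model=model, body=body,
).execute()
# Response shape per the schema above (first candidate, first part).
print(response["candidates"][0]["content"]["parts"][0]["text"])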

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.datasets.html b/docs/dyn/aiplatform_v1beta1.projects.locations.datasets.html index 178b0510a0..cd90f58d60 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.datasets.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.datasets.html @@ -458,7 +458,53 @@
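Since the schema carries both the deprecated `apiAuth` block and its `authConfig` replacement, a small sketch contrasting the two may help readers migrating existing configs. Secret names and enum values are hypothetical assumptions.

# Hypothetical sketch: deprecated apiAuth vs. the preferred authConfig.
deprecated_auth = {
    "apiAuth": {  # deprecated per the note above; shown only for contrast
        "apiKeyConfig": {
            "apiKeySecretVersion": "projects/my-project/secrets/search-key/versions/1",
        },
    },
}
preferred_auth = {
    "authConfig": {  # the replacement the deprecation note points to
        "authType": "API_KEY_AUTH",  # assumed enum value
        "apiKeyConfig": {
            "apiKeySecret": "projects/my-project/secrets/search-key/versions/1",
        },
    },
}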

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -811,7 +857,53 @@
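For the HTTP Basic variant, the reference says the Secret Manager version must hold base64-encoded credentials; a short sketch of preparing and referencing that value follows. The "user:password" pairing is the conventional Basic-auth form and the names are placeholders.

import base64

# Encode the credential pair; store the printed value as a Secret
# Manager secret version before referencing it below.
encoded = base64.b64encode(b"search-user:s3cret").decode("ascii")
print(encoded)

basic_auth = {
    "authType": "HTTP_BASIC_AUTH",  # assumed enum value
    "httpBasicAuthConfig": {
        # Grant secretmanager.versions.access on this resource to the
        # Vertex AI Extension Service Agent, per the reference above.
        "credentialSecret": "projects/my-project/secrets/es-basic/versions/1",
    },
}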

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1213,7 +1305,53 @@
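The two service-account-based auth variants can be sketched side by side; the IAM grants they require are those stated in the reference text above. The service account email and enum values are hypothetical assumptions.

# Hypothetical sketch: service-account and OIDC auth variants.
sa_auth = {
    "authType": "GOOGLE_SERVICE_ACCOUNT_AUTH",  # assumed enum value
    "googleServiceAccountConfig": {
        # Needs iam.serviceAccounts.getAccessToken granted to the Vertex AI
        # Extension Service Agent; omit serviceAccount entirely to run as
        # the agent itself.
        "serviceAccount": "grounding-runner@my-project.iam.gserviceaccount.com",
    },
}
oidc_auth = {
    "authType": "OIDC_AUTH",  # assumed enum value
    "oidcConfig": {
        # Needs iam.serviceAccounts.getOpenIdToken; the token audience is
        # taken from the server URL in the OpenAPI spec, per the text above.
        "serviceAccount": "grounding-runner@my-project.iam.gserviceaccount.com",
    },
}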

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1566,7 +1704,53 @@
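Finally, a sketch of passing the key inline via apiKeyString, placed in a request header. The reference notes that apiKeySecret takes precedence if both are set; key value, header name, and enum strings here are hypothetical, and Secret Manager remains the safer choice.

# Hypothetical sketch: inline API key carried in a request header.
inline_key_auth = {
    "authType": "API_KEY_AUTH",  # assumed enum value
    "apiKeyConfig": {
        # If apiKeySecret were also set, it would take precedence over
        # this inline string, per the reference above.
        "apiKeyString": "demo-key-not-a-real-secret",  # placeholder value
        "httpElementLocation": "HTTP_IN_HEADER",  # assumed enum value
        "name": "x-api-key",  # header that carries the key
    },
}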

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html index f97c6b8d60..c9d8da3410 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html @@ -546,7 +546,53 @@
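The `externalApi` retrieval block added above is easiest to read as a request payload. Below is a minimal sketch of a `tools` entry a caller might send in a `generateContent` body, assuming a hypothetical Elasticsearch deployment; the endpoint, index, template, and secret resource names are placeholders, and the `ELASTIC_SEARCH`, `API_KEY_AUTH`, and `HTTP_IN_HEADER` enum strings come from the underlying proto rather than this page, so treat them as assumptions.

```python
# Sketch only: a retrieval tool grounded on a hypothetical Elasticsearch
# deployment, expressed as the plain dict the discovery-based client sends.
external_api_tool = {
    "retrieval": {
        "externalApi": {
            "apiSpec": "ELASTIC_SEARCH",  # assumed enum; selects elasticSearchParams
            "endpoint": "https://search.example.com:443/search",  # placeholder
            "elasticSearchParams": {
                "index": "grounding-chunks",          # placeholder index
                "searchTemplate": "grounding-query",  # placeholder template
                "numHits": 10,                        # chunks to request
            },
            "authConfig": {
                "authType": "API_KEY_AUTH",  # assumed enum value
                "apiKeyConfig": {
                    # Secret Manager version holding the key; placeholder name.
                    "apiKeySecret": "projects/my-project/secrets/es-key/versions/1",
                    "httpElementLocation": "HTTP_IN_HEADER",  # assumed enum value
                    "name": "X-Api-Key",  # header that carries the key
                },
            },
        },
    },
}
```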

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -2002,7 +2048,53 @@
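One behavior worth calling out from the `apiKeyConfig` description repeated above: when both the Secret Manager reference and the inline key are set, the secret reference wins. A small sketch under that documented rule, with placeholder names and assumed enum strings:

```python
# Sketch of the documented precedence between the two API key fields.
auth_config = {
    "authType": "API_KEY_AUTH",  # assumed enum value
    "apiKeyConfig": {
        # Takes precedence over apiKeyString; the Vertex AI Extension Service
        # Agent needs secretmanager.versions.access on this secret version.
        "apiKeySecret": "projects/my-project/secrets/search-key/versions/latest",
        # Ignored here because apiKeySecret is set.
        "apiKeyString": "dev-only-plaintext-key",
        "httpElementLocation": "HTTP_IN_QUERY",  # assumed enum value
        "name": "api_key",  # e.g. https://example.com/act?api_key=...
    },
}
```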

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -4245,7 +4337,53 @@
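The new `dataStoreSpecs` field only applies when the retrieval tool points at an engine (it is ignored for a bare data store, and `datastore`/`engine` remain mutually exclusive). A sketch with placeholder resource names and a hypothetical filter expression:

```python
# Sketch: restrict an engine-backed Vertex AI Search retrieval to one of the
# engine's data stores and filter its documents.
vertex_ai_search_tool = {
    "retrieval": {
        "vertexAiSearch": {
            "engine": "projects/my-project/locations/global/collections/default_collection/engines/my-engine",
            "dataStoreSpecs": [
                {
                    "dataStore": "projects/my-project/locations/global/collections/default_collection/dataStores/docs-store",
                    "filter": 'category: ANY("release-notes")',  # hypothetical filter
                },
            ],
        },
    },
}
```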

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.featureMonitorJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.featureMonitorJobs.html index 07527a8792..729aff45ea 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.featureMonitorJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.featureMonitorJobs.html @@ -112,7 +112,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Output only. Feature selection config used when creating FeatureMonitorJob. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], @@ -165,7 +165,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Output only. Feature selection config used when creating FeatureMonitorJob. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], @@ -224,7 +224,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Output only. Feature selection config used when creating FeatureMonitorJob. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], @@ -289,7 +289,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Output only. Feature selection config used when creating FeatureMonitorJob. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.html b/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.html index 3a27c5d06c..9f553bc55d 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.featureMonitors.html @@ -127,7 +127,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Required. Feature selection config for the FeatureMonitor. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], @@ -228,7 +228,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Required. Feature selection config for the FeatureMonitor. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], @@ -271,7 +271,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Required. Feature selection config for the FeatureMonitor. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], @@ -320,7 +320,7 @@

Method Details

"featureSelectionConfig": { # Feature selection configuration for the FeatureMonitor. # Required. Feature selection config for the FeatureMonitor. "featureConfigs": [ # Optional. A list of features to be monitored and each feature's drift threshold. { # Feature configuration. - "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. + "driftThreshold": 3.14, # Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3. Must be in range [0, 1). "featureId": "A String", # Required. The ID of the feature resource. Final component of the Feature's resource name. }, ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html b/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html index 45e4486b74..92fa9424c9 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html @@ -135,6 +135,9 @@

Method Details

    { # Vertex AI Model Monitoring Service serves as a central hub for the analysis and visualization of data quality and performance related to models. ModelMonitor stands as a top level resource for overseeing your model monitoring tasks.
      "createTime": "A String", # Output only. Timestamp when this ModelMonitor was created.
      "displayName": "A String", # The display name of the ModelMonitor. The name can be up to 128 characters long and can consist of any UTF-8.
+      "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Customer-managed encryption key spec for a ModelMonitor. If set, this ModelMonitor and all sub-resources of this ModelMonitor will be secured by this key.
+        "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer-managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as the compute resource it protects.
+      },
      "explanationSpec": { # Specification of Model explanation. # Optional model explanation spec. It is used for feature attribution monitoring.
        "metadata": { # Metadata describing the Model's input and output for explanation. # Optional. Metadata describing the Model's input and output for explanation.
          "featureAttributionsSchemaUri": "A String", # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access.
@@ -483,6 +486,9 @@
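The new `encryptionSpec` on ModelMonitor mirrors the CMEK pattern used elsewhere in Vertex AI: set it once at creation, and the monitor plus its sub-resources are protected by a key that must live in the same region. A sketch with placeholder resource names:

```python
# Sketch: request body for creating a CMEK-protected ModelMonitor.
model_monitor_body = {
    "displayName": "churn-model-monitor",
    "encryptionSpec": {
        # Key must be in the same region as the monitor (us-central1 here).
        "kmsKeyName": "projects/my-project/locations/us-central1/keyRings/my-kr/cryptoKeys/my-key",
    },
}
# e.g. service.projects().locations().modelMonitors().create(
#     parent="projects/my-project/locations/us-central1",
#     body=model_monitor_body,
# ).execute()
```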

Method Details

    { # Vertex AI Model Monitoring Service serves as a central hub for the analysis and visualization of data quality and performance related to models. ModelMonitor stands as a top level resource for overseeing your model monitoring tasks.
      "createTime": "A String", # Output only. Timestamp when this ModelMonitor was created.
      "displayName": "A String", # The display name of the ModelMonitor. The name can be up to 128 characters long and can consist of any UTF-8.
+      "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Customer-managed encryption key spec for a ModelMonitor. If set, this ModelMonitor and all sub-resources of this ModelMonitor will be secured by this key.
+        "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer-managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as the compute resource it protects.
+      },
      "explanationSpec": { # Specification of Model explanation. # Optional model explanation spec. It is used for feature attribution monitoring.
        "metadata": { # Metadata describing the Model's input and output for explanation. # Optional. Metadata describing the Model's input and output for explanation.
          "featureAttributionsSchemaUri": "A String", # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access.
@@ -772,6 +778,9 @@

Method Details

    { # Vertex AI Model Monitoring Service serves as a central hub for the analysis and visualization of data quality and performance related to models. ModelMonitor stands as a top level resource for overseeing your model monitoring tasks.
      "createTime": "A String", # Output only. Timestamp when this ModelMonitor was created.
      "displayName": "A String", # The display name of the ModelMonitor. The name can be up to 128 characters long and can consist of any UTF-8.
+      "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Customer-managed encryption key spec for a ModelMonitor. If set, this ModelMonitor and all sub-resources of this ModelMonitor will be secured by this key.
+        "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer-managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as the compute resource it protects.
+      },
      "explanationSpec": { # Specification of Model explanation. # Optional model explanation spec. It is used for feature attribution monitoring.
        "metadata": { # Metadata describing the Model's input and output for explanation. # Optional. Metadata describing the Model's input and output for explanation.
          "featureAttributionsSchemaUri": "A String", # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access.
@@ -1067,6 +1076,9 @@

Method Details

    { # Vertex AI Model Monitoring Service serves as a central hub for the analysis and visualization of data quality and performance related to models. ModelMonitor stands as a top level resource for overseeing your model monitoring tasks.
      "createTime": "A String", # Output only. Timestamp when this ModelMonitor was created.
      "displayName": "A String", # The display name of the ModelMonitor. The name can be up to 128 characters long and can consist of any UTF-8.
+      "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Customer-managed encryption key spec for a ModelMonitor. If set, this ModelMonitor and all sub-resources of this ModelMonitor will be secured by this key.
+        "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer-managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as the compute resource it protects.
+      },
      "explanationSpec": { # Specification of Model explanation. # Optional model explanation spec. It is used for feature attribution monitoring.
        "metadata": { # Metadata describing the Model's input and output for explanation. # Optional. Metadata describing the Model's input and output for explanation.
          "featureAttributionsSchemaUri": "A String", # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access.
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html
index 70754ac56f..d233f7411d 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html
@@ -500,7 +500,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -990,7 +1036,53 @@
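Tying the pieces together for the publisher-model surface this hunk documents: a sketch of a `generateContent` call that passes an external-API retrieval tool using the trivial `SIMPLE_SEARCH` spec (its params object is empty by design). The model name is a placeholder, and `SIMPLE_SEARCH` is an assumed enum string:

```python
from googleapiclient import discovery

# Regional endpoint is required for Vertex AI; credentials come from ADC.
service = discovery.build(
    "aiplatform",
    "v1beta1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)
response = (
    service.projects()
    .locations()
    .publishers()
    .models()
    .generateContent(
        model="projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-pro",
        body={
            "contents": [
                {"role": "user", "parts": [{"text": "What changed in v2?"}]},
            ],
            "tools": [
                {
                    "retrieval": {
                        "externalApi": {
                            "apiSpec": "SIMPLE_SEARCH",  # assumed enum value
                            "endpoint": "https://acme.com:443/search",
                            "simpleSearchParams": {},
                        },
                    },
                },
            ],
        },
    )
    .execute()
)
```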

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1955,7 +2047,53 @@
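To make the new `externalApi` retrieval tool concrete, here is a minimal sketch of a `generateContent` request built with the dynamic Python client. The field names mirror the schema above; the model resource, endpoint URL, index and template names, and the `API_KEY_AUTH` / `HTTP_IN_QUERY` enum values are illustrative assumptions rather than confirmed values.

```python
from googleapiclient.discovery import build

# Build the v1 client; requires application-default credentials.
service = build("aiplatform", "v1")

body = {
    "contents": [
        {"role": "user", "parts": [{"text": "What does our refund policy say?"}]},
    ],
    "tools": [
        {
            "retrieval": {
                # Ground on an external Elasticsearch-backed search API.
                "externalApi": {
                    "apiSpec": "ELASTIC_SEARCH",
                    "endpoint": "https://acme.com:443/search",  # placeholder
                    "authConfig": {
                        "authType": "API_KEY_AUTH",  # assumed enum value
                        "apiKeyConfig": {
                            "name": "api_key",
                            "apiKeyString": "my-api-key",  # or apiKeySecret
                            "httpElementLocation": "HTTP_IN_QUERY",  # assumed enum value
                        },
                    },
                    "elasticSearchParams": {
                        "index": "support-docs",        # placeholder index
                        "searchTemplate": "kb-search",  # placeholder template
                        "numHits": 10,
                    },
                },
                # Mutually exclusive alternative: "vertexAiSearch" with an
                # engine plus "dataStoreSpecs" to scope the search.
            }
        }
    ],
}

response = (
    service.endpoints()
    .generateContent(
        model="projects/my-project/locations/global/publishers/google/models/gemini-1.5-pro",  # placeholder
        body=body,
    )
    .execute()
)
print(response["candidates"][0]["content"]["parts"][0]["text"])
```

Per the field descriptions above, referencing a Secret Manager version via `apiKeySecret` takes precedence over `apiKeyString` when both are set, and keeps the key out of the request body.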

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user OAuth. # Config for user OAuth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the server URL defined in the OpenAPI spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the Elasticsearch API. + "index": "A String", # The Elasticsearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The Elasticsearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Defines data stores within an engine to filter on in a search call, along with configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html index 7333bb3973..f0b03d99e3 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html @@ -128,6 +128,9 @@

Method Details

"createTime": "A String", # Output only. Timestamp when this RagCorpus was created. "description": "A String", # Optional. The description of the RagCorpus. "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Optional. Immutable. The CMEK key name used to encrypt at-rest data related to this Corpus. Only applicable to RagManagedDb option for Vector DB. This field can only be set at corpus creation time, and cannot be updated or deleted. + "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + }, "name": "A String", # Output only. The resource name of the RagCorpus. "ragEmbeddingModelConfig": { # Config for the embedding model to use for RAG. # Optional. Immutable. The embedding model config of the RagCorpus. "hybridSearchConfig": { # Config for hybrid search. # Configuration for hybrid search. @@ -155,6 +158,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -182,6 +186,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` @@ -200,6 +210,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -227,6 +238,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` @@ -332,6 +349,9 @@

Method Details

"createTime": "A String", # Output only. Timestamp when this RagCorpus was created. "description": "A String", # Optional. The description of the RagCorpus. "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Optional. Immutable. The CMEK key name used to encrypt at-rest data related to this Corpus. Only applicable to RagManagedDb option for Vector DB. This field can only be set at corpus creation time, and cannot be updated or deleted. + "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + }, "name": "A String", # Output only. The resource name of the RagCorpus. "ragEmbeddingModelConfig": { # Config for the embedding model to use for RAG. # Optional. Immutable. The embedding model config of the RagCorpus. "hybridSearchConfig": { # Config for hybrid search. # Configuration for hybrid search. @@ -359,6 +379,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -386,6 +407,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` @@ -404,6 +431,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -431,6 +459,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` @@ -477,6 +511,9 @@

Method Details

"createTime": "A String", # Output only. Timestamp when this RagCorpus was created. "description": "A String", # Optional. The description of the RagCorpus. "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Optional. Immutable. The CMEK key name used to encrypt at-rest data related to this Corpus. Only applicable to RagManagedDb option for Vector DB. This field can only be set at corpus creation time, and cannot be updated or deleted. + "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + }, "name": "A String", # Output only. The resource name of the RagCorpus. "ragEmbeddingModelConfig": { # Config for the embedding model to use for RAG. # Optional. Immutable. The embedding model config of the RagCorpus. "hybridSearchConfig": { # Config for hybrid search. # Configuration for hybrid search. @@ -504,6 +541,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -531,6 +569,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` @@ -549,6 +593,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -576,6 +621,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` @@ -628,6 +679,9 @@

Method Details

"createTime": "A String", # Output only. Timestamp when this RagCorpus was created. "description": "A String", # Optional. The description of the RagCorpus. "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Optional. Immutable. The CMEK key name used to encrypt at-rest data related to this Corpus. Only applicable to RagManagedDb option for Vector DB. This field can only be set at corpus creation time, and cannot be updated or deleted. + "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + }, "name": "A String", # Output only. The resource name of the RagCorpus. "ragEmbeddingModelConfig": { # Config for the embedding model to use for RAG. # Optional. Immutable. The embedding model config of the RagCorpus. "hybridSearchConfig": { # Config for hybrid search. # Configuration for hybrid search. @@ -655,6 +709,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -682,6 +737,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` @@ -700,6 +761,7 @@

Method Details

"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # Authentication config for the chosen Vector DB. "apiKeyConfig": { # The API secret. # The API secret. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, }, "pinecone": { # The config for the Pinecone. # The config for the Pinecone. @@ -727,6 +789,12 @@

Method Details

}, }, "ragManagedDb": { # The config for the default RAG-managed Vector DB. # The config for the RAG-managed Vector DB. + "ann": { # Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API. # Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency. + "leafCount": 42, # Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500. + "treeDepth": 42, # The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2. + }, + "knn": { # Config for KNN search. # Performs a KNN search on RagCorpus. Default choice if not specified. + }, }, "vertexFeatureStore": { # The config for the Vertex Feature Store. # The config for the Vertex Feature Store. "featureViewResourceName": "A String", # The resource name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html index a572a88900..f80f034f30 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html @@ -180,6 +180,7 @@

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String", @@ -200,6 +201,7 @@

Method Details

"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -217,6 +219,7 @@

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the Slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import. @@ -268,6 +271,7 @@
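A brief sketch of the `SlackChannels` block showing the newly added inline-key alternative; the secret name and token are placeholders, and exactly one of the two key fields should be set.

```python
# SlackChannels fragment of an ImportRagFiles request (Python client dict).
slack_channels = {
    "apiKeyConfig": {
        # Reference a Secret Manager secret version holding the Slack token...
        "apiKeySecretVersion": "projects/my-project/secrets/slack-token/versions/1",
        # ...or, with the newly added field, pass the token inline instead:
        # "apiKeyString": "xoxb-my-token",
    },
    "channels": [
        # One SlackChannel entry per channel ID and time range (fields elided).
    ],
}
```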

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String", @@ -304,7 +308,7 @@

Method Details

"maxParsingRequestsPerMin": 42, # The maximum number of requests the job is allowed to make to the Document AI processor per minute. Consult https://cloud.google.com/document-ai/quotas and the Quota page for your project to set an appropriate value here. If unspecified, a default value of 120 QPM would be used. "processorName": "A String", # The full resource name of a Document AI processor or processor version. The processor must have type `LAYOUT_PARSER_PROCESSOR`. If specified, the `additional_config.parse_as_scanned_pdf` field must be false. Format: * `projects/{project_id}/locations/{location}/processors/{processor_id}` * `projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}` }, - "llmParser": { # Specifies the advanced parsing for RagFiles. # The LLM Parser to use for RagFiles. + "llmParser": { # Specifies the LLM parsing for RagFiles. # The LLM Parser to use for RagFiles. "customParsingPrompt": "A String", # The prompt to use for parsing. If not specified, a default prompt will be used. "globalMaxParsingRequestsPerMin": 42, # The maximum number of requests the job is allowed to make to the LLM model per minute in this project. Consult https://cloud.google.com/vertex-ai/generative-ai/docs/quotas and your document size to set an appropriate value here. If this value is not specified, max_parsing_requests_per_min will be used by indexing pipeline job as the global limit. "maxParsingRequestsPerMin": 42, # The maximum number of requests the job is allowed to make to the LLM model per minute. Consult https://cloud.google.com/vertex-ai/generative-ai/docs/quotas and your document size to set an appropriate value here. If unspecified, a default value of 5000 QPM would be used. @@ -322,12 +326,14 @@

Method Details

}, }, }, + "rebuildAnnIndex": True or False, # Rebuilds the ANN index to optimize for recall on the imported data. Only applicable for RagCorpora running on RagManagedDb with `retrieval_strategy` set to `ANN`. The rebuild will be performed using the existing ANN config set on the RagCorpus. To change the ANN config, please use the UpdateRagCorpus API. Default is false, i.e., index is not rebuilt. "sharePointSources": { # The SharePointSources to pass to ImportRagFiles. # SharePoint sources. "sharePointSources": [ # The SharePoint sources. { # An individual SharePointSource. "clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -344,6 +350,7 @@
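A sketch of an import call that sets the new flag, using the dynamic client's `import_` method. The `importRagFilesConfig` request wrapper and the `gcsSource.uris` field are assumptions drawn from the wider ImportRagFiles schema, and all resource names are placeholders; the corpus is assumed to run on RagManagedDb with the ANN retrieval strategy.

```python
from googleapiclient.discovery import build

service = build("aiplatform", "v1beta1")

body = {
    "importRagFilesConfig": {  # assumed wrapper field
        "gcsSource": {"uris": ["gs://my-bucket/docs/"]},  # assumed source field
        # Rebuild the ANN index after import, using the corpus's existing
        # ANN config; defaults to False (index not rebuilt).
        "rebuildAnnIndex": True,
    }
}

op = (
    service.projects()
    .locations()
    .ragCorpora()
    .ragFiles()
    .import_(
        parent="projects/my-project/locations/us-central1/ragCorpora/123",
        body=body,
    )
    .execute()
)
```

To change the ANN configuration itself (rather than rebuild against the existing one), the description above points to the UpdateRagCorpus API.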

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the Slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import. @@ -434,6 +441,7 @@

Method Details

{ # JiraQueries contains the Jira queries and corresponding authentication. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ "A String", @@ -454,6 +462,7 @@

Method Details

"clientId": "A String", # The Application ID for the app registered in Microsoft Azure Portal. The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. "clientSecret": { # The API secret. # The application secret for the app registered in Azure. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "driveId": "A String", # The ID of the drive to download from. "driveName": "A String", # The name of the drive to download from. @@ -471,6 +480,7 @@

Method Details

{ # SlackChannels contains the Slack channels and corresponding access token. "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Slack channel access token that has access to the Slack channel IDs. See: https://api.slack.com/tutorials/tracks/getting-a-token. "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. }, "channels": [ # Required. The Slack channel IDs. { # SlackChannel contains the Slack channel ID and the time range to import. diff --git a/docs/dyn/aiplatform_v1beta1.publishers.models.html index 34f69d96c2..0e956ccd87 100644 --- a/docs/dyn/aiplatform_v1beta1.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.publishers.models.html @@ -482,7 +482,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user OAuth. # Config for user OAuth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the server URL defined in the OpenAPI spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the Elasticsearch API. + "index": "A String", # The Elasticsearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The Elasticsearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Defines data stores within an engine to filter on in a search call, along with configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -858,7 +904,53 @@
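The same retrieval schema recurs here for publisher models. Complementing the earlier external-API sketch, here is how the new `dataStoreSpecs` scoping might look on a `vertexAiSearch` tool; the resource names and filter expression are illustrative assumptions.

```python
# Retrieval tool scoped to specific data stores of a multi-data-store
# engine. dataStoreSpecs is only honored when "engine" is set, and
# "engine" and "datastore" are mutually exclusive per the schema above.
vertex_ai_search_tool = {
    "retrieval": {
        "vertexAiSearch": {
            "engine": "projects/my-project/locations/global/collections/default_collection/engines/my-engine",
            "dataStoreSpecs": [
                {
                    "dataStore": "projects/my-project/locations/global/collections/default_collection/dataStores/policies",
                    "filter": 'category: ANY("refunds")',  # assumed filter syntax
                },
            ],
        }
    }
}
```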

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user OAuth. # Config for user OAuth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the server URL defined in the OpenAPI spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the Elasticsearch API. + "index": "A String", # The Elasticsearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The Elasticsearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Defines data stores within an engine to filter on in a search call, along with configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -2610,7 +2702,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. diff --git a/docs/dyn/androidpublisher_v3.orders.html b/docs/dyn/androidpublisher_v3.orders.html index 972b76882b..c33547abfa 100644 --- a/docs/dyn/androidpublisher_v3.orders.html +++ b/docs/dyn/androidpublisher_v3.orders.html @@ -104,7 +104,7 @@

Method Details

{ # Response for the orders.batchGet API. "orders": [ # Details for the requested order IDs. - { # Details of an order. + { # The Order resource encapsulates comprehensive information about a transaction made on Google Play. It includes a variety of attributes that provide details about the order itself, the products purchased, and the history of events related to the order. The Orders APIs provide real-time access to your order data within the Google Play ecosystem. You can retrieve detailed information and metadata for both one-time and recurring orders, including transaction details like charges, taxes, and refunds, as well as metadata such as pricing phases for subscriptions. The Orders APIs let you automate tasks related to order management, reducing the need for manual checks via the Play Developer Console. The following are some of the use cases for this API: + Real-time order data retrieval - Get order details and metadata immediately after a purchase using an order ID. + Order update synchronization - Periodically sync order updates to maintain an up-to-date record of order information. Note: + The Orders API calls count towards your Play Developer API quota, which defaults to 200K daily, and may be insufficient to sync extensive order histories. + A maximum of 1000 orders can be retrieved per call. Using larger page sizes is recommended to minimize quota usage. Check your quota in the Cloud Console and request more if required. "buyerAddress": { # Address information for the customer, for use in tax computation. # Address information for the customer, for use in tax computation. When Google is the Merchant of Record for the order, only country is shown. "buyerCountry": "A String", # Two letter country code based on ISO-3166-1 Alpha-2 (UN country codes). "buyerPostcode": "A String", # Postal code of an address. When Google is the Merchant of Record for the order, this information is not included. @@ -245,7 +245,7 @@

Method Details

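Given the Orders API description above, a minimal sketch of a batch lookup through the generated Python client could look like the following; the package name and order IDs are placeholders, and field access is hedged with .get() since only part of the Order resource is shown here:

from googleapiclient.discovery import build

# Build the androidpublisher client; credentials come from the environment (ADC).
service = build("androidpublisher", "v3")

# Up to 1000 orders can be retrieved per call; larger batches minimize quota usage.
response = service.orders().batchGet(
    packageName="com.example.app",           # placeholder package name
    orderIds=["GPA.1234-5678-9012-34567"],   # placeholder order IDs (repeated)
).execute()

for order in response.get("orders", []):
    # When Google is the Merchant of Record, buyerAddress only carries the country.
    country = order.get("buyerAddress", {}).get("buyerCountry")
    print(order.get("orderId"), country)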
Returns: An object of the form: - { # Details of an order. + { # The Order resource encapsulates comprehensive information about a transaction made on Google Play. It includes a variety of attributes that provide details about the order itself, the products purchased, and the history of events related to the order. The Orders APIs provide real-time access to your order data within the Google Play ecosystem. You can retrieve detailed information and metadata for both one-time and recurring orders, including transaction details like charges, taxes, and refunds, as well as metadata such as pricing phases for subscriptions. The Orders APIs let you automate tasks related to order management, reducing the need for manual checks via the Play Developer Console. The following are some of the use cases for this API: + Real-time order data retrieval - Get order details and metadata immediately after a purchase using an order ID. + Order update synchronization - Periodically sync order updates to maintain an up-to-date record of order information. Note: + The Orders API calls count towards your Play Developer API quota, which defaults to 200K daily, and may be insufficient to sync extensive order histories. + A maximum of 1000 orders can be retrieved per call. Using larger page sizes is recommended to minimize quota usage. Check your quota in the Cloud Console and request more if required. "buyerAddress": { # Address information for the customer, for use in tax computation. # Address information for the customer, for use in tax computation. When Google is the Merchant of Record for the order, only country is shown. "buyerCountry": "A String", # Two letter country code based on ISO-3166-1 Alpha-2 (UN country codes). "buyerPostcode": "A String", # Postal code of an address. When Google is the Merchant of Record for the order, this information is not included. diff --git a/docs/dyn/androidpublisher_v3.purchases.subscriptions.html b/docs/dyn/androidpublisher_v3.purchases.subscriptions.html index fe34468885..0e7633cc2a 100644 --- a/docs/dyn/androidpublisher_v3.purchases.subscriptions.html +++ b/docs/dyn/androidpublisher_v3.purchases.subscriptions.html @@ -88,13 +88,13 @@

Instance Methods

Defers a user's subscription purchase until a specified future expiration time.

get(packageName, subscriptionId, token, x__xgafv=None)

-

Checks whether a user's subscription purchase is valid and returns its expiry time.

+

Deprecated: Use purchases.subscriptionsv2.get instead. Checks whether a user's subscription purchase is valid and returns its expiry time.

refund(packageName, subscriptionId, token, x__xgafv=None)

-

Refunds a user's subscription purchase, but the subscription remains valid until its expiration time and it will continue to recur.

+

Deprecated: Use orders.refund instead. Refunds a user's subscription purchase, but the subscription remains valid until its expiration time and it will continue to recur.

revoke(packageName, subscriptionId, token, x__xgafv=None)

-

Refunds and immediately revokes a user's subscription purchase. Access to the subscription will be terminated immediately and it will stop recurring.

+

Deprecated: Use purchases.subscriptionsv2.revoke instead. Refunds and immediately revokes a user's subscription purchase. Access to the subscription will be terminated immediately and it will stop recurring.

Method Details

acknowledge(packageName, subscriptionId, token, body=None, x__xgafv=None) @@ -171,7 +171,7 @@

Method Details

get(packageName, subscriptionId, token, x__xgafv=None) -
Checks whether a user's subscription purchase is valid and returns its expiry time.
+  
Deprecated: Use purchases.subscriptionsv2.get instead. Checks whether a user's subscription purchase is valid and returns its expiry time.
 
 Args:
   packageName: string, The package name of the application for which this subscription was purchased (for example, 'com.some.thing'). (required)
@@ -234,7 +234,7 @@ 

Method Details

refund(packageName, subscriptionId, token, x__xgafv=None) -
Refunds a user's subscription purchase, but the subscription remains valid until its expiration time and it will continue to recur.
+  
Deprecated: Use orders.refund instead. Refunds a user's subscription purchase, but the subscription remains valid until its expiration time and it will continue to recur.
 
 Args:
   packageName: string, The package name of the application for which this subscription was purchased (for example, 'com.some.thing'). (required)
@@ -249,7 +249,7 @@ 

Method Details

revoke(packageName, subscriptionId, token, x__xgafv=None) -
Refunds and immediately revokes a user's subscription purchase. Access to the subscription will be terminated immediately and it will stop recurring.
+  
Deprecated: Use purchases.subscriptionsv2.revoke instead. Refunds and immediately revokes a user's subscription purchase. Access to the subscription will be terminated immediately and it will stop recurring.
 
 Args:
   packageName: string, The package name of the application for which this subscription was purchased (for example, 'com.some.thing'). (required)
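Because get, refund, and revoke on purchases.subscriptions are now marked deprecated, a hedged sketch of the replacement read path via purchases.subscriptionsv2 follows; the package name and purchase token are placeholders, and latestSuccessfulOrderId is an assumed JSON field name based on the line_items.latest_successful_order_id reference in the next file:

from googleapiclient.discovery import build

service = build("androidpublisher", "v3")

# purchases.subscriptionsv2.get supersedes the deprecated purchases.subscriptions.get.
purchase = service.purchases().subscriptionsv2().get(
    packageName="com.example.app",    # placeholder package name
    token="purchase-token-from-app",  # placeholder purchase token
).execute()

print(purchase.get("subscriptionState"))
for item in purchase.get("lineItems", []):
    # Assumed field name, per the line_items.latest_successful_order_id note below.
    print(item.get("productId"), item.get("latestSuccessfulOrderId"))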
diff --git a/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html b/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html
index 79439a930f..3a05f288c4 100644
--- a/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html
+++ b/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html
@@ -127,7 +127,7 @@ 

Method Details

"obfuscatedExternalProfileId": "A String", # An obfuscated version of the id that is uniquely associated with the user's profile in your app. Only present if specified using https://developer.android.com/reference/com/android/billingclient/api/BillingFlowParams.Builder#setobfuscatedprofileid when the purchase was made. }, "kind": "A String", # This kind represents a SubscriptionPurchaseV2 object in the androidpublisher service. - "latestOrderId": "A String", # The order id of the latest order associated with the purchase of the subscription. For autoRenewing subscription, this is the order id of signup order if it is not renewed yet, or the last recurring order id (success, pending, or declined order). For prepaid subscription, this is the order id associated with the queried purchase token. + "latestOrderId": "A String", # Deprecated: Use line_items.latest_successful_order_id instead. The order id of the latest order associated with the purchase of the subscription. For autoRenewing subscription, this is the order id of signup order if it is not renewed yet, or the last recurring order id (success, pending, or declined order). For prepaid subscription, this is the order id associated with the queried purchase token. "lineItems": [ # Item-level info for a subscription purchase. The items in the same purchase should be either all with AutoRenewingPlan or all with PrepaidPlan. { # Item-level info for a subscription purchase. "autoRenewingPlan": { # Information related to an auto renewing plan. # The item is auto renewing. diff --git a/docs/dyn/apigee_v1.organizations.appgroups.apps.keys.html b/docs/dyn/apigee_v1.organizations.appgroups.apps.keys.html index d24e2e2f9c..9bebac8bf2 100644 --- a/docs/dyn/apigee_v1.organizations.appgroups.apps.keys.html +++ b/docs/dyn/apigee_v1.organizations.appgroups.apps.keys.html @@ -93,7 +93,7 @@

Instance Methods

Gets details for a consumer key for an AppGroup app, including the key and secret value, associated API products, and other information.

updateAppGroupAppKey(name, body=None, x__xgafv=None)

-

Adds an API product to an AppGroupAppKey, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes to the AppGroupAppKey. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.

+

Adds an API product to an AppGroupAppKey, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes and scopes to the AppGroupAppKey. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.

Method Details

close() @@ -246,7 +246,7 @@

Method Details

updateAppGroupAppKey(name, body=None, x__xgafv=None) -
Adds an API product to an AppGroupAppKey, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes to the AppGroupAppKey. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.
+  
Adds an API product to an AppGroupAppKey, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes and scopes to the AppGroupAppKey. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.
 
 Args:
   name: string, Required. Name of the AppGroup app key. Use the following structure in your request: `organizations/{org}/appgroups/{app_group_name}/apps/{app}/keys/{key}` (required)
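A short sketch of the updateAppGroupAppKey call described above, using the generated apigee_v1 client; the resource name, product, scope, and attribute values are placeholders, and the body shape for scopes/attributes is assumed from the summary's statement that only scopes and attributes of the AppGroupAppKey can be amended:

from googleapiclient.discovery import build

service = build("apigee", "v1")

key_name = ("organizations/my-org/appgroups/my-appgroup"
            "/apps/my-app/keys/my-key")  # placeholder resource name

body = {
    # Appended to the credential's existing API products; duplicates are ignored.
    "apiProducts": ["weather-product"],
    # Only scopes and attributes of the AppGroupAppKey can be amended.
    "appGroupAppKey": {
        "scopes": ["read:forecasts"],
        "attributes": [{"name": "tier", "value": "gold"}],
    },
}

key = service.organizations().appgroups().apps().keys().updateAppGroupAppKey(
    name=key_name, body=body
).execute()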
@@ -258,7 +258,7 @@ 

Method Details

"apiProducts": [ # The list of API products that will be associated with the credential. This list will be appended to the existing list of associated API Products for this App Key. Duplicates will be ignored. "A String", ], - "appGroupAppKey": { # AppGroupAppKey contains all the information associated with the credentials. # The new AppGroupKey to be amended. Note that the status can be updated only via action. + "appGroupAppKey": { # AppGroupAppKey contains all the information associated with the credentials. # Note that only Scopes and Attributes of the AppGroupAppKey can be amended. "apiProducts": [ # Output only. List of API products and its status for which the credential can be used. **Note**: Use UpdateAppGroupAppKeyApiProductRequest API to make the association after the consumer key and secret are created. { # APIProductAssociation has the API product and its administrative state association. "apiproduct": "A String", # API product to be associated with the credential. diff --git a/docs/dyn/apigee_v1.organizations.developers.apps.keys.html b/docs/dyn/apigee_v1.organizations.developers.apps.keys.html index 8ce4702185..19f55bbe8e 100644 --- a/docs/dyn/apigee_v1.organizations.developers.apps.keys.html +++ b/docs/dyn/apigee_v1.organizations.developers.apps.keys.html @@ -98,7 +98,7 @@

Instance Methods

Updates the scope of an app. This API replaces the existing scopes with those specified in the request. Include or exclude any existing scopes that you want to retain or delete, respectively. The specified scopes must already be defined for the API products associated with the app. This API sets the `scopes` element under the `apiProducts` element in the attributes of the app.

updateDeveloperAppKey(name, action=None, body=None, x__xgafv=None)

-

Adds an API product to a developer app key, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes to a developer app key. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.

+

Adds an API product to a developer app key, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes and scopes associated with the API product to the developer app key. The status of the key can be updated via the "action" query parameter. None of the other fields can be updated via this API. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.

Method Details

close() @@ -239,7 +239,7 @@

Method Details

updateDeveloperAppKey(name, action=None, body=None, x__xgafv=None) -
Adds an API product to a developer app key, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes to a developer app key. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.
+  
Adds an API product to a developer app key, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes and scopes associated with the API product to the developer app key. The status of the key can be updated via the "action" query parameter. None of the other fields can be updated via this API. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.
 
 Args:
   name: string, Name of the developer app key. Use the following structure in your request: `organizations/{org}/developers/{developer_email}/apps/{app}/keys/{key}` (required)
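Similarly, a hedged sketch of updateDeveloperAppKey; the resource name and attribute are placeholders, and "approve" is an assumed value for the "action" query parameter that governs key status:

from googleapiclient.discovery import build

service = build("apigee", "v1")

key_name = ("organizations/my-org/developers/dev@example.com"
            "/apps/my-app/keys/my-key")  # placeholder resource name

# Attributes are replaced wholesale, so include any existing ones to retain them.
key = service.organizations().developers().apps().keys().updateDeveloperAppKey(
    name=key_name,
    action="approve",  # assumed value; status changes only via this query parameter
    body={"attributes": [{"name": "env", "value": "prod"}]},
).execute()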
diff --git a/docs/dyn/apihub_v1.projects.locations.deployments.html b/docs/dyn/apihub_v1.projects.locations.deployments.html
index 35df955740..59d4ddb727 100644
--- a/docs/dyn/apihub_v1.projects.locations.deployments.html
+++ b/docs/dyn/apihub_v1.projects.locations.deployments.html
@@ -210,7 +210,7 @@ 

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. @@ -363,7 +363,7 @@

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. @@ -540,7 +540,7 @@

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. @@ -704,7 +704,7 @@

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. @@ -875,7 +875,7 @@

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. @@ -1028,7 +1028,7 @@

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. diff --git a/docs/dyn/apihub_v1.projects.locations.html b/docs/dyn/apihub_v1.projects.locations.html index 28e37ddf0e..05fc93978d 100644 --- a/docs/dyn/apihub_v1.projects.locations.html +++ b/docs/dyn/apihub_v1.projects.locations.html @@ -566,7 +566,7 @@

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. @@ -1495,7 +1495,7 @@

Method Details

}, }, "name": "A String", # Identifier. The name of the deployment. Format: `projects/{project}/locations/{location}/deployments/{deployment}` - "resourceUri": "A String", # Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy. + "resourceUri": "A String", # Required. A uri that uniquely identfies the deployment within a particular gateway. For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment. "slo": { # The attribute values associated with resource. # Optional. The SLO for this deployment. This maps to the following system defined attribute: `projects/{project}/locations/{location}/attributes/system-slo` attribute. The number of values for this attribute will be based on the cardinality of the attribute. The same can be retrieved via GetAttribute API. All values should be from the list of allowed values defined for the attribute. "attribute": "A String", # Output only. The name of the attribute. Format: projects/{project}/locations/{location}/attributes/{attribute} "enumValues": { # The attribute values of data type enum. # The attribute values associated with a resource in case attribute data type is enum. diff --git a/docs/dyn/apim_v1alpha.projects.locations.html b/docs/dyn/apim_v1alpha.projects.locations.html index 7c0dbb7de1..65d45531f3 100644 --- a/docs/dyn/apim_v1alpha.projects.locations.html +++ b/docs/dyn/apim_v1alpha.projects.locations.html @@ -96,7 +96,7 @@

Instance Methods

get(name, x__xgafv=None)

Gets information about a location.

- list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+ list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

Lists information about the supported locations for this service.

listApiObservationTags(parent, pageSize=None, pageToken=None, x__xgafv=None)

@@ -141,11 +141,12 @@

Method Details

- list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) + list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists information about the supported locations for this service.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
+  extraLocationTypes: string, Optional. A list of extra location types that should be used as conditions for controlling the visibility of the locations. (repeated)
   filter: string, A filter to narrow down results to a preferred subset. The filtering language accepts strings like `"displayName=tokyo"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).
   pageSize: integer, The maximum number of results to return. If not set, the service selects a default.
   pageToken: string, A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.
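A small sketch of the updated list signature with the new extraLocationTypes parameter; the project name and the "GDCC_ZONE" value are placeholders, and list_next is the generated client's standard pagination helper:

from googleapiclient.discovery import build

service = build("apim", "v1alpha")

request = service.projects().locations().list(
    name="projects/my-project",        # placeholder project
    extraLocationTypes=["GDCC_ZONE"],  # placeholder value for the repeated parameter
    filter='displayName="tokyo"',      # AIP-160 filter syntax
    pageSize=50,
)
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location.get("locationId"))
    request = service.projects().locations().list_next(request, response)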
diff --git a/docs/dyn/apim_v1alpha.projects.locations.operations.html b/docs/dyn/apim_v1alpha.projects.locations.operations.html
index d44ac2c0b9..2ae4dc48ba 100644
--- a/docs/dyn/apim_v1alpha.projects.locations.operations.html
+++ b/docs/dyn/apim_v1alpha.projects.locations.operations.html
@@ -76,7 +76,7 @@ 

API Management API . cancel(name, body=None, x__xgafv=None)

-

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.

close()

Close httplib2 connections.

@@ -95,7 +95,7 @@

Instance Methods

Method Details

cancel(name, body=None, x__xgafv=None) -
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
+  
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.
 
 Args:
   name: string, The name of the operation resource to be cancelled. (required)
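A brief sketch of the cancellation flow this summary describes: request best-effort cancellation, then poll the operation to see whether it ended up cancelled (google.rpc.Status.code of `1`, `Code.CANCELLED`) or completed anyway; the operation name is a placeholder:

from googleapiclient.discovery import build

service = build("apim", "v1alpha")

op_name = "projects/my-project/locations/us-central1/operations/operation-123"  # placeholder

# Best-effort cancellation; success is not guaranteed.
service.projects().locations().operations().cancel(name=op_name, body={}).execute()

# Poll to see whether cancellation won the race with completion.
op = service.projects().locations().operations().get(name=op_name).execute()
if op.get("done"):
    error = op.get("error", {})
    if error.get("code") == 1:  # google.rpc.Code.CANCELLED
        print("operation cancelled")
    elif error:
        print("operation failed:", error.get("message"))
    else:
        print("operation completed before the cancellation took effect")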
diff --git a/docs/dyn/areainsights_v1.v1.html b/docs/dyn/areainsights_v1.v1.html
index 616cf909d7..d221c19995 100644
--- a/docs/dyn/areainsights_v1.v1.html
+++ b/docs/dyn/areainsights_v1.v1.html
@@ -116,7 +116,7 @@ 

Method Details

}, }, "region": { # A region is a geographic boundary such as: cities, postal codes, counties, states, etc. # Area as region. - "place": "A String", # The unique identifier of a specific geographic region. + "place": "A String", # The [place ID](https://developers.google.com/maps/documentation/places/web-service/place-id) of the geographic region. Not all region types are supported; see documentation for details. **Format:** Must be in the format `places/PLACE_ID`, where `PLACE_ID` is the unique identifier of a place. For example: `places/ChIJPV4oX_65j4ARVW8IJ6IJUYs`. }, }, "operatingStatus": [ # Optional. Restricts results to places whose operating status is included on this list. If operating_status is not set, OPERATING_STATUS_OPERATIONAL is used as default. diff --git a/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html b/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html index eb177b0942..3ee6a1f272 100644 --- a/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html +++ b/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html @@ -88,7 +88,7 @@

Instance Methods

Update existing ProvisioningConfig.

submit(parent, body=None, x__xgafv=None)

-

Submit a provisiong configuration for a given project.

+

Submit a provisioning configuration for a given project.

Method Details

close() @@ -118,7 +118,7 @@

Method Details

"networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. }, "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. - "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "id": "A String", # A transient unique identifier to identify an instance within an ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. @@ -238,7 +238,7 @@

Method Details

"networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. }, "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. - "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "id": "A String", # A transient unique identifier to identify an instance within an ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. @@ -364,7 +364,7 @@

Method Details

"networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. }, "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. - "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "id": "A String", # A transient unique identifier to identify an instance within an ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. @@ -485,7 +485,7 @@

Method Details

"networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. }, "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. - "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "id": "A String", # A transient unique identifier to identify an instance within an ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. @@ -606,7 +606,7 @@

Method Details

"networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. }, "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. - "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "id": "A String", # A transient unique identifier to identify an instance within an ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. @@ -706,7 +706,7 @@

Method Details

submit(parent, body=None, x__xgafv=None) -
Submit a provisiong configuration for a given project.
+  
Submit a provisioning configuration for a given project.
 
 Args:
   parent: string, Required. The parent project and location containing the ProvisioningConfig. (required)
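A hedged sketch of submit as described above; the parent and instance values are placeholders, and wrapping the config in a provisioningConfig field is an assumption about the request body shape:

from googleapiclient.discovery import build

service = build("baremetalsolution", "v2")

parent = "projects/my-project/locations/us-central1"  # placeholder parent

body = {
    # Assumed request shape: the config is wrapped in a provisioningConfig field.
    "provisioningConfig": {
        "instances": [
            {
                "id": "instance-1",  # transient id, unique within this request
                "instanceType": "o2-standard-32",  # placeholder; see the available-types link above
                "hyperthreading": True,
            },
        ],
    },
}

config = service.projects().locations().provisioningConfigs().submit(
    parent=parent, body=body
).execute()
print(config.get("name"))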
@@ -729,7 +729,7 @@ 

Method Details

"networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. }, "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. - "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "id": "A String", # A transient unique identifier to identify an instance within an ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. @@ -850,7 +850,7 @@

Method Details

"networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. }, "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. - "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "id": "A String", # A transient unique identifier to identify an instance within an ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. diff --git a/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.assignments.html b/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.assignments.html index 25baa1440d..5781872d5c 100644 --- a/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.assignments.html +++ b/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.assignments.html @@ -83,6 +83,9 @@

Instance Methods

delete(name, x__xgafv=None)

Deletes an assignment. No expansion will happen. Example: * Organization `organizationA` contains two projects, `project1` and `project2`. * Reservation `res1` exists and was created previously. * CreateAssignment was used previously to define the following associations between entities and reservations: `` and `` In this example, deletion of the `` assignment won't affect the other assignment ``. After said deletion, queries from `project1` will still use `res1` while queries from `project2` will switch to use on-demand mode.

+

+ getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

+

Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations - ReservationAssignments To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.

list(parent, pageSize=None, pageToken=None, x__xgafv=None)

Lists assignments. Only explicitly created assignments will be returned. Example: * Organization `organizationA` contains two projects, `project1` and `project2`. * Reservation `res1` exists and was created previously. * CreateAssignment was used previously to define the following associations between entities and reservations: `` and `` In this example, ListAssignments will just return the above two assignments for reservation `res1`, and no expansion/merge will happen. The wildcard "-" can be used for reservations in the request. In that case, all assignments belonging to the specified project and location will be listed. **Note** "-" cannot be used for projects or locations.

@@ -95,6 +98,12 @@

Instance Methods

patch(name, body=None, updateMask=None, x__xgafv=None)

Updates an existing assignment. Only the `priority` field can be updated.

+

+ setIamPolicy(resource, body=None, x__xgafv=None)

+

Sets an access control policy for a resource. Replaces any existing policy. Supported resources are: - Reservations To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.setIamPolicy` to set policies on reservations.

+

+ testIamPermissions(resource, body=None, x__xgafv=None)

+

Gets your permissions on a resource. Returns an empty set of permissions if the resource doesn't exist. Supported resources are: - Reservations No Google IAM permissions are required to call this method.

Method Details

close() @@ -154,6 +163,54 @@

Method Details

}
+
+ getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None) +
Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations - ReservationAssignments To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.
+
+Args:
+  resource: string, REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
+  options_requestedPolicyVersion: integer, Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
Lists assignments. Only explicitly created assignments will be returned. Example: * Organization `organizationA` contains two projects, `project1` and `project2`. * Reservation `res1` exists and was created previously. * CreateAssignment was used previously to define the following associations between entities and reservations: `` and `` In this example, ListAssignments will just return the above two assignments for reservation `res1`, and no expansion/merge will happen. The wildcard "-" can be used for reservations in the request. In that case all assignments belonging to the specified project and location will be listed. **Note** "-" cannot be used for projects or locations.
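
A minimal sketch of calling this method with the google-api-python-client, assuming Application Default Credentials and a hypothetical project and location; the `-` wildcard stands in for all reservations:

```python
from googleapiclient.discovery import build

service = build("bigqueryreservation", "v1")
assignments = service.projects().locations().reservations().assignments()

# "-" is the reservation wildcard: list assignments across every reservation
# in this (hypothetical) project and location.
request = assignments.list(
    parent="projects/my-project/locations/US/reservations/-", pageSize=100)
while request is not None:
    response = request.execute()
    for assignment in response.get("assignments", []):
        print(assignment["name"])
    request = assignments.list_next(
        previous_request=request, previous_response=response)
```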
@@ -264,4 +321,119 @@ 

Method Details

}
+
+ setIamPolicy(resource, body=None, x__xgafv=None)
Sets an access control policy for a resource. Replaces any existing policy. Supported resources are: - Reservations To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.setIamPolicy` to set policies on reservations.
+
+Args:
+  resource: string, REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for `SetIamPolicy` method.
+  "policy": { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). # REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them.
+    "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+      { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+        "auditLogConfigs": [ # The configuration for logging of each type of permission.
+          { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+            "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+              "A String",
+            ],
+            "logType": "A String", # The log type that this config enables.
+          },
+        ],
+        "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+      },
+    ],
+    "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+      { # Associates `members`, or principals, with a `role`.
+        "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+          "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+          "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+          "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
+        },
+        "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+          "A String",
+        ],
+        "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).
+      },
+    ],
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+    "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  },
+  "updateMask": "A String", # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
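
Putting the `etag` guidance above into practice, a read-modify-write sketch with the google-api-python-client; the reservation name and the granted role are illustrative assumptions, not part of the generated docs:

```python
from googleapiclient.discovery import build

service = build("bigqueryreservation", "v1")
reservations = service.projects().locations().reservations()
resource = "projects/my-project/locations/US/reservations/my-reservation"  # hypothetical

# Read-modify-write: reusing the etag returned by getIamPolicy makes the
# setIamPolicy call fail instead of silently overwriting a concurrent update.
policy = reservations.getIamPolicy(resource=resource).execute()
policy.setdefault("bindings", []).append({
    "role": "roles/bigquery.admin",  # illustrative role
    "members": ["user:alice@example.com"],
})
updated = reservations.setIamPolicy(
    resource=resource, body={"policy": policy}).execute()
```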
+ testIamPermissions(resource, body=None, x__xgafv=None)
Gets your permissions on a resource. Returns an empty set of permissions if the resource doesn't exist. Supported resources are: - Reservations No Google IAM permissions are required to call this method.
+
+Args:
+  resource: string, REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for `TestIamPermissions` method.
+  "permissions": [ # The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for `TestIamPermissions` method.
+  "permissions": [ # A subset of `TestPermissionsRequest.permissions` that the caller is allowed.
+    "A String",
+  ],
+}
+
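
A short sketch, again assuming a hypothetical reservation; the response echoes back only the permissions the caller actually holds:

```python
from googleapiclient.discovery import build

service = build("bigqueryreservation", "v1")
resource = "projects/my-project/locations/US/reservations/my-reservation"  # hypothetical

response = service.projects().locations().reservations().testIamPermissions(
    resource=resource,
    body={"permissions": ["bigqueryreservation.reservations.setIamPolicy"]},
).execute()
print(response.get("permissions", []))  # subset of the requested permissions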
+
\ No newline at end of file
diff --git a/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.html b/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.html
index 4679f5515d..c366083011 100644
--- a/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.html
+++ b/docs/dyn/bigqueryreservation_v1.projects.locations.reservations.html
@@ -96,7 +96,7 @@

Instance Methods

Returns information about the reservation.

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

- Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.

+ Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations - ReservationAssignments To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.

list(parent, pageSize=None, pageToken=None, x__xgafv=None)

Lists all the reservations for the project in the specified location.

@@ -156,6 +156,7 @@

Method Details

}, "lastErrorTime": "A String", # Output only. The time at which the last error was encountered while trying to replicate changes from the primary to the secondary. This field is only available if the replication has not succeeded since. "lastReplicationTime": "A String", # Output only. A timestamp corresponding to the last change on the primary that was successfully replicated to the secondary. + "softFailoverStartTime": "A String", # Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete. }, "scalingMode": "A String", # Optional. The scaling mode for the reservation. If the field is present but max_slots is not present, requests will be rejected with error code `google.rpc.Code.INVALID_ARGUMENT`. "secondaryLocation": "A String", # Optional. The current location of the reservation's secondary replica. This field is only set for reservations using the managed disaster recovery feature. Users can set this in create reservation calls to create a failover reservation or in update reservation calls to convert a non-failover reservation to a failover reservation(or vice versa). @@ -201,6 +202,7 @@

Method Details

}, "lastErrorTime": "A String", # Output only. The time at which the last error was encountered while trying to replicate changes from the primary to the secondary. This field is only available if the replication has not succeeded since. "lastReplicationTime": "A String", # Output only. A timestamp corresponding to the last change on the primary that was successfully replicated to the secondary. + "softFailoverStartTime": "A String", # Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete. }, "scalingMode": "A String", # Optional. The scaling mode for the reservation. If the field is present but max_slots is not present, requests will be rejected with error code `google.rpc.Code.INVALID_ARGUMENT`. "secondaryLocation": "A String", # Optional. The current location of the reservation's secondary replica. This field is only set for reservations using the managed disaster recovery feature. Users can set this in create reservation calls to create a failover reservation or in update reservation calls to convert a non-failover reservation to a failover reservation(or vice versa). @@ -237,6 +239,7 @@

Method Details

The object takes the form of:

{ # The request for ReservationService.FailoverReservation.
+  "failoverMode": "A String", # Optional. Failover mode for the failover operation.
}

  x__xgafv: string, V1 error format.
@@ -276,6 +279,7 @@
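
A hedged sketch of initiating a failover with this request body; the reservation name is hypothetical, and the `"SOFT"` mode string is an assumption — confirm the accepted enum values against the FailoverReservation reference for your API version:

```python
from googleapiclient.discovery import build

service = build("bigqueryreservation", "v1")
name = "projects/my-project/locations/US/reservations/my-reservation"  # hypothetical

# "SOFT" is assumed here; the docs only say failoverMode is an optional string.
reservation = service.projects().locations().reservations().failoverReservation(
    name=name, body={"failoverMode": "SOFT"}).execute()
# For a soft failover, softFailoverStartTime should appear in replicationStatus.
print(reservation.get("replicationStatus", {}).get("softFailoverStartTime"))
```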

Method Details

}, "lastErrorTime": "A String", # Output only. The time at which the last error was encountered while trying to replicate changes from the primary to the secondary. This field is only available if the replication has not succeeded since. "lastReplicationTime": "A String", # Output only. A timestamp corresponding to the last change on the primary that was successfully replicated to the secondary. + "softFailoverStartTime": "A String", # Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete. }, "scalingMode": "A String", # Optional. The scaling mode for the reservation. If the field is present but max_slots is not present, requests will be rejected with error code `google.rpc.Code.INVALID_ARGUMENT`. "secondaryLocation": "A String", # Optional. The current location of the reservation's secondary replica. This field is only set for reservations using the managed disaster recovery feature. Users can set this in create reservation calls to create a failover reservation or in update reservation calls to convert a non-failover reservation to a failover reservation(or vice versa). @@ -327,6 +331,7 @@

Method Details

}, "lastErrorTime": "A String", # Output only. The time at which the last error was encountered while trying to replicate changes from the primary to the secondary. This field is only available if the replication has not succeeded since. "lastReplicationTime": "A String", # Output only. A timestamp corresponding to the last change on the primary that was successfully replicated to the secondary. + "softFailoverStartTime": "A String", # Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete. }, "scalingMode": "A String", # Optional. The scaling mode for the reservation. If the field is present but max_slots is not present, requests will be rejected with error code `google.rpc.Code.INVALID_ARGUMENT`. "secondaryLocation": "A String", # Optional. The current location of the reservation's secondary replica. This field is only set for reservations using the managed disaster recovery feature. Users can set this in create reservation calls to create a failover reservation or in update reservation calls to convert a non-failover reservation to a failover reservation(or vice versa). @@ -337,7 +342,7 @@

Method Details

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)
- Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.
+ Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations - ReservationAssignments To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.
 
 Args:
   resource: string, REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
@@ -431,6 +436,7 @@ 

Method Details

}, "lastErrorTime": "A String", # Output only. The time at which the last error was encountered while trying to replicate changes from the primary to the secondary. This field is only available if the replication has not succeeded since. "lastReplicationTime": "A String", # Output only. A timestamp corresponding to the last change on the primary that was successfully replicated to the secondary. + "softFailoverStartTime": "A String", # Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete. }, "scalingMode": "A String", # Optional. The scaling mode for the reservation. If the field is present but max_slots is not present, requests will be rejected with error code `google.rpc.Code.INVALID_ARGUMENT`. "secondaryLocation": "A String", # Optional. The current location of the reservation's secondary replica. This field is only set for reservations using the managed disaster recovery feature. Users can set this in create reservation calls to create a failover reservation or in update reservation calls to convert a non-failover reservation to a failover reservation(or vice versa). @@ -493,6 +499,7 @@

Method Details

}, "lastErrorTime": "A String", # Output only. The time at which the last error was encountered while trying to replicate changes from the primary to the secondary. This field is only available if the replication has not succeeded since. "lastReplicationTime": "A String", # Output only. A timestamp corresponding to the last change on the primary that was successfully replicated to the secondary. + "softFailoverStartTime": "A String", # Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete. }, "scalingMode": "A String", # Optional. The scaling mode for the reservation. If the field is present but max_slots is not present, requests will be rejected with error code `google.rpc.Code.INVALID_ARGUMENT`. "secondaryLocation": "A String", # Optional. The current location of the reservation's secondary replica. This field is only set for reservations using the managed disaster recovery feature. Users can set this in create reservation calls to create a failover reservation or in update reservation calls to convert a non-failover reservation to a failover reservation(or vice versa). @@ -538,6 +545,7 @@

Method Details

}, "lastErrorTime": "A String", # Output only. The time at which the last error was encountered while trying to replicate changes from the primary to the secondary. This field is only available if the replication has not succeeded since. "lastReplicationTime": "A String", # Output only. A timestamp corresponding to the last change on the primary that was successfully replicated to the secondary. + "softFailoverStartTime": "A String", # Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete. }, "scalingMode": "A String", # Optional. The scaling mode for the reservation. If the field is present but max_slots is not present, requests will be rejected with error code `google.rpc.Code.INVALID_ARGUMENT`. "secondaryLocation": "A String", # Optional. The current location of the reservation's secondary replica. This field is only set for reservations using the managed disaster recovery feature. Users can set this in create reservation calls to create a failover reservation or in update reservation calls to convert a non-failover reservation to a failover reservation(or vice versa). diff --git a/docs/dyn/bigtableadmin_v2.operations.html b/docs/dyn/bigtableadmin_v2.operations.html index 7466f142a8..5f569bf3da 100644 --- a/docs/dyn/bigtableadmin_v2.operations.html +++ b/docs/dyn/bigtableadmin_v2.operations.html @@ -79,18 +79,60 @@

Instance Methods

Returns the projects Resource.

+ cancel(name, x__xgafv=None)

+ Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.

close()

Close httplib2 connections.

+ delete(name, x__xgafv=None)

+ Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

get(name, x__xgafv=None)

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

Method Details

+
+ cancel(name, x__xgafv=None)
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
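
Because cancellation is best-effort, the empty response only confirms the request was accepted; a sketch that polls `get` afterwards (operation name is hypothetical):

```python
from googleapiclient.discovery import build

service = build("bigtableadmin", "v2")
op_name = "operations/my-operation-id"  # hypothetical operation name

service.operations().cancel(name=op_name).execute()

# A cancelled operation ends up done with error code 1 (Code.CANCELLED).
op = service.operations().get(name=op_name).execute()
if op.get("done") and op.get("error", {}).get("code") == 1:
    print("operation was cancelled")
```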
+
+
close()
Close httplib2 connections.
+
+ delete(name, x__xgafv=None)
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
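
A one-call sketch; deleting only discards the server-side record of the operation and does not stop any work still in flight (operation name hypothetical):

```python
from googleapiclient.discovery import build

service = build("bigtableadmin", "v2")
service.operations().delete(name="operations/my-operation-id").execute()
```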
+
+
get(name, x__xgafv=None)
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
diff --git a/docs/dyn/bigtableadmin_v2.projects.locations.html b/docs/dyn/bigtableadmin_v2.projects.locations.html
index a1d8a23f0f..3095f1989b 100644
--- a/docs/dyn/bigtableadmin_v2.projects.locations.html
+++ b/docs/dyn/bigtableadmin_v2.projects.locations.html
@@ -77,6 +77,9 @@ 

Instance Methods

close()

Close httplib2 connections.

+ get(name, x__xgafv=None)

+ Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

Lists information about the supported locations for this service.

@@ -89,6 +92,33 @@

Method Details

Close httplib2 connections.
+
+ get(name, x__xgafv=None)
Gets information about a location.
+
+Args:
+  name: string, Resource name for the location. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A resource that represents a Google Cloud location.
+  "displayName": "A String", # The friendly name for this location, typically a nearby city name. For example, "Tokyo".
+  "labels": { # Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
+    "a_key": "A String",
+  },
+  "locationId": "A String", # The canonical id for this location. For example: `"us-east1"`.
+  "metadata": { # Service-specific metadata. For example the available capacity at the given location.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
+}
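
A minimal sketch with a hypothetical project; the returned dict matches the Location schema above:

```python
from googleapiclient.discovery import build

service = build("bigtableadmin", "v2")
location = service.projects().locations().get(
    name="projects/my-project/locations/us-east1").execute()
print(location["locationId"], location.get("displayName"))
```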
+
+
list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists information about the supported locations for this service.
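
And the paginated counterpart, assuming the same hypothetical project:

```python
from googleapiclient.discovery import build

service = build("bigtableadmin", "v2")
locations = service.projects().locations()

request = locations.list(name="projects/my-project")
while request is not None:
    response = request.execute()
    for loc in response.get("locations", []):
        print(loc["locationId"])
    request = locations.list_next(
        previous_request=request, previous_response=response)
```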
diff --git a/docs/dyn/chat_v1.spaces.messages.html b/docs/dyn/chat_v1.spaces.messages.html
index f5040e0bb2..279c4d75dc 100644
--- a/docs/dyn/chat_v1.spaces.messages.html
+++ b/docs/dyn/chat_v1.spaces.messages.html
@@ -412,12 +412,6 @@ 

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -674,12 +668,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -1081,12 +1069,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -1343,12 +1325,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -1455,12 +1431,6 @@

Method Details

"items": [ # An array of the SelectionItem objects.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -1960,12 +1930,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -2222,12 +2186,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -2758,12 +2716,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -3020,12 +2972,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -3427,12 +3373,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -3689,12 +3629,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -3801,12 +3735,6 @@

Method Details

"items": [ # An array of the SelectionItem objects.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -4306,12 +4234,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -4568,12 +4490,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -5122,12 +5038,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -5384,12 +5294,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -5791,12 +5695,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -6053,12 +5951,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -6165,12 +6057,6 @@

Method Details

"items": [ # An array of the SelectionItem objects.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -6670,12 +6556,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -6932,12 +6812,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -7474,12 +7348,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -7736,12 +7604,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -8143,12 +8005,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items.
{ # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend):
"bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field.
- "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat):
- "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render.
- "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**.
- },
"selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.
"startIconUri": "A String",
"text": "A String", # The text that identifies or describes the item to users.
@@ -8405,12 +8261,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -8517,12 +8367,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -9022,12 +8866,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -9284,12 +9122,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -9831,12 +9663,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10093,12 +9919,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10500,12 +10320,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10762,12 +10576,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10874,12 +10682,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -11379,12 +11181,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -11641,12 +11437,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -12171,12 +11961,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -12433,12 +12217,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -12840,12 +12618,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -13102,12 +12874,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -13214,12 +12980,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -13719,12 +13479,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -13981,12 +13735,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -14511,12 +14259,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -14773,12 +14515,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -15180,12 +14916,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -15442,12 +15172,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -15554,12 +15278,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -16059,12 +15777,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -16321,12 +16033,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -16851,12 +16557,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -17113,12 +16813,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -17520,12 +17214,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -17782,12 +17470,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -17894,12 +17576,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -18399,12 +18075,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -18661,12 +18331,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. diff --git a/docs/dyn/chat_v1.spaces.spaceEvents.html b/docs/dyn/chat_v1.spaces.spaceEvents.html index a41bb93e4e..32d02d50e0 100644 --- a/docs/dyn/chat_v1.spaces.spaceEvents.html +++ b/docs/dyn/chat_v1.spaces.spaceEvents.html @@ -527,12 +527,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -789,12 +783,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -1196,12 +1184,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -1458,12 +1440,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -1570,12 +1546,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -2075,12 +2045,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -2337,12 +2301,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -2862,12 +2820,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -3124,12 +3076,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -3531,12 +3477,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -3793,12 +3733,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -3905,12 +3839,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -4410,12 +4338,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -4672,12 +4594,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -5197,12 +5113,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -5459,12 +5369,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -5866,12 +5770,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -6128,12 +6026,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -6240,12 +6132,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -6745,12 +6631,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -7007,12 +6887,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -7530,12 +7404,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -7792,12 +7660,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -8199,12 +8061,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -8461,12 +8317,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -8573,12 +8423,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -9078,12 +8922,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -9340,12 +9178,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -9861,12 +9693,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10123,12 +9949,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10530,12 +10350,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10792,12 +10606,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -10904,12 +10712,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -11409,12 +11211,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -11671,12 +11467,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -12192,12 +11982,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -12454,12 +12238,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -12861,12 +12639,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -13123,12 +12895,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -13235,12 +13001,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -13740,12 +13500,6 @@
@@ -13740,12 +13500,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -14002,12 +13756,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -14922,12 +14670,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -15184,12 +14926,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -15591,12 +15327,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -15853,12 +15583,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -15965,12 +15689,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -16470,12 +16188,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -16732,12 +16444,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -17257,12 +16963,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -17519,12 +17219,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -17926,12 +17620,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -18188,12 +17876,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -18300,12 +17982,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -18805,12 +18481,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -19067,12 +18737,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -19592,12 +19256,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -19854,12 +19512,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -20261,12 +19913,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -20523,12 +20169,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -20635,12 +20275,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -21140,12 +20774,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -21402,12 +21030,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -21925,12 +21547,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -22187,12 +21803,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -22594,12 +22204,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -22856,12 +22460,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -22968,12 +22566,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -23473,12 +23065,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -23735,12 +23321,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -24256,12 +23836,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -24518,12 +24092,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -24925,12 +24493,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -25187,12 +24749,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -25299,12 +24855,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -25804,12 +25354,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -26066,12 +25610,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -26587,12 +26125,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -26849,12 +26381,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -27256,12 +26782,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -27518,12 +27038,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -27630,12 +27144,6 @@

Method Details

"items": [ # An array of the SelectionItem objects. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -28135,12 +27643,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. @@ -28397,12 +27899,6 @@

Method Details

"items": [ # An array of selectable items. For example, an array of radio buttons or checkboxes. Supports up to 100 items. { # An item that users can select in a selection input, such as a checkbox or switch. Supports up to 100 items. [Google Workspace add-ons and Chat apps](https://developers.google.com/workspace/extend): "bottomText": "A String", # For multiselect menus, a text description or label that's displayed below the item's `text` field. - "materialIcon": { # A [Google Material Icon](https://fonts.google.com/icons), which includes over 2500+ options. For example, to display a [checkbox icon](https://fonts.google.com/icons?selected=Material%20Symbols%20Outlined%3Acheck_box%3AFILL%400%3Bwght%40400%3BGRAD%400%3Bopsz%4048) with customized weight and grade, write the following: ``` { "name": "check_box", "fill": true, "weight": 300, "grade": -25 } ``` [Google Chat apps](https://developers.google.com/workspace/chat): - "fill": True or False, # Whether the icon renders as filled. Default value is false. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "grade": 42, # Weight and grade affect a symbol’s thickness. Adjustments to grade are more granular than adjustments to weight and have a small impact on the size of the symbol. Choose from {-25, 0, 200}. If absent, default value is 0. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - "name": "A String", # The icon name defined in the [Google Material Icon](https://fonts.google.com/icons), for example, `check_box`. Any invalid names are abandoned and replaced with empty string and results in the icon failing to render. - "weight": 42, # The stroke weight of the icon. Choose from {100, 200, 300, 400, 500, 600, 700}. If absent, default value is 400. If any other value is specified, the default value is used. To preview different icon settings, go to [Google Font Icons](https://fonts.google.com/icons) and adjust the settings under **Customize**. - }, "selected": True or False, # Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item. "startIconUri": "A String", "text": "A String", # The text that identifies or describes the item to users. diff --git a/docs/dyn/cloudkms_v1.folders.html b/docs/dyn/cloudkms_v1.folders.html index 22212a7904..59a11198c9 100644 --- a/docs/dyn/cloudkms_v1.folders.html +++ b/docs/dyn/cloudkms_v1.folders.html @@ -80,9 +80,15 @@

Instance Methods

getAutokeyConfig(name, x__xgafv=None)

Returns the AutokeyConfig for a folder.

+

+ getKajPolicyConfig(name, x__xgafv=None)

+

Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.

updateAutokeyConfig(name, body=None, updateMask=None, x__xgafv=None)

Updates the AutokeyConfig for a folder. The caller must have both `cloudkms.autokeyConfigs.update` permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` permission on the provided key project. A KeyHandle creation in the folder's descendant projects will use this configuration to determine where to create the resulting CryptoKey.

+

+ updateKajPolicyConfig(name, body=None, updateMask=None, x__xgafv=None)

+

Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.

Method Details

close()
@@ -104,12 +110,37 @@

Method Details

An object of the form:

    { # Cloud KMS Autokey configuration for a folder.
+  "etag": "A String", # Optional. A checksum computed by the server based on the value of other fields. This may be sent on update requests to ensure that the client has an up-to-date value before proceeding. The request will be rejected with an ABORTED error on a mismatched etag.
  "keyProject": "A String", # Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision a new CryptoKey when a KeyHandle is created. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions). A request with an empty key project field will clear the configuration.
  "name": "A String", # Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
  "state": "A String", # Output only. The state for the AutokeyConfig.
}
+
+ getKajPolicyConfig(name, x__xgafv=None)
+
Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.
+
+Args:
+  name: string, Required. The name of the KeyAccessJustificationsPolicyConfig to get. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
+
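For orientation, a minimal sketch of calling the new method through the google-api-python-client, assuming default application credentials and a placeholder folder number `123`:

```python
import googleapiclient.discovery

# Build the Cloud KMS client; credentials are resolved from the environment.
kms = googleapiclient.discovery.build("cloudkms", "v1")

# Fetch the folder-level Key Access Justifications policy config.
config = kms.folders().getKajPolicyConfig(
    name="folders/123/kajPolicyConfig"  # placeholder folder number
).execute()
print(config.get("defaultKeyAccessJustificationPolicy", {}))
```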
updateAutokeyConfig(name, body=None, updateMask=None, x__xgafv=None)
Updates the AutokeyConfig for a folder. The caller must have both `cloudkms.autokeyConfigs.update` permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` permission on the provided key project. A KeyHandle creation in the folder's descendant projects will use this configuration to determine where to create the resulting CryptoKey.
@@ -120,6 +151,7 @@ 

Method Details

The object takes the form of:

{ # Cloud KMS Autokey configuration for a folder.
+  "etag": "A String", # Optional. A checksum computed by the server based on the value of other fields. This may be sent on update requests to ensure that the client has an up-to-date value before proceeding. The request will be rejected with an ABORTED error on a mismatched etag.
  "keyProject": "A String", # Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision a new CryptoKey when a KeyHandle is created. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions). A request with an empty key project field will clear the configuration.
  "name": "A String", # Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
  "state": "A String", # Output only. The state for the AutokeyConfig.
@@ -135,10 +167,48 @@

Method Details

An object of the form:

    { # Cloud KMS Autokey configuration for a folder.
+  "etag": "A String", # Optional. A checksum computed by the server based on the value of other fields. This may be sent on update requests to ensure that the client has an up-to-date value before proceeding. The request will be rejected with an ABORTED error on a mismatched etag.
  "keyProject": "A String", # Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision a new CryptoKey when a KeyHandle is created. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions). A request with an empty key project field will clear the configuration.
  "name": "A String", # Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
  "state": "A String", # Output only. The state for the AutokeyConfig.
}
+
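The new `etag` field enables an optimistic-concurrency read-modify-write on the AutokeyConfig. A minimal sketch, assuming a placeholder folder number and key project; per the field description above, a stale etag causes the update to be rejected with ABORTED:

```python
import googleapiclient.discovery

kms = googleapiclient.discovery.build("cloudkms", "v1")
name = "folders/123/autokeyConfig"  # placeholder folder number

# Read the current config so the update carries a fresh etag.
current = kms.folders().getAutokeyConfig(name=name).execute()

body = {
    "keyProject": "projects/my-key-project",  # placeholder key project
    "etag": current.get("etag"),  # a mismatched etag is rejected with ABORTED
}
updated = kms.folders().updateAutokeyConfig(
    name=name,
    body=body,
    updateMask="keyProject",
).execute()
print(updated.get("state"))
```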
+ updateKajPolicyConfig(name, body=None, updateMask=None, x__xgafv=None)
+
Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.
+
+Args:
+  name: string, Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig". (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
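A companion sketch for the update path, assuming a placeholder folder number; `CUSTOMER_INITIATED_ACCESS` stands in for whichever AccessReason values your policy should allow:

```python
import googleapiclient.discovery

kms = googleapiclient.discovery.build("cloudkms", "v1")

# Restrict the default policy applied to new CryptoKeys in this folder.
body = {
    "defaultKeyAccessJustificationPolicy": {
        "allowedAccessReasons": ["CUSTOMER_INITIATED_ACCESS"],  # example reason
    },
}
result = kms.folders().updateKajPolicyConfig(
    name="folders/123/kajPolicyConfig",  # placeholder folder number
    body=body,
    updateMask="defaultKeyAccessJustificationPolicy",
).execute()
print(result["name"])
```

Note that an empty `allowedAccessReasons` list is not a no-op: per the field description, zero allowed reasons makes all encrypt, decrypt, and sign operations for keys governed by the policy fail.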
+
\ No newline at end of file
diff --git a/docs/dyn/cloudkms_v1.html b/docs/dyn/cloudkms_v1.html
index 56ddcb4f02..20075d76e7 100644
--- a/docs/dyn/cloudkms_v1.html
+++ b/docs/dyn/cloudkms_v1.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the folders Resource.

+

+ organizations()
+

+

Returns the organizations Resource.

+

projects()

diff --git a/docs/dyn/cloudkms_v1.organizations.html b/docs/dyn/cloudkms_v1.organizations.html
new file mode 100644
index 0000000000..b212209926
--- /dev/null
+++ b/docs/dyn/cloudkms_v1.organizations.html
@@ -0,0 +1,153 @@
+
+
+

Cloud Key Management Service (KMS) API . organizations

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ getKajPolicyConfig(name, x__xgafv=None)

+

Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.

+

+ updateKajPolicyConfig(name, body=None, updateMask=None, x__xgafv=None)

+

Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.

+

Method Details

+
+ close()
+
Close httplib2 connections.
+
+
+
+ getKajPolicyConfig(name, x__xgafv=None)
+
Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.
+
+Args:
+  name: string, Required. The name of the KeyAccessJustificationsPolicyConfig to get. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
+
+
+ updateKajPolicyConfig(name, body=None, updateMask=None, x__xgafv=None)
+
Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.
+
+Args:
+  name: string, Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig". (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
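The organization-level surface mirrors the folder-level one; only the collection in the resource name changes. A minimal sketch with a placeholder organization number:

```python
import googleapiclient.discovery

kms = googleapiclient.discovery.build("cloudkms", "v1")

# Same singleton resource shape as folders and projects;
# only the parent collection in the name differs.
config = kms.organizations().getKajPolicyConfig(
    name="organizations/456/kajPolicyConfig"  # placeholder organization number
).execute()
```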
+
+
\ No newline at end of file
diff --git a/docs/dyn/cloudkms_v1.projects.html b/docs/dyn/cloudkms_v1.projects.html
index e0e0c98a1c..22142553a7 100644
--- a/docs/dyn/cloudkms_v1.projects.html
+++ b/docs/dyn/cloudkms_v1.projects.html
@@ -82,15 +82,51 @@

Instance Methods

close()

Close httplib2 connections.

+

+ getKajPolicyConfig(name, x__xgafv=None)

+

Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.

showEffectiveAutokeyConfig(parent, x__xgafv=None)

Returns the effective Cloud KMS Autokey configuration for a given project.

+

+ showEffectiveKeyAccessJustificationsEnrollmentConfig(project, x__xgafv=None)

+

Returns the KeyAccessJustificationsEnrollmentConfig of the resource closest to the given project in hierarchy.

+

+ showEffectiveKeyAccessJustificationsPolicyConfig(project, x__xgafv=None)

+

Returns the KeyAccessJustificationsPolicyConfig of the resource closest to the given project in hierarchy.

+

+ updateKajPolicyConfig(name, body=None, updateMask=None, x__xgafv=None)

+

Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.

Method Details

close()
Close httplib2 connections.
+
+ getKajPolicyConfig(name, x__xgafv=None)
+
Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.
+
+Args:
+  name: string, Required. The name of the KeyAccessJustificationsPolicyConfig to get. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
+
showEffectiveAutokeyConfig(parent, x__xgafv=None)
Returns the effective Cloud KMS Autokey configuration for a given project.
@@ -110,4 +146,97 @@ 

Method Details

}
+
+ showEffectiveKeyAccessJustificationsEnrollmentConfig(project, x__xgafv=None)
+
Returns the KeyAccessJustificationsEnrollmentConfig of the resource closest to the given project in hierarchy.
+
+Args:
+  project: string, Required. The number or id of the project to get the effective KeyAccessJustificationsEnrollmentConfig for. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for KeyAccessJustificationsConfig.ShowEffectiveKeyAccessJustificationsEnrollmentConfig
+  "externalConfig": { # The configuration of a protection level for a project's Key Access Justifications enrollment. # The effective KeyAccessJustificationsEnrollmentConfig for external keys.
+    "auditLogging": True or False, # Whether the project has KAJ logging enabled.
+    "policyEnforcement": True or False, # Whether the project is enrolled in KAJ policy enforcement.
+  },
+  "hardwareConfig": { # The configuration of a protection level for a project's Key Access Justifications enrollment. # The effective KeyAccessJustificationsEnrollmentConfig for hardware keys.
+    "auditLogging": True or False, # Whether the project has KAJ logging enabled.
+    "policyEnforcement": True or False, # Whether the project is enrolled in KAJ policy enforcement.
+  },
+  "softwareConfig": { # The configuration of a protection level for a project's Key Access Justifications enrollment. # The effective KeyAccessJustificationsEnrollmentConfig for software keys.
+    "auditLogging": True or False, # Whether the project has KAJ logging enabled.
+    "policyEnforcement": True or False, # Whether the project is enrolled in KAJ policy enforcement.
+  },
+}
+
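A minimal sketch of resolving the effective enrollment for a project, with a placeholder project ID; each protection level carries its own logging and enforcement flags:

```python
import googleapiclient.discovery

kms = googleapiclient.discovery.build("cloudkms", "v1")

enrollment = kms.projects().showEffectiveKeyAccessJustificationsEnrollmentConfig(
    project="projects/my-project"  # placeholder project id
).execute()

# Each protection level reports KAJ logging and enforcement separately.
for level in ("softwareConfig", "hardwareConfig", "externalConfig"):
    cfg = enrollment.get(level, {})
    print(level, cfg.get("auditLogging"), cfg.get("policyEnforcement"))
```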
+
+
+ showEffectiveKeyAccessJustificationsPolicyConfig(project, x__xgafv=None)
+
Returns the KeyAccessJustificationsPolicyConfig of the resource closest to the given project in hierarchy.
+
+Args:
+  project: string, Required. The number or id of the project to get the effective KeyAccessJustificationsPolicyConfig. In the format of "projects/{|}" (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for KeyAccessJustificationsConfig.ShowEffectiveKeyAccessJustificationsPolicyConfig.
+  "effectiveKajPolicy": { # A singleton configuration for Key Access Justifications policies. # The effective KeyAccessJustificationsPolicyConfig.
+    "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+      "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+        "A String",
+      ],
+    },
+    "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+  },
+}
+
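And a matching sketch for the effective policy lookup, again with a placeholder project ID; the response nests the resolved config under `effectiveKajPolicy`:

```python
import googleapiclient.discovery

kms = googleapiclient.discovery.build("cloudkms", "v1")

resp = kms.projects().showEffectiveKeyAccessJustificationsPolicyConfig(
    project="projects/my-project"  # placeholder project id
).execute()

# The closest ancestor's policy config, resolved for this project.
policy = resp.get("effectiveKajPolicy", {}).get(
    "defaultKeyAccessJustificationPolicy", {}
)
print(policy.get("allowedAccessReasons", []))
```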
+
+
+ updateKajPolicyConfig(name, body=None, updateMask=None, x__xgafv=None)
+
Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.
+
+Args:
+  name: string, Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig". (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A singleton configuration for Key Access Justifications policies.
+  "defaultKeyAccessJustificationPolicy": { # A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey. # Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry.
+    "allowedAccessReasons": [ # The list of allowed reasons for access to a CryptoKey. Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.
+      "A String",
+    ],
+  },
+  "name": "A String", # Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of "{organizations|folders|projects}/*/kajPolicyConfig".
+}
+
+
\ No newline at end of file
diff --git a/docs/dyn/cloudsupport_v2.cases.attachments.html b/docs/dyn/cloudsupport_v2.cases.attachments.html
index 79bf2aba17..3bfad47568 100644
--- a/docs/dyn/cloudsupport_v2.cases.attachments.html
+++ b/docs/dyn/cloudsupport_v2.cases.attachments.html
@@ -111,13 +111,13 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }, ], diff --git a/docs/dyn/cloudsupport_v2.cases.comments.html b/docs/dyn/cloudsupport_v2.cases.comments.html index 7efb2d9608..5d5ff70b9d 100644 --- a/docs/dyn/cloudsupport_v2.cases.comments.html +++ b/docs/dyn/cloudsupport_v2.cases.comments.html @@ -106,7 +106,7 @@

Method Details

"createTime": "A String", # Output only. The time when the comment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent who created the comment. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -127,7 +127,7 @@

Method Details

"createTime": "A String", # Output only. The time when the comment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent who created the comment. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -159,7 +159,7 @@

Method Details

"createTime": "A String", # Output only. The time when the comment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent who created the comment. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, diff --git a/docs/dyn/cloudsupport_v2.cases.html b/docs/dyn/cloudsupport_v2.cases.html index 3e9cc5f315..8a99c6c010 100644 --- a/docs/dyn/cloudsupport_v2.cases.html +++ b/docs/dyn/cloudsupport_v2.cases.html @@ -141,7 +141,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -149,7 +149,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -179,7 +179,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -187,7 +187,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -215,7 +215,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -223,7 +223,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -268,7 +268,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -276,7 +276,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -311,7 +311,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -319,7 +319,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -359,7 +359,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -367,7 +367,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -401,7 +401,7 @@

Method Details

Update a case. Only some fields can be updated. EXAMPLES: cURL: ```shell case="projects/some-project/cases/43595344" curl \ --request PATCH \ --header "Authorization: Bearer $(gcloud auth print-access-token)" \ --header "Content-Type: application/json" \ --data '{ "priority": "P1" }' \ "https://cloudsupport.googleapis.com/v2/$case?updateMask=priority" ``` Python: ```python import googleapiclient.discovery api_version = "v2" supportApiService = googleapiclient.discovery.build( serviceName="cloudsupport", version=api_version, discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", ) request = supportApiService.cases().patch( name="projects/some-project/cases/43112854", body={ "displayName": "This is Now a New Title", "priority": "P2", }, ) print(request.execute()) ```
 
 Args:
-  name: string, The resource name for the case. (required)
+  name: string, Identifier. The resource name for the case. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -414,7 +414,7 @@ 

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -422,7 +422,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -451,7 +451,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -459,7 +459,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. @@ -499,7 +499,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -507,7 +507,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "state": "A String", # Output only. The current status of the support case. "subscriberEmailAddresses": [ # The email addresses to receive updates on this case. diff --git a/docs/dyn/cloudsupport_v2.media.html b/docs/dyn/cloudsupport_v2.media.html index 7aeab137ad..8f141a800d 100644 --- a/docs/dyn/cloudsupport_v2.media.html +++ b/docs/dyn/cloudsupport_v2.media.html @@ -364,13 +364,13 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }, } @@ -389,13 +389,13 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }
diff --git a/docs/dyn/cloudsupport_v2beta.cases.attachments.html b/docs/dyn/cloudsupport_v2beta.cases.attachments.html index 0e67ac44db..c75a4e933a 100644 --- a/docs/dyn/cloudsupport_v2beta.cases.attachments.html +++ b/docs/dyn/cloudsupport_v2beta.cases.attachments.html @@ -111,13 +111,13 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }, ], diff --git a/docs/dyn/cloudsupport_v2beta.cases.comments.html b/docs/dyn/cloudsupport_v2beta.cases.comments.html index 8f6e5e74f6..0c80605f0d 100644 --- a/docs/dyn/cloudsupport_v2beta.cases.comments.html +++ b/docs/dyn/cloudsupport_v2beta.cases.comments.html @@ -106,7 +106,7 @@

Method Details

"createTime": "A String", # Output only. The time when the comment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent who created the comment. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -127,7 +127,7 @@

Method Details

"createTime": "A String", # Output only. The time when the comment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent who created the comment. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -159,7 +159,7 @@

Method Details

"createTime": "A String", # Output only. The time when the comment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent who created the comment. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, diff --git a/docs/dyn/cloudsupport_v2beta.cases.html b/docs/dyn/cloudsupport_v2beta.cases.html index 01ef81f287..730e148766 100644 --- a/docs/dyn/cloudsupport_v2beta.cases.html +++ b/docs/dyn/cloudsupport_v2beta.cases.html @@ -150,7 +150,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -158,7 +158,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -192,7 +192,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -200,7 +200,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -232,7 +232,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -240,7 +240,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -289,7 +289,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -297,7 +297,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -336,7 +336,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -344,7 +344,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -393,7 +393,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -401,7 +401,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -436,7 +436,7 @@

Method Details

Update a case. Only some fields can be updated. EXAMPLES: cURL: ```shell case="projects/some-project/cases/43595344" curl \ --request PATCH \ --header "Authorization: Bearer $(gcloud auth print-access-token)" \ --header "Content-Type: application/json" \ --data '{ "priority": "P1" }' \ "https://cloudsupport.googleapis.com/v2/$case?updateMask=priority" ``` Python: ```python import googleapiclient.discovery api_version = "v2" supportApiService = googleapiclient.discovery.build( serviceName="cloudsupport", version=api_version, discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", ) request = supportApiService.cases().patch( name="projects/some-project/cases/43112854", body={ "displayName": "This is Now a New Title", "priority": "P2", }, ) print(request.execute()) ```
 
 Args:
-  name: string, The resource name for the case. (required)
+  name: string, Identifier. The resource name for the case. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -452,7 +452,7 @@ 

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -460,7 +460,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -493,7 +493,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -501,7 +501,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -545,7 +545,7 @@

Method Details

"createTime": "A String", # Output only. The time this case was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # The user who created the case. Note: The name and email will be obfuscated if the case was created by Google Support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -553,7 +553,7 @@

Method Details

"displayName": "A String", # The short summary of the issue reported in this case. "escalated": True or False, # Whether the case is currently escalated. "languageCode": "A String", # The language the user has requested to receive support in. This should be a BCP 47 language code (e.g., `"en"`, `"zh-CN"`, `"zh-TW"`, `"ja"`, `"ko"`). If no language or an unsupported language is specified, this field defaults to English (en). Language selection during case creation may affect your available support options. For a list of supported languages and their support working hours, see: https://cloud.google.com/support/docs/language-working-hours - "name": "A String", # The resource name for the case. + "name": "A String", # Identifier. The resource name for the case. "priority": "A String", # The priority of this case. "severity": "A String", # REMOVED. The severity of this case. Use priority instead. "state": "A String", # Output only. The current status of the support case. @@ -607,13 +607,13 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }, "comment": { # A comment associated with a support case. Case comments are the primary way for Google Support to communicate with a user who has opened a case. When a user responds to Google Support, the user's responses also appear as comments. # Output only. A comment added to the case. @@ -621,7 +621,7 @@

Method Details

"createTime": "A String", # Output only. The time when the comment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent who created the comment. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, @@ -632,19 +632,19 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }, "emailMessage": { # An email associated with a support case. # Output only. An email message received in reply to the case. "actor": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user or Google Support agent that created this email message. This is inferred from the headers on the email message. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. 
Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, diff --git a/docs/dyn/cloudsupport_v2beta.media.html b/docs/dyn/cloudsupport_v2beta.media.html index 4c5fe76302..a75fa1e26b 100644 --- a/docs/dyn/cloudsupport_v2beta.media.html +++ b/docs/dyn/cloudsupport_v2beta.media.html @@ -364,13 +364,13 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }, } @@ -389,13 +389,13 @@

Method Details

"createTime": "A String", # Output only. The time at which the attachment was created. "creator": { # An Actor represents an entity that performed an action. For example, an actor could be a user who posted a comment on a support case, a user who uploaded an attachment, or a service account that created a support case. # Output only. The user who uploaded the attachment. Note, the name and email will be obfuscated if the attachment was uploaded by Google support. "displayName": "A String", # The name to display for the actor. If not provided, it is inferred from credentials supplied during case creation. When an email is provided, a display name must also be provided. This will be obfuscated if the user is a Google Support agent. - "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead. + "email": "A String", # The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead. "googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, "filename": "A String", # The filename of the attachment (e.g. `"graph.jpg"`). "mimeType": "A String", # Output only. The MIME type of the attachment (e.g. text/plain). - "name": "A String", # Output only. The resource name of the attachment. + "name": "A String", # Output only. Identifier. The resource name of the attachment. "sizeBytes": "A String", # Output only. The size of the attachment in bytes. }
diff --git a/docs/dyn/composer_v1.projects.locations.environments.html b/docs/dyn/composer_v1.projects.locations.environments.html index fd9c0b5698..f40250a7b7 100644 --- a/docs/dyn/composer_v1.projects.locations.environments.html +++ b/docs/dyn/composer_v1.projects.locations.environments.html @@ -128,6 +128,9 @@

Instance Methods

pollAirflowCommand(environment, body=None, x__xgafv=None)

Polls Airflow CLI command execution and fetches logs.

+

+ restartWebServer(name, body=None, x__xgafv=None)

+

Restart Airflow web server.

saveSnapshot(environment, body=None, x__xgafv=None)

Creates a snapshot of a Cloud Composer environment. As a result of this operation, a snapshot of the environment's state is stored in the location specified in the SaveSnapshotRequest.

@@ -1189,6 +1192,47 @@

Method Details

}
+
+ restartWebServer(name, body=None, x__xgafv=None) +
Restart Airflow web server.
+
+Args:
+  name: string, Required. The resource name of the environment to restart the web server for, in the form: "projects/{projectId}/locations/{locationId}/environments/{environmentId}" (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Restart Airflow web server.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
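For orientation, a minimal sketch of calling this method through the google-api-python-client dynamic client follows; the project, location, and environment IDs are placeholders, and default application credentials are assumed:

  from googleapiclient.discovery import build

  service = build("composer", "v1")

  # Hypothetical environment path; substitute your own project, location,
  # and environment IDs.
  name = "projects/my-project/locations/us-central1/environments/my-environment"

  # The request body is an empty RestartWebServerRequest.
  operation = (
      service.projects()
      .locations()
      .environments()
      .restartWebServer(name=name, body={})
      .execute()
  )

  # The method returns a long-running Operation; "done" flips to True once the
  # restart has finished, and "error" is populated on failure.
  print(operation["name"], operation.get("done", False))

The same call shape applies to the composer_v1beta1 surface documented below, where the method was already available.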
saveSnapshot(environment, body=None, x__xgafv=None)
Creates a snapshot of a Cloud Composer environment. As a result of this operation, a snapshot of the environment's state is stored in the location specified in the SaveSnapshotRequest.
diff --git a/docs/dyn/composer_v1beta1.projects.locations.environments.html b/docs/dyn/composer_v1beta1.projects.locations.environments.html
index 96b10595a8..79ed75f3a1 100644
--- a/docs/dyn/composer_v1beta1.projects.locations.environments.html
+++ b/docs/dyn/composer_v1beta1.projects.locations.environments.html
@@ -1205,7 +1205,7 @@ 

Method Details

Restart Airflow web server.
 
 Args:
-  name: string, The resource name of the environment to restart the web server for, in the form: "projects/{projectId}/locations/{locationId}/environments/{environmentId}" (required)
+  name: string, Required. The resource name of the environment to restart the web server for, in the form: "projects/{projectId}/locations/{locationId}/environments/{environmentId}" (required)
   body: object, The request body.
     The object takes the form of:
 
diff --git a/docs/dyn/compute_alpha.advice.html b/docs/dyn/compute_alpha.advice.html
index 3191809d09..1b89d4a827 100644
--- a/docs/dyn/compute_alpha.advice.html
+++ b/docs/dyn/compute_alpha.advice.html
@@ -77,6 +77,9 @@ 

Instance Methods

calendarMode(project, region, body=None, x__xgafv=None)

Advises how, where, and when to create the requested number of instances with the specified accelerators, within the specified time and location limits. The method recommends creating future reservations for the requested resources.

+

+ capacity(project, region, size, body=None, x__xgafv=None)

+

Advises on making real-time decisions (such as choosing a zone or machine type) during deployment to maximize your chances of obtaining capacity.

close()

Close httplib2 connections.

@@ -162,6 +165,70 @@

Method Details

}
+
+ capacity(project, region, size, body=None, x__xgafv=None) +
Advises on making real-time decisions (such as choosing a zone or machine type) during deployment to maximize your chances of obtaining capacity.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region for this request. (required)
+  size: integer, Size of requested capacity. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A request to provide Assistant Scores. These scores determine VM obtainability and preemption likelihood.
+  "distributionPolicy": { # Policy specifying the distribution of instances across zones within the requested region.
+    "targetShape": "A String", # The distribution shape to which the group converges.
+    "zones": [ # Zones where Capacity Advisor looks for capacity.
+      {
+        "zone": "A String", # The URL of the zone.
+      },
+    ],
+  },
+  "instanceFlexibilityPolicy": { # Specification of alternative, flexible instance subsets. # Policy for instance selectors.
+    "instanceSelections": { # Named instance selections configure properties. The key is an arbitrary, unique RFC1035 string that identifies the instance selection.
+      "a_key": { # Machine specification.
+        "machineTypes": [ # Full machine-type names, e.g. "n1-standard-16".
+          "A String",
+        ],
+      },
+    },
+  },
+  "instanceProperties": { # Instance provisining properties. # Instance properties for this request.
+    "scheduling": { # Defines the instance scheduling options. # Specifies the scheduling options.
+      "provisioningModel": "A String", # Specifies the provisioning model of the instance.
+    },
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A response contains multiple scoring recommendations.
+  "recommendations": [ # Initially the API will provide one recommendation which balances the individual scores according to Google's preference.
+    {
+      "scores": { # The Scores message groups information about a shard of capacity.
+        "obtainability": 3.14, # The obtainability score indicates the likelihood of successfully obtaining (provisioning) the requested number of VMs. The score range is 0.0 through 1.0. Higher is better.
+        "spotPreemption": 3.14, # The preemption score indicates the likelihood that your Spot VMs is preempted. For more information about the preemption process, see Preemption of Spot VMs. The score range is 0.0 through 1.0. Higher is better.
+      },
+      "shards": [
+        { # Shards represent blocks of uniform capacity in recommendations. Each shard is for a single zone, single instance selection, and a single machine shape. Each shard defines a size expressed as the number of VMs.
+          "instanceCount": 42,
+          "machineType": "A String", # The machine type corresponds to the instance selection in the request.
+          "provisioningModel": "A String", # Provisioning model of the recommended capacity.
+          "zone": "A String", # The zone name for this shard.
+        },
+      ],
+    },
+  ],
+}
+
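To make the request shape concrete, here is a minimal sketch of a capacity advice call; the project, region, zone URL, selection key, machine types, and provisioning model are illustrative placeholders, not recommendations:

  from googleapiclient.discovery import build

  service = build("compute", "alpha")

  # Request body mirroring the schema above; all values are placeholders.
  body = {
      "distributionPolicy": {
          "targetShape": "ANY_SINGLE_ZONE",
          "zones": [
              {"zone": "https://www.googleapis.com/compute/alpha/projects/my-project/zones/us-central1-a"},
          ],
      },
      "instanceFlexibilityPolicy": {
          # The key is an arbitrary RFC1035 name for this instance selection.
          "instanceSelections": {
              "selection-1": {"machineTypes": ["n2-standard-16", "n1-standard-16"]},
          },
      },
      "instanceProperties": {
          "scheduling": {"provisioningModel": "SPOT"},
      },
  }

  response = service.advice().capacity(
      project="my-project", region="us-central1", size=10, body=body
  ).execute()

  # Each recommendation carries scores plus the shards that realize them.
  for rec in response.get("recommendations", []):
      print(rec["scores"], rec["shards"])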
+
close()
Close httplib2 connections.
diff --git a/docs/dyn/compute_alpha.backendServices.html b/docs/dyn/compute_alpha.backendServices.html
index 48a9eaaf15..b21bbfebbd 100644
--- a/docs/dyn/compute_alpha.backendServices.html
+++ b/docs/dyn/compute_alpha.backendServices.html
@@ -482,7 +482,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1279,7 +1279,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2156,7 +2156,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2777,7 +2777,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -3296,7 +3296,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -3831,7 +3831,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -4835,7 +4835,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. diff --git a/docs/dyn/compute_alpha.html b/docs/dyn/compute_alpha.html index d5a5847fee..12117303be 100644 --- a/docs/dyn/compute_alpha.html +++ b/docs/dyn/compute_alpha.html @@ -374,6 +374,11 @@

Instance Methods

Returns the regionAutoscalers Resource.

+

+ regionBackendBuckets() +

+

Returns the regionBackendBuckets Resource.

+

regionBackendServices()

diff --git a/docs/dyn/compute_alpha.instanceGroupManagers.html b/docs/dyn/compute_alpha.instanceGroupManagers.html
index 5ae586254e..9f6decc7b0 100644
--- a/docs/dyn/compute_alpha.instanceGroupManagers.html
+++ b/docs/dyn/compute_alpha.instanceGroupManagers.html
@@ -482,10 +482,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -546,8 +546,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -1501,10 +1501,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -1565,8 +1565,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -1759,10 +1759,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -1823,8 +1823,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -2147,10 +2147,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -2211,8 +2211,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -2765,10 +2765,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -2829,8 +2829,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -4695,10 +4695,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -4759,8 +4759,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. diff --git a/docs/dyn/compute_alpha.regionBackendBuckets.html b/docs/dyn/compute_alpha.regionBackendBuckets.html new file mode 100644 index 0000000000..a88f2029b4 --- /dev/null +++ b/docs/dyn/compute_alpha.regionBackendBuckets.html @@ -0,0 +1,1084 @@ + + + +

Compute Engine API . regionBackendBuckets

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ delete(project, region, backendBucket, forceDelete=None, requestId=None, x__xgafv=None)

+

Deletes the specified regional BackendBucket resource.

+

+ get(project, region, backendBucket, x__xgafv=None)

+

Returns the specified regional BackendBucket resource.

+

+ getIamPolicy(project, region, resource, optionsRequestedPolicyVersion=None, x__xgafv=None)

+

Gets the access control policy for a resource. May be empty if no such policy or resource exists.

+

+ insert(project, region, body=None, requestId=None, x__xgafv=None)

+

Creates a RegionBackendBucket in the specified project in the given scope using the parameters that are included in the request.

+

+ list(project, region, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Retrieves the list of BackendBucket resources available to the specified project in the given region.

+

+ listUsable(project, region, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Retrieves a list of all usable backend buckets in the specified project in the given region.

+

+ listUsable_next()

+

Retrieves the next page of results.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(project, region, backendBucket, body=None, requestId=None, x__xgafv=None)

+

Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.

+

+ setIamPolicy(project, region, resource, body=None, x__xgafv=None)

+

Sets the access control policy on the specified resource. Replaces any existing policy.

+

+ testIamPermissions(project, region, resource, body=None, x__xgafv=None)

+

Returns permissions that a caller has on the specified resource.
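Before the per-method details, a short sketch of the usual list/list_next pagination loop for this collection; the project and region are placeholders, and default application credentials are assumed:

  from googleapiclient.discovery import build

  service = build("compute", "alpha")

  # Issue the first page, then keep asking list_next for the next page until
  # it returns None.
  request = service.regionBackendBuckets().list(project="my-project", region="us-central1")
  while request is not None:
      response = request.execute()
      for bucket in response.get("items", []):
          print(bucket["name"], bucket.get("bucketName"))
      request = service.regionBackendBuckets().list_next(
          previous_request=request, previous_response=response
      )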

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ delete(project, region, backendBucket, forceDelete=None, requestId=None, x__xgafv=None) +
Deletes the specified regional BackendBucket resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region scoping this request. (required)
+  backendBucket: string, Name of the BackendBucket resource to delete. (required)
+  forceDelete: boolean, Force delete the backend bucket even if it is still in use by other resources. It's intended to be used internally only for requests from wipeout.
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if the original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID, with the exception that the zero UUID (00000000-0000-0000-0000-000000000000) is not supported.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources. Note that completed Operation resources have a limited retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is in RFC3339 text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation, this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a `404` means the resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested. This value is in RFC3339 text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # [Output Only] Type of the resource. Always `compute#operation` for Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # [Output Only] An ID that represents a group of operations, such as when a group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+  "setCommonInstanceMetadataOperationMetadata": { # [Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # [Output Only] If state is `ABANDONED` or `FAILED`, this field is populated.
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the following: `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the disk that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only applicable when performing per-zone operations.
+}
+
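A minimal sketch of deleting a regional backend bucket and waiting on the returned Operation; it assumes the standard regionOperations resource is used for polling, and all resource names are placeholders:

  import time

  from googleapiclient.discovery import build

  service = build("compute", "alpha")

  op = service.regionBackendBuckets().delete(
      project="my-project", region="us-central1", backendBucket="my-bucket-backend"
  ).execute()

  # Poll the regional operation until its status reaches DONE, then surface
  # any error recorded on the operation.
  while op["status"] != "DONE":
      time.sleep(2)
      op = service.regionOperations().get(
          project="my-project", region="us-central1", operation=op["name"]
      ).execute()
  if "error" in op:
      raise RuntimeError(op["error"])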
+ +
+ get(project, region, backendBucket, x__xgafv=None) +
Returns the specified regional BackendBucket resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region scoping this request. (required)
+  backendBucket: string, Name of the BackendBucket resource to return. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a Cloud Storage Bucket resource. This Cloud Storage bucket resource is referenced by a URL map of a load balancer. For more information, read Backend Buckets.
+  "bucketName": "A String", # Cloud Storage bucket name.
+  "cdnPolicy": { # Message containing Cloud CDN configuration for a backend bucket. # Cloud CDN configuration for this BackendBucket.
+    "bypassCacheOnRequestHeaders": [ # Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.
+      { # Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.
+        "headerName": "A String", # The header field name to match on when bypassing cache. Values are case-insensitive.
+      },
+    ],
+    "cacheKeyPolicy": { # Message containing what to include in the cache key for a request for Cloud CDN. # The CacheKeyPolicy for this CdnPolicy.
+      "includeHttpHeaders": [ # Allows HTTP request headers (by name) to be used in the cache key.
+        "A String",
+      ],
+      "queryStringWhitelist": [ # Names of query string parameters to include in cache keys. Default parameters are always included. '&' and '=' will be percent encoded and not treated as delimiters.
+        "A String",
+      ],
+    },
+    "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.
+    "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).
+    "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-maxage). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+    "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+    "negativeCaching": True or False, # Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
+    "negativeCachingPolicy": [ # Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
+      { # Specify CDN TTLs for response error codes.
+        "code": 42, # The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.
+        "ttl": 42, # The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+      },
+    ],
+    "requestCoalescing": True or False, # If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
+    "serveWhileStale": 42, # Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-maxage) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.
+    "signedUrlCacheMaxAgeSec": "A String", # Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.
+    "signedUrlKeyNames": [ # [Output Only] Names of the keys for signing request URLs.
+      "A String",
+    ],
+  },
+  "compressionMode": "A String", # Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
+  "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format.
+  "customResponseHeaders": [ # Headers that the Application Load Balancer should add to proxied responses.
+    "A String",
+  ],
+  "description": "A String", # An optional textual description of the resource; provided by the client when the resource is created.
+  "edgeSecurityPolicy": "A String", # [Output Only] The resource URL for the edge security policy associated with this backend bucket.
+  "enableCdn": True or False, # If true, enable Cloud CDN for this BackendBucket.
+  "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+  "kind": "compute#backendBucket", # Type of the resource.
+  "loadBalancingScheme": "A String", # The value can only be INTERNAL_MANAGED for cross-region internal layer 7 load balancer. If loadBalancingScheme is not specified, the backend bucket can be used by classic global external load balancers, or global application external load balancers, or both.
+  "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+  "usedBy": [ # [Output Only] List of resources referencing that backend bucket.
+    {
+      "reference": "A String", # [Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.
+    },
+  ],
+}
+
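+A minimal usage sketch for this method (hedged: the collection name `regionBackendBuckets` is inferred
+from this page, and the project/region/bucket values are placeholders):
+
+  from googleapiclient import discovery
+
+  service = discovery.build('compute', 'alpha')
+  bucket = service.regionBackendBuckets().get(
+      project='my-project',
+      region='us-central1',
+      backendBucket='my-backend-bucket').execute()
+  print(bucket['bucketName'], bucket.get('enableCdn'))
+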
+
+ getIamPolicy(project, region, resource, optionsRequestedPolicyVersion=None, x__xgafv=None)
+Gets the access control policy for a resource. May be empty if no such policy or resource exists.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  optionsRequestedPolicyVersion: integer, Requested IAM Policy version.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
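+The etag-based read-modify-write cycle described above looks roughly like this (a sketch, not a verified
+recipe: `setIamPolicy` is assumed to be the companion method on this collection, as elsewhere in the
+Compute API, and all names are placeholders):
+
+  from googleapiclient import discovery
+
+  service = discovery.build('compute', 'alpha')
+  policy = service.regionBackendBuckets().getIamPolicy(
+      project='my-project',
+      region='us-central1',
+      resource='my-backend-bucket',
+      optionsRequestedPolicyVersion=3).execute()
+  # Modify locally; the returned etag rides along so setIamPolicy can reject
+  # a write that races with a concurrent update.
+  policy.setdefault('bindings', []).append({
+      'role': 'roles/compute.viewer',
+      'members': ['user:alice@example.com'],
+  })
+  service.regionBackendBuckets().setIamPolicy(
+      project='my-project',
+      region='us-central1',
+      resource='my-backend-bucket',
+      body={'policy': policy}).execute()
+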
+
+ insert(project, region, body=None, requestId=None, x__xgafv=None)
+Creates a RegionBackendBucket in the specified project in the given scope using the parameters that are included in the request.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region of this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents a Cloud Storage Bucket resource. This Cloud Storage bucket resource is referenced by a URL map of a load balancer. For more information, read Backend Buckets.
+  "bucketName": "A String", # Cloud Storage bucket name.
+  "cdnPolicy": { # Message containing Cloud CDN configuration for a backend bucket. # Cloud CDN configuration for this BackendBucket.
+    "bypassCacheOnRequestHeaders": [ # Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.
+      { # Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.
+        "headerName": "A String", # The header field name to match on when bypassing cache. Values are case-insensitive.
+      },
+    ],
+    "cacheKeyPolicy": { # Message containing what to include in the cache key for a request for Cloud CDN. # The CacheKeyPolicy for this CdnPolicy.
+      "includeHttpHeaders": [ # Allows HTTP request headers (by name) to be used in the cache key.
+        "A String",
+      ],
+      "queryStringWhitelist": [ # Names of query string parameters to include in cache keys. Default parameters are always included. '&' and '=' will be percent encoded and not treated as delimiters.
+        "A String",
+      ],
+    },
+    "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.
+    "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).
+    "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-maxage). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+    "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+    "negativeCaching": True or False, # Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
+    "negativeCachingPolicy": [ # Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
+      { # Specify CDN TTLs for response error codes.
+        "code": 42, # The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.
+        "ttl": 42, # The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+      },
+    ],
+    "requestCoalescing": True or False, # If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
+    "serveWhileStale": 42, # Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-maxage) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.
+    "signedUrlCacheMaxAgeSec": "A String", # Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.
+    "signedUrlKeyNames": [ # [Output Only] Names of the keys for signing request URLs.
+      "A String",
+    ],
+  },
+  "compressionMode": "A String", # Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
+  "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format.
+  "customResponseHeaders": [ # Headers that the Application Load Balancer should add to proxied responses.
+    "A String",
+  ],
+  "description": "A String", # An optional textual description of the resource; provided by the client when the resource is created.
+  "edgeSecurityPolicy": "A String", # [Output Only] The resource URL for the edge security policy associated with this backend bucket.
+  "enableCdn": True or False, # If true, enable Cloud CDN for this BackendBucket.
+  "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+  "kind": "compute#backendBucket", # Type of the resource.
+  "loadBalancingScheme": "A String", # The value can only be INTERNAL_MANAGED for cross-region internal layer 7 load balancer. If loadBalancingScheme is not specified, the backend bucket can be used by classic global external load balancers, or global application external load balancers, or both.
+  "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+  "usedBy": [ # [Output Only] List of resources referencing that backend bucket.
+    {
+      "reference": "A String", # [Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.
+    },
+  ],
+}
+
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check whether the original operation with the same request ID was received and, if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID, with the exception that the zero UUID (00000000-0000-0000-0000-000000000000) is not supported.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources. Note that completed Operation resources have a limited retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is in RFC3339 text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation, this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a `404` means the resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested. This value is in RFC3339 text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # [Output Only] Type of the resource. Always `compute#operation` for Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # [Output Only] An ID that represents a group of operations, such as when a group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+  "setCommonInstanceMetadataOperationMetadata": { # [Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # [Output Only] If state is `ABANDONED` or `FAILED`, this field is populated.
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the following: `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the disk that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only applicable when performing per-zone operations.
+}
+
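+Putting the pieces together, a hedged sketch of creating a regional BackendBucket and waiting for the
+returned Operation (placeholder names; a `uuid4` satisfies the requestId format described above, and
+`wait_for_region_operation` refers to the polling sketch earlier on this page):
+
+  import uuid
+
+  from googleapiclient import discovery
+
+  service = discovery.build('compute', 'alpha')
+  operation = service.regionBackendBuckets().insert(
+      project='my-project',
+      region='us-central1',
+      requestId=str(uuid.uuid4()),  # makes a retried insert idempotent
+      body={
+          'name': 'my-backend-bucket',
+          'bucketName': 'my-gcs-bucket',
+          'enableCdn': True,
+          'cdnPolicy': {'cacheMode': 'CACHE_ALL_STATIC', 'defaultTtl': 3600},
+      }).execute()
+  wait_for_region_operation('my-project', 'us-central1', operation['name'])
+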
+
+ list(project, region, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)
+Retrieves the list of BackendBucket resources available to the specified project in the given region.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region of this request. (required)
+  filter: string, A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted literal"` `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name "instance", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.
+  maxResults: integer, The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)
+  orderBy: string, Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy="creationTimestamp desc"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.
+  pageToken: string, Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.
+  returnPartialSuccess: boolean, Opt in to partial success behavior, which provides partial results in case of failure. The default value is false. For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Contains a list of BackendBucket resources.
+  "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+  "items": [ # A list of BackendBucket resources.
+    { # Represents a Cloud Storage Bucket resource. This Cloud Storage bucket resource is referenced by a URL map of a load balancer. For more information, read Backend Buckets.
+      "bucketName": "A String", # Cloud Storage bucket name.
+      "cdnPolicy": { # Message containing Cloud CDN configuration for a backend bucket. # Cloud CDN configuration for this BackendBucket.
+        "bypassCacheOnRequestHeaders": [ # Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.
+          { # Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.
+            "headerName": "A String", # The header field name to match on when bypassing cache. Values are case-insensitive.
+          },
+        ],
+        "cacheKeyPolicy": { # Message containing what to include in the cache key for a request for Cloud CDN. # The CacheKeyPolicy for this CdnPolicy.
+          "includeHttpHeaders": [ # Allows HTTP request headers (by name) to be used in the cache key.
+            "A String",
+          ],
+          "queryStringWhitelist": [ # Names of query string parameters to include in cache keys. Default parameters are always included. '&' and '=' will be percent encoded and not treated as delimiters.
+            "A String",
+          ],
+        },
+        "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.
+        "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).
+        "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-maxage). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+        "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+        "negativeCaching": True or False, # Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
+        "negativeCachingPolicy": [ # Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
+          { # Specify CDN TTLs for response error codes.
+            "code": 42, # The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.
+            "ttl": 42, # The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+          },
+        ],
+        "requestCoalescing": True or False, # If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
+        "serveWhileStale": 42, # Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-maxage) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.
+        "signedUrlCacheMaxAgeSec": "A String", # Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.
+        "signedUrlKeyNames": [ # [Output Only] Names of the keys for signing request URLs.
+          "A String",
+        ],
+      },
+      "compressionMode": "A String", # Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
+      "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format.
+      "customResponseHeaders": [ # Headers that the Application Load Balancer should add to proxied responses.
+        "A String",
+      ],
+      "description": "A String", # An optional textual description of the resource; provided by the client when the resource is created.
+      "edgeSecurityPolicy": "A String", # [Output Only] The resource URL for the edge security policy associated with this backend bucket.
+      "enableCdn": True or False, # If true, enable Cloud CDN for this BackendBucket.
+      "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+      "kind": "compute#backendBucket", # Type of the resource.
+      "loadBalancingScheme": "A String", # The value can only be INTERNAL_MANAGED for cross-region internal layer 7 load balancer. If loadBalancingScheme is not specified, the backend bucket can be used by classic global external load balancers, or global application external load balancers, or both.
+      "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
+      "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+      "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+      "usedBy": [ # [Output Only] List of resources referencing that backend bucket.
+        {
+          "reference": "A String", # [Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.
+        },
+      ],
+    },
+  ],
+  "kind": "compute#backendBucketList", # Type of resource.
+  "nextPageToken": "A String", # [Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.
+  "selfLink": "A String", # [Output Only] Server-defined URL for this resource.
+  "warning": { # [Output Only] Informational warning message.
+    "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
+    "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
+      {
+        "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).
+        "value": "A String", # [Output Only] A warning data value corresponding to the key.
+      },
+    ],
+    "message": "A String", # [Output Only] A human-readable description of the warning code.
+  },
+}
+
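+A minimal usage sketch for this list method via the generated Python client. The
+discovery arguments ('compute', 'alpha'), the collection name
+(regionBackendBuckets), and the project/region values are assumptions made for
+illustration, not guaranteed by this reference:
+
+    from googleapiclient import discovery
+
+    # Credentials are resolved from the environment by google-auth
+    # (e.g. GOOGLE_APPLICATION_CREDENTIALS or gcloud application-default).
+    service = discovery.build('compute', 'alpha')
+
+    # Hypothetical project and region values.
+    response = service.regionBackendBuckets().list(
+        project='my-project', region='us-central1').execute()
+
+    # 'items' may be absent when the project has no backend buckets.
+    for bucket in response.get('items', []):
+        print(bucket['name'], bucket.get('bucketName'))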
+ +
+ listUsable(project, region, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None) +
Retrieves a list of all usable backend buckets in the specified project in the given region.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region scoping this request. It must be a string that meets the requirements in RFC1035. (required)
+  filter: string, A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted literal"` `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name "instance", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.
+  maxResults: integer, The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)
+  orderBy: string, Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy="creationTimestamp desc"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.
+  pageToken: string, Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.
+  returnPartialSuccess: boolean, Opt-in for partial success behavior which provides partial results in case of failure. The default value is false. For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+  "items": [ # A list of BackendBucket resources.
+    { # Represents a Cloud Storage Bucket resource. This Cloud Storage bucket resource is referenced by a URL map of a load balancer. For more information, read Backend Buckets.
+      "bucketName": "A String", # Cloud Storage bucket name.
+      "cdnPolicy": { # Message containing Cloud CDN configuration for a backend bucket. # Cloud CDN configuration for this BackendBucket.
+        "bypassCacheOnRequestHeaders": [ # Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.
+          { # Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.
+            "headerName": "A String", # The header field name to match on when bypassing cache. Values are case-insensitive.
+          },
+        ],
+        "cacheKeyPolicy": { # Message containing what to include in the cache key for a request for Cloud CDN. # The CacheKeyPolicy for this CdnPolicy.
+          "includeHttpHeaders": [ # Allows HTTP request headers (by name) to be used in the cache key.
+            "A String",
+          ],
+          "queryStringWhitelist": [ # Names of query string parameters to include in cache keys. Default parameters are always included. '&' and '=' will be percent encoded and not treated as delimiters.
+            "A String",
+          ],
+        },
+        "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.
+        "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).
+        "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-maxage). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+        "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+        "negativeCaching": True or False, # Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
+        "negativeCachingPolicy": [ # Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
+          { # Specify CDN TTLs for response error codes.
+            "code": 42, # The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.
+            "ttl": 42, # The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+          },
+        ],
+        "requestCoalescing": True or False, # If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
+        "serveWhileStale": 42, # Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-maxage) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.
+        "signedUrlCacheMaxAgeSec": "A String", # Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.
+        "signedUrlKeyNames": [ # [Output Only] Names of the keys for signing request URLs.
+          "A String",
+        ],
+      },
+      "compressionMode": "A String", # Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
+      "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format.
+      "customResponseHeaders": [ # Headers that the Application Load Balancer should add to proxied responses.
+        "A String",
+      ],
+      "description": "A String", # An optional textual description of the resource; provided by the client when the resource is created.
+      "edgeSecurityPolicy": "A String", # [Output Only] The resource URL for the edge security policy associated with this backend bucket.
+      "enableCdn": True or False, # If true, enable Cloud CDN for this BackendBucket.
+      "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+      "kind": "compute#backendBucket", # Type of the resource.
+      "loadBalancingScheme": "A String", # The value can only be INTERNAL_MANAGED for cross-region internal layer 7 load balancer. If loadBalancingScheme is not specified, the backend bucket can be used by classic global external load balancers, or global application external load balancers, or both.
+      "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
+      "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+      "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+      "usedBy": [ # [Output Only] List of resources referencing that backend bucket.
+        {
+          "reference": "A String", # [Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.
+        },
+      ],
+    },
+  ],
+  "kind": "compute#usableBackendBucketList", # [Output Only] Type of resource. Always compute#usableBackendBucketList for lists of usable backend buckets.
+  "nextPageToken": "A String", # [Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.
+  "selfLink": "A String", # [Output Only] Server-defined URL for this resource.
+  "warning": { # [Output Only] Informational warning message.
+    "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
+    "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
+      {
+        "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).
+        "value": "A String", # [Output Only] A warning data value corresponding to the key.
+      },
+    ],
+    "message": "A String", # [Output Only] A human-readable description of the warning code.
+  },
+}
+
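+A hedged sketch of calling listUsable, reusing the service object built in the
+earlier sketch; project, region, and bucket values remain hypothetical:
+
+    # Usable backend buckets are those the caller could attach to a URL map.
+    response = service.regionBackendBuckets().listUsable(
+        project='my-project', region='us-central1').execute()
+
+    for bucket in response.get('items', []):
+        # usedBy lists the UrlMaps already referencing this bucket, if any.
+        refs = [u['reference'] for u in bucket.get('usedBy', [])]
+        print(bucket['name'], refs)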
+ +
+ listUsable_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
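+The standard pagination loop this method supports, sketched under the same
+assumptions as above (the service object and argument values are illustrative):
+
+    request = service.regionBackendBuckets().listUsable(
+        project='my-project', region='us-central1', maxResults=100)
+    while request is not None:
+        response = request.execute()
+        for bucket in response.get('items', []):
+            print(bucket['name'])
+        # Returns None once the response carries no nextPageToken.
+        request = service.regionBackendBuckets().listUsable_next(
+            previous_request=request, previous_response=response)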
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
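+Because list_next shares the (previous_request, previous_response) contract
+shown above, a small hypothetical helper (not part of the library) can page
+through any of these collections:
+
+    def iter_items(request, next_method):
+        """Yield every item across all pages of a list-style request."""
+        while request is not None:
+            response = request.execute()
+            for item in response.get('items', []):
+                yield item
+            request = next_method(previous_request=request,
+                                  previous_response=response)
+
+    # Example: drain list with list_next (names assumed as in earlier sketches).
+    buckets = service.regionBackendBuckets()
+    initial = buckets.list(project='my-project', region='us-central1')
+    for bucket in iter_items(initial, buckets.list_next):
+        print(bucket['selfLink'])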
+ +
+ patch(project, region, backendBucket, body=None, requestId=None, x__xgafv=None) +
Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region scoping this request. (required)
+  backendBucket: string, Name of the BackendBucket resource to patch. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents a Cloud Storage Bucket resource. This Cloud Storage bucket resource is referenced by a URL map of a load balancer. For more information, read Backend Buckets.
+  "bucketName": "A String", # Cloud Storage bucket name.
+  "cdnPolicy": { # Message containing Cloud CDN configuration for a backend bucket. # Cloud CDN configuration for this BackendBucket.
+    "bypassCacheOnRequestHeaders": [ # Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.
+      { # Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.
+        "headerName": "A String", # The header field name to match on when bypassing cache. Values are case-insensitive.
+      },
+    ],
+    "cacheKeyPolicy": { # Message containing what to include in the cache key for a request for Cloud CDN. # The CacheKeyPolicy for this CdnPolicy.
+      "includeHttpHeaders": [ # Allows HTTP request headers (by name) to be used in the cache key.
+        "A String",
+      ],
+      "queryStringWhitelist": [ # Names of query string parameters to include in cache keys. Default parameters are always included. '&' and '=' will be percent encoded and not treated as delimiters.
+        "A String",
+      ],
+    },
+    "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.
+    "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).
+    "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-maxage). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+    "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+    "negativeCaching": True or False, # Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
+    "negativeCachingPolicy": [ # Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
+      { # Specify CDN TTLs for response error codes.
+        "code": 42, # The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.
+        "ttl": 42, # The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+      },
+    ],
+    "requestCoalescing": True or False, # If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
+    "serveWhileStale": 42, # Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-maxage) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.
+    "signedUrlCacheMaxAgeSec": "A String", # Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.
+    "signedUrlKeyNames": [ # [Output Only] Names of the keys for signing request URLs.
+      "A String",
+    ],
+  },
+  "compressionMode": "A String", # Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
+  "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format.
+  "customResponseHeaders": [ # Headers that the Application Load Balancer should add to proxied responses.
+    "A String",
+  ],
+  "description": "A String", # An optional textual description of the resource; provided by the client when the resource is created.
+  "edgeSecurityPolicy": "A String", # [Output Only] The resource URL for the edge security policy associated with this backend bucket.
+  "enableCdn": True or False, # If true, enable Cloud CDN for this BackendBucket.
+  "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+  "kind": "compute#backendBucket", # Type of the resource.
+  "loadBalancingScheme": "A String", # The value can only be INTERNAL_MANAGED for cross-region internal layer 7 load balancer. If loadBalancingScheme is not specified, the backend bucket can be used by classic global external load balancers, or global application external load balancers, or both.
+  "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+  "usedBy": [ # [Output Only] List of resources referencing that backend bucket.
+    {
+      "reference": "A String", # [Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.
+    },
+  ],
+}
+
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if the original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID, with the exception that the zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources. Note that completed Operation resources have a limited retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is in RFC3339 text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation, this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a `404` means the resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested. This value is in RFC3339 text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # [Output Only] Type of the resource. Always `compute#operation` for Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # [Output Only] An ID that represents a group of operations, such as when a group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+  "setCommonInstanceMetadataOperationMetadata": { # [Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # [Output Only] If state is `ABANDONED` or `FAILED`, this field is populated.
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the following: `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the disk that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only applicable when performing per-zone operations.
+}
+
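+A hedged sketch of a merge patch that enables Cloud CDN with an explicit
+negative caching policy, then waits on the returned regional Operation. Field
+values are illustrative, and regionOperations().wait is assumed to be available
+in the same API version:
+
+    body = {
+        'enableCdn': True,
+        'cdnPolicy': {
+            'negativeCaching': True,
+            # With an explicit policy, Cloud CDN applies no default negative
+            # caching TTLs: list every status code you want cached.
+            'negativeCachingPolicy': [
+                {'code': 404, 'ttl': 120},
+                {'code': 301, 'ttl': 600},
+            ],
+        },
+    }
+
+    operation = service.regionBackendBuckets().patch(
+        project='my-project', region='us-central1',
+        backendBucket='my-backend-bucket', body=body).execute()
+
+    # Block until the operation reaches DONE (or the server's wait deadline).
+    result = service.regionOperations().wait(
+        project='my-project', region='us-central1',
+        operation=operation['name']).execute()
+    if 'error' in result:
+        raise RuntimeError(result['error'])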
+ +
+ setIamPolicy(project, region, resource, body=None, x__xgafv=None) +
Sets the access control policy on the specified resource. Replaces any existing policy.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "bindings": [ # Flatten Policy to create a backwacd compatible wire-format. Deprecated. Use 'policy' to specify bindings.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.
+  "policy": { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). # REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them.
+    "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+      { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+        "auditLogConfigs": [ # The configuration for logging of each type of permission.
+          { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+            "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+              "A String",
+            ],
+            "logType": "A String", # The log type that this config enables.
+          },
+        ],
+        "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+      },
+    ],
+    "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+      { # Associates `members`, or principals, with a `role`.
+        "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+          "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+          "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+          "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used, e.g., in UIs that allow entering the expression.
+        },
+        "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com`. * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+          "A String",
+        ],
+        "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).
+      },
+    ],
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+    "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permission. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format as Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used, e.g., in UIs that allow entering the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com`. * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
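The etag read-modify-write cycle described in the field comments above is easiest to see in code. Below is a minimal sketch using the google-api-python-client; the `compute` service, the `regionBackendServices` collection, and the project/region/resource names are illustrative assumptions rather than values taken from this page, and any collection exposing getIamPolicy/setIamPolicy works the same way:

```python
from googleapiclient import discovery

# Assumed service and collection, for illustration only.
compute = discovery.build("compute", "v1")
project, region, resource = "my-project", "us-central1", "my-resource"  # placeholders

# 1. Read: the returned policy carries the current etag.
policy = compute.regionBackendServices().getIamPolicy(
    project=project, region=region, resource=resource
).execute()

# 2. Modify locally, e.g. add a binding with an expiring condition.
policy.setdefault("bindings", []).append({
    "role": "roles/compute.viewer",
    "members": ["user:alice@example.com"],
    "condition": {
        "title": "expirable access",
        "expression": "request.time < timestamp('2030-01-01T00:00:00.000Z')",
    },
})
policy["version"] = 3  # conditional bindings require policy version 3

# 3. Write back; the etag still inside `policy` makes a concurrent
#    update fail instead of being silently overwritten.
compute.regionBackendServices().setIamPolicy(
    project=project, region=region, resource=resource,
    body={"policy": policy},
).execute()
```

If another writer changed the policy between the read and the write, the stale etag rejects the write and the cycle should be retried from step 1.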
+
+
+testIamPermissions(project, region, resource, body=None, x__xgafv=None)
+Returns permissions that a caller has on the specified resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "permissions": [ # The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "permissions": [ # A subset of `TestPermissionsRequest.permissions` that the caller is allowed.
+    "A String",
+  ],
+}
+
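A companion sketch for testIamPermissions, under the same assumptions as the policy example above (service, collection, and permission strings are placeholders). Note that the request body forbids wildcard permissions, so each permission is spelled out:

```python
from googleapiclient import discovery

compute = discovery.build("compute", "v1")  # assumed service, as above

body = {
    # Wildcards such as '*' or 'compute.*' are rejected; list each one.
    "permissions": [
        "compute.backendServices.get",     # hypothetical permissions
        "compute.backendServices.update",
    ]
}
resp = compute.regionBackendServices().testIamPermissions(
    project="my-project", region="us-central1", resource="my-resource",
    body=body,
).execute()

# The response echoes back only the subset the caller actually holds.
granted = set(resp.get("permissions", []))
print("can update:", "compute.backendServices.update" in granted)
```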
+
+
\ No newline at end of file
diff --git a/docs/dyn/compute_alpha.regionBackendServices.html b/docs/dyn/compute_alpha.regionBackendServices.html
index d4f2d19481..4159117b8e 100644
--- a/docs/dyn/compute_alpha.regionBackendServices.html
+++ b/docs/dyn/compute_alpha.regionBackendServices.html
@@ -452,7 +452,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1042,7 +1042,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1664,7 +1664,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2184,7 +2184,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2720,7 +2720,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -3596,7 +3596,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. diff --git a/docs/dyn/compute_alpha.regionInstanceGroupManagers.html b/docs/dyn/compute_alpha.regionInstanceGroupManagers.html index 6da605aa9b..251158ec37 100644 --- a/docs/dyn/compute_alpha.regionInstanceGroupManagers.html +++ b/docs/dyn/compute_alpha.regionInstanceGroupManagers.html @@ -1174,10 +1174,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -1238,8 +1238,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -1432,10 +1432,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -1496,8 +1496,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -1820,10 +1820,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -1884,8 +1884,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -2438,10 +2438,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -2502,8 +2502,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. @@ -4368,10 +4368,10 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. - "bulkInstanceOperation": { # [Output Only] Status of bulk instance operation. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. - "lastProgressCheck": { # [Output Only] Information from last progress check of bulk instance operation. - "error": { # [Output Only] Contains errors encountered during bulk instance operation. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. "errors": [ # [Output Only] The array of errors encountered while processing this operation. { "code": "A String", # [Output Only] The error type identifier for this error. @@ -4432,8 +4432,8 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. - "targetSizePolicy": { # Configures how target size of MIG is achieved. - "mode": "A String", # Mode in which operations on size are processed. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. }, "targetSizeUnit": "A String", # The unit of measure for the target size. "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. diff --git a/docs/dyn/compute_beta.backendServices.html b/docs/dyn/compute_beta.backendServices.html index 0b9a5f99e6..f86865aea2 100644 --- a/docs/dyn/compute_beta.backendServices.html +++ b/docs/dyn/compute_beta.backendServices.html @@ -475,7 +475,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1048,7 +1048,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1684,7 +1684,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2082,7 +2082,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2379,7 +2379,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2692,7 +2692,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -3471,7 +3471,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. diff --git a/docs/dyn/compute_beta.firewallPolicies.html b/docs/dyn/compute_beta.firewallPolicies.html index 1eb4d2b945..bc2e50507e 100644 --- a/docs/dyn/compute_beta.firewallPolicies.html +++ b/docs/dyn/compute_beta.firewallPolicies.html @@ -310,6 +310,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -334,6 +335,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -513,6 +515,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -537,6 +540,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -992,6 +996,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1016,6 +1021,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1073,6 +1079,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1097,6 +1104,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1244,6 +1252,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1268,6 +1277,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1337,6 +1347,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1361,6 +1372,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1442,6 +1454,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1466,6 +1479,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1523,6 +1537,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1547,6 +1562,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1764,6 +1780,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1788,6 +1805,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1845,6 +1863,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1869,6 +1888,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2137,6 +2157,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2161,6 +2182,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2218,6 +2240,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2242,6 +2265,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2426,6 +2450,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2450,6 +2475,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2630,6 +2656,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2654,6 +2681,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], diff --git a/docs/dyn/compute_beta.instanceGroupManagers.html b/docs/dyn/compute_beta.instanceGroupManagers.html index 7eb104cbd5..1984a08466 100644 --- a/docs/dyn/compute_beta.instanceGroupManagers.html +++ b/docs/dyn/compute_beta.instanceGroupManagers.html @@ -456,6 +456,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -472,6 +520,9 @@
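Since bulkInstanceOperation is output-only, clients consume it from an instanceGroupManagers.get response. A sketch, assuming hypothetical project, zone, and group names, of surfacing the last progress check, including the QuotaExceededInfo details that accompany a QUOTA_EXCEEDED code:

    import googleapiclient.discovery

    compute = googleapiclient.discovery.build("compute", "beta")

    mig = compute.instanceGroupManagers().get(
        project="my-project",
        zone="us-central1-a",
        instanceGroupManager="my-bulk-mig",
    ).execute()

    bulk = mig.get("status", {}).get("bulkInstanceOperation", {})
    if bulk.get("inProgress"):
        print("Bulk instance creation is still in progress")

    progress = bulk.get("lastProgressCheck", {})
    print("Last checked:", progress.get("timestamp"))
    for err in progress.get("error", {}).get("errors", []):
        print(err.get("code"), "-", err.get("message"))
        for detail in err.get("errorDetails", []):
            quota = detail.get("quotaInfo")
            if quota:  # populated when the code is QUOTA_EXCEEDED
                print("  quota metric:", quota.get("metricName"),
                      "limit:", quota.get("limit"), quota.get("dimensions"))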

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -1388,6 +1439,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -1404,6 +1503,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -1568,6 +1670,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -1584,6 +1734,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -1877,6 +2030,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -1893,6 +2094,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -2402,6 +2606,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -2418,6 +2670,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -4234,6 +4489,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user and can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for a quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of the bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -4250,6 +4553,9 @@
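Because each entry in errorDetails carries typed payloads (errorInfo, quotaInfo, help, localizedMessage), callers usually want the structured fields rather than the flat message. A small helper over an error block shaped exactly like lastProgressCheck.error above:

    def quota_failures(error):
        """Yield (metricName, limit, dimensions) for QUOTA_EXCEEDED entries
        in an error block shaped like lastProgressCheck.error above."""
        for err in error.get("errors", []):
            if err.get("code") != "QUOTA_EXCEEDED":
                continue
            for detail in err.get("errorDetails", []):
                info = detail.get("quotaInfo")
                if info:
                    yield (info.get("metricName"), info.get("limit"),
                           info.get("dimensions", {}))

    # Usage with the bulk_op dict from the earlier sketch:
    # for metric, limit, dims in quota_failures(
    #         bulk_op.get("lastProgressCheck", {}).get("error", {})):
    #     print(f"{metric} capped at {limit} for {dims}")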

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. diff --git a/docs/dyn/compute_beta.instanceTemplates.html b/docs/dyn/compute_beta.instanceTemplates.html index 8aeffb6639..3b62e25e38 100644 --- a/docs/dyn/compute_beta.instanceTemplates.html +++ b/docs/dyn/compute_beta.instanceTemplates.html @@ -313,6 +313,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -819,6 +820,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -1193,6 +1195,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -1652,6 +1655,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. diff --git a/docs/dyn/compute_beta.instances.html b/docs/dyn/compute_beta.instances.html index a1ee92074e..4280dfb78a 100644 --- a/docs/dyn/compute_beta.instances.html +++ b/docs/dyn/compute_beta.instances.html @@ -430,6 +430,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -930,6 +931,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -1574,6 +1576,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -2558,6 +2561,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -2776,6 +2780,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2800,6 +2805,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2855,6 +2861,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2879,6 +2886,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -3565,6 +3573,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -4081,6 +4090,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -8058,6 +8068,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -8671,6 +8682,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. diff --git a/docs/dyn/compute_beta.machineImages.html b/docs/dyn/compute_beta.machineImages.html index ce40eb8938..a66ea1ec26 100644 --- a/docs/dyn/compute_beta.machineImages.html +++ b/docs/dyn/compute_beta.machineImages.html @@ -427,6 +427,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -667,6 +668,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -998,6 +1000,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -1238,6 +1241,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -1650,6 +1654,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -1890,6 +1895,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. diff --git a/docs/dyn/compute_beta.networkFirewallPolicies.html b/docs/dyn/compute_beta.networkFirewallPolicies.html index 53a86a6081..ae9e5908fa 100644 --- a/docs/dyn/compute_beta.networkFirewallPolicies.html +++ b/docs/dyn/compute_beta.networkFirewallPolicies.html @@ -312,6 +312,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -336,6 +337,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -518,6 +520,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -542,6 +545,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -759,6 +763,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -783,6 +788,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -840,6 +846,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -864,6 +871,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1255,6 +1263,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1279,6 +1288,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1336,6 +1346,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1360,6 +1371,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1510,6 +1522,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1534,6 +1547,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1604,6 +1618,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1628,6 +1643,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1710,6 +1726,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1734,6 +1751,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1791,6 +1809,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1815,6 +1834,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2031,6 +2051,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2055,6 +2076,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2112,6 +2134,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2136,6 +2159,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2252,6 +2276,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2276,6 +2301,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2333,6 +2359,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2357,6 +2384,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2542,6 +2570,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2566,6 +2595,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2747,6 +2777,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2771,6 +2802,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], diff --git a/docs/dyn/compute_beta.networks.html b/docs/dyn/compute_beta.networks.html index 606eea3e9d..a720df721c 100644 --- a/docs/dyn/compute_beta.networks.html +++ b/docs/dyn/compute_beta.networks.html @@ -535,6 +535,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -559,6 +560,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -614,6 +616,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -638,6 +641,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], diff --git a/docs/dyn/compute_beta.regionBackendServices.html b/docs/dyn/compute_beta.regionBackendServices.html index 621776fb9e..105e202350 100644 --- a/docs/dyn/compute_beta.regionBackendServices.html +++ b/docs/dyn/compute_beta.regionBackendServices.html @@ -445,7 +445,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -813,7 +813,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1212,7 +1212,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1510,7 +1510,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -1824,7 +1824,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. @@ -2476,7 +2476,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "networkPassThroughLbTrafficPolicy": { # Configures traffic steering properties of internal passthrough Network Load Balancers. networkPassThroughLbTrafficPolicy cannot be specified with haPolicy. "zonalAffinity": { # When configured, new connections are load balanced across healthy backend endpoints in the local zone. "spillover": "A String", # This field indicates whether zonal affinity is enabled or not. The possible values are: - ZONAL_AFFINITY_DISABLED: Default Value. Zonal Affinity is disabled. The load balancer distributes new connections to all healthy backend endpoints across all zones. - ZONAL_AFFINITY_STAY_WITHIN_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there are no healthy backend endpoints in the local zone, the load balancer distributes new connections to all backend endpoints in the local zone. - ZONAL_AFFINITY_SPILL_CROSS_ZONE: Zonal Affinity is enabled. The load balancer distributes new connections to all healthy backend endpoints in the local zone only. If there aren't enough healthy backend endpoints in the local zone, the load balancer distributes new connections to all healthy backend endpoints across all zones. diff --git a/docs/dyn/compute_beta.regionInstanceGroupManagers.html b/docs/dyn/compute_beta.regionInstanceGroupManagers.html index 2a529ac848..6097e209b5 100644 --- a/docs/dyn/compute_beta.regionInstanceGroupManagers.html +++ b/docs/dyn/compute_beta.regionInstanceGroupManagers.html @@ -1142,6 +1142,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -1158,6 +1206,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -1322,6 +1373,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -1338,6 +1437,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -1631,6 +1733,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -1647,6 +1797,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -2156,6 +2309,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -2172,6 +2373,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. @@ -3988,6 +4192,54 @@

Method Details

"effective": True or False, # [Output Only] A bit indicating whether this configuration has been applied to all managed instances in the group. }, "autoscaler": "A String", # [Output Only] The URL of the Autoscaler that targets this instance group manager. + "bulkInstanceOperation": { # Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK. # [Output Only] The status of bulk instance operation. + "inProgress": True or False, # [Output Only] Informs whether bulk instance operation is in progress. + "lastProgressCheck": { # [Output Only] Information from the last progress check of bulk instance operation. + "error": { # [Output Only] Errors encountered during bulk instance operation. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{"instanceLimit": "100/request"}`, should be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. 
+ { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. + }, + ], + }, + "timestamp": "A String", # [Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format. + }, + }, "isStable": True or False, # [Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. "stateful": { # [Output Only] Stateful status of the given Instance Group Manager. "hasStatefulConfig": True or False, # [Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. @@ -4004,6 +4256,9 @@

Method Details

"A String", ], "targetSize": 42, # The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number. + "targetSizePolicy": { # The policy that specifies how the MIG creates its VMs to achieve the target size. + "mode": "A String", # The mode of target size policy based on which the MIG creates its VMs individually or all at once. + }, "targetStoppedSize": 42, # The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. "targetSuspendedSize": 42, # The target number of suspended instances for this managed instance group. This number changes when you: - Suspend instance using the suspendInstances method or resume instances using the resumeInstances method. - Manually change the targetSuspendedSize using the update method. "updatePolicy": { # The update policy for this managed instance group. diff --git a/docs/dyn/compute_beta.regionInstanceTemplates.html b/docs/dyn/compute_beta.regionInstanceTemplates.html index 3be82a8e3f..b4693d6f33 100644 --- a/docs/dyn/compute_beta.regionInstanceTemplates.html +++ b/docs/dyn/compute_beta.regionInstanceTemplates.html @@ -423,6 +423,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -749,6 +750,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. @@ -1209,6 +1211,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. diff --git a/docs/dyn/compute_beta.regionInstances.html b/docs/dyn/compute_beta.regionInstances.html index 5943ebd63a..da13a4bb64 100644 --- a/docs/dyn/compute_beta.regionInstances.html +++ b/docs/dyn/compute_beta.regionInstances.html @@ -266,6 +266,7 @@

Method Details

}, ], "fingerprint": "A String", # Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + "igmpQuery": "A String", # Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. "internalIpv6PrefixLength": 42, # The prefix length of the primary internal IPv6 range. "ipv6AccessConfigs": [ # An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. { # An access configuration attached to an instance's network interface. Only one access config per instance is supported. diff --git a/docs/dyn/compute_beta.regionNetworkFirewallPolicies.html b/docs/dyn/compute_beta.regionNetworkFirewallPolicies.html index d9345d1b78..33eef6bedb 100644 --- a/docs/dyn/compute_beta.regionNetworkFirewallPolicies.html +++ b/docs/dyn/compute_beta.regionNetworkFirewallPolicies.html @@ -303,6 +303,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -327,6 +328,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -790,6 +792,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -814,6 +817,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -871,6 +875,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -895,6 +900,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1003,6 +1009,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1027,6 +1034,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1082,6 +1090,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1106,6 +1115,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1290,6 +1300,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1314,6 +1325,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1397,6 +1409,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1421,6 +1434,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1478,6 +1492,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1502,6 +1517,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1719,6 +1735,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1743,6 +1760,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1800,6 +1818,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1824,6 +1843,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -1941,6 +1961,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -1965,6 +1986,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2022,6 +2044,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2046,6 +2069,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], @@ -2369,6 +2393,7 @@

Method Details

"A String", ], "destNetworkScope": "A String", # Network scope of the traffic destination. + "destNetworkType": "A String", # Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - NON_INTERNET "destRegionCodes": [ # Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. "A String", ], @@ -2393,6 +2418,7 @@

Method Details

"A String", ], "srcNetworkScope": "A String", # Network scope of the traffic source. + "srcNetworkType": "A String", # Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS "srcNetworks": [ # Networks of the traffic source. It can be either a full or partial url. "A String", ], diff --git a/docs/dyn/compute_v1.backendServices.html b/docs/dyn/compute_v1.backendServices.html index 82b1bc017f..27b9abc9a3 100644 --- a/docs/dyn/compute_v1.backendServices.html +++ b/docs/dyn/compute_v1.backendServices.html @@ -463,7 +463,7 @@
diff --git a/docs/dyn/compute_v1.backendServices.html b/docs/dyn/compute_v1.backendServices.html
index 82b1bc017f..27b9abc9a3 100644
--- a/docs/dyn/compute_v1.backendServices.html
+++ b/docs/dyn/compute_v1.backendServices.html
@@ -463,7 +463,7 @@ Method Details
"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -1009,7 +1009,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -1347,7 +1347,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -1718,7 +1718,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -1988,7 +1988,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -2274,7 +2274,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -3026,7 +3026,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. diff --git a/docs/dyn/compute_v1.firewalls.html b/docs/dyn/compute_v1.firewalls.html index 7d5a73d74f..0d4c62b46b 100644 --- a/docs/dyn/compute_v1.firewalls.html +++ b/docs/dyn/compute_v1.firewalls.html @@ -275,6 +275,11 @@
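The relaxed network-field constraint above now also covers EXTERNAL passthrough load balancers with haPolicy fastIpMove. A minimal sketch with the Python client; it assumes the regional backend service surface (where haPolicy is configured), the field spelling "fastIPMove" and value "GARP_RA", and placeholder resource names, so verify against the haPolicy schema before relying on it.

    from googleapiclient import discovery

    compute = discovery.build("compute", "v1")

    backend_service_body = {
        "name": "nlb-backend",
        "loadBalancingScheme": "EXTERNAL",
        "protocol": "TCP",
        # Per the updated description: network may (and must) be set when the
        # scheme is EXTERNAL and haPolicy fastIpMove is enabled.
        "network": "projects/my-project/global/networks/my-network",
        "haPolicy": {"fastIPMove": "GARP_RA"},  # assumed field/enum spelling
    }

    request = compute.regionBackendServices().insert(
        project="my-project", region="us-central1", body=backend_service_body
    )
    print(request.execute()["status"])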
diff --git a/docs/dyn/compute_v1.firewalls.html b/docs/dyn/compute_v1.firewalls.html
index 7d5a73d74f..0d4c62b46b 100644
--- a/docs/dyn/compute_v1.firewalls.html
+++ b/docs/dyn/compute_v1.firewalls.html
@@ -275,6 +275,11 @@ Method Details
}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. @@ -336,6 +341,11 @@

Method Details

}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. @@ -526,6 +536,11 @@

Method Details

}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. @@ -617,6 +632,11 @@

Method Details

}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. @@ -795,6 +815,11 @@

Method Details

}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. diff --git a/docs/dyn/compute_v1.instances.html b/docs/dyn/compute_v1.instances.html index 4fbab26f93..ef453e1f40 100644 --- a/docs/dyn/compute_v1.instances.html +++ b/docs/dyn/compute_v1.instances.html @@ -2459,6 +2459,11 @@
diff --git a/docs/dyn/compute_v1.instances.html b/docs/dyn/compute_v1.instances.html
index 4fbab26f93..ef453e1f40 100644
--- a/docs/dyn/compute_v1.instances.html
+++ b/docs/dyn/compute_v1.instances.html
@@ -2459,6 +2459,11 @@ Method Details
}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. diff --git a/docs/dyn/compute_v1.networks.html b/docs/dyn/compute_v1.networks.html index 3a4f705df5..9b8f68be9a 100644 --- a/docs/dyn/compute_v1.networks.html +++ b/docs/dyn/compute_v1.networks.html @@ -659,6 +659,11 @@

Method Details

}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. diff --git a/docs/dyn/compute_v1.regionBackendServices.html b/docs/dyn/compute_v1.regionBackendServices.html index df7a51900d..cdf5753f67 100644 --- a/docs/dyn/compute_v1.regionBackendServices.html +++ b/docs/dyn/compute_v1.regionBackendServices.html @@ -436,7 +436,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned back to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -777,7 +777,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -1149,7 +1149,7 @@
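As a hedged illustration of the widened `network` rule above, the following sketch patches a backend service through the dynamic client. All resource names are placeholders, and only the `network` field is shown; the EXTERNAL-plus-fastIpMove case would additionally require the haPolicy configuration, which is not reproduced here.

```python
from googleapiclient import discovery

# Illustrative only: set the backend service's network via patch().
# Project, backend service, and network names are placeholders.
compute = discovery.build("compute", "v1")
operation = compute.backendServices().patch(
    project="my-project",
    backendService="my-backend-service",
    body={"network": "projects/my-project/global/networks/my-network"},
).execute()
print(operation["name"])  # Name of the returned Operation resource.
```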

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -1420,7 +1420,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -1707,7 +1707,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. @@ -2332,7 +2332,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL. + "network": "A String", # The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled. "outlierDetection": { # Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. # Settings controlling the ejection of unhealthy backend endpoints from the load balancing pool of each individual proxy instance that processes the traffic for the given backend service. If not set, this feature is considered disabled. Results of the outlier detection algorithm (ejection of endpoints from the load balancing pool and returning them back to the pool) are executed independently by each proxy instance of the load balancer. In most cases, more than one proxy instance handles the traffic received by a backend service. Thus, it is possible that an unhealthy endpoint is detected and ejected by only some of the proxies, and while this happens, other proxies may continue to send requests to the same unhealthy endpoint until they detect and eject the unhealthy endpoint. Applicable backend endpoints can be: - VM instances in an Instance Group - Endpoints in a Zonal NEG (GCE_VM_IP, GCE_VM_IP_PORT) - Endpoints in a Hybrid Connectivity NEG (NON_GCP_PRIVATE_IP_PORT) - Serverless NEGs, that resolve to Cloud Run, App Engine, or Cloud Functions Services - Private Service Connect NEGs, that resolve to Google-managed regional API endpoints or managed services published using Private Service Connect Applicable backend service types can be: - A global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - A regional backend service with the serviceProtocol set to HTTP, HTTPS, HTTP2 or H2C, and loadBalancingScheme set to INTERNAL_MANAGED or EXTERNAL_MANAGED. Not supported for Serverless NEGs. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. "baseEjectionTime": { # A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. # The base time that a backend endpoint is ejected for. Defaults to 30000ms or 30s. 
After a backend endpoint is returned to the load balancing pool, it can be ejected again in another ejection analysis. Thus, the total ejection time is equal to the base ejection time multiplied by the number of times the backend endpoint has been ejected. Defaults to 30000ms or 30s. "nanos": 42, # Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. diff --git a/docs/dyn/compute_v1.regionNetworkFirewallPolicies.html index 3e74104e9c..94a8deed4d 100644 --- a/docs/dyn/compute_v1.regionNetworkFirewallPolicies.html +++ b/docs/dyn/compute_v1.regionNetworkFirewallPolicies.html @@ -1145,6 +1145,11 @@

Method Details

}, "name": "A String", # Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit. "network": "A String", # URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default + "params": { # Additional firewall parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "sourceRanges": [ # If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported. diff --git a/docs/dyn/compute_v1.routers.html b/docs/dyn/compute_v1.routers.html index 807f3c32c4..473dd55e14 100644 --- a/docs/dyn/compute_v1.routers.html +++ b/docs/dyn/compute_v1.routers.html @@ -1026,6 +1026,11 @@

Method Details

"nextHopOrigin": "A String", # [Output Only] Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. "nextHopPeering": "A String", # [Output Only] The network peering name that should handle matching packets, which should conform to RFC1035. "nextHopVpnTunnel": "A String", # The URL to a VpnTunnel that should handle matching packets. + "params": { # Additional route parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one with the lowest-numbered priority value wins. The default value is `1000`. The priority value must be from `0` to `65535`, inclusive. "routeStatus": "A String", # [Output only] The status of the route. "routeType": "A String", # [Output Only] The type of this route, which can be one of the following values: - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' for a route learned from a BGP peer of this router - 'STATIC' for a static route @@ -1075,6 +1080,11 @@

Method Details

"nextHopOrigin": "A String", # [Output Only] Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. "nextHopPeering": "A String", # [Output Only] The network peering name that should handle matching packets, which should conform to RFC1035. "nextHopVpnTunnel": "A String", # The URL to a VpnTunnel that should handle matching packets. + "params": { # Additional route parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one with the lowest-numbered priority value wins. The default value is `1000`. The priority value must be from `0` to `65535`, inclusive. "routeStatus": "A String", # [Output only] The status of the route. "routeType": "A String", # [Output Only] The type of this route, which can be one of the following values: - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' for a route learned from a BGP peer of this router - 'STATIC' for a static route @@ -1126,6 +1136,11 @@

Method Details

"nextHopOrigin": "A String", # [Output Only] Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. "nextHopPeering": "A String", # [Output Only] The network peering name that should handle matching packets, which should conform to RFC1035. "nextHopVpnTunnel": "A String", # The URL to a VpnTunnel that should handle matching packets. + "params": { # Additional route parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one with the lowest-numbered priority value wins. The default value is `1000`. The priority value must be from `0` to `65535`, inclusive. "routeStatus": "A String", # [Output only] The status of the route. "routeType": "A String", # [Output Only] The type of this route, which can be one of the following values: - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' for a route learned from a BGP peer of this router - 'STATIC' for a static route diff --git a/docs/dyn/compute_v1.routes.html b/docs/dyn/compute_v1.routes.html index 9e5e042a3c..283eafe06f 100644 --- a/docs/dyn/compute_v1.routes.html +++ b/docs/dyn/compute_v1.routes.html @@ -264,6 +264,11 @@

Method Details

"nextHopOrigin": "A String", # [Output Only] Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. "nextHopPeering": "A String", # [Output Only] The network peering name that should handle matching packets, which should conform to RFC1035. "nextHopVpnTunnel": "A String", # The URL to a VpnTunnel that should handle matching packets. + "params": { # Additional route parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one with the lowest-numbered priority value wins. The default value is `1000`. The priority value must be from `0` to `65535`, inclusive. "routeStatus": "A String", # [Output only] The status of the route. "routeType": "A String", # [Output Only] The type of this route, which can be one of the following values: - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' for a route learned from a BGP peer of this router - 'STATIC' for a static route @@ -322,6 +327,11 @@

Method Details

"nextHopOrigin": "A String", # [Output Only] Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. "nextHopPeering": "A String", # [Output Only] The network peering name that should handle matching packets, which should conform to RFC1035. "nextHopVpnTunnel": "A String", # The URL to a VpnTunnel that should handle matching packets. + "params": { # Additional route parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one with the lowest-numbered priority value wins. The default value is `1000`. The priority value must be from `0` to `65535`, inclusive. "routeStatus": "A String", # [Output only] The status of the route. "routeType": "A String", # [Output Only] The type of this route, which can be one of the following values: - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' for a route learned from a BGP peer of this router - 'STATIC' for a static route @@ -509,6 +519,11 @@

Method Details

"nextHopOrigin": "A String", # [Output Only] Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. "nextHopPeering": "A String", # [Output Only] The network peering name that should handle matching packets, which should conform to RFC1035. "nextHopVpnTunnel": "A String", # The URL to a VpnTunnel that should handle matching packets. + "params": { # Additional route parameters. # Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} * {"123/environment" : "production", "345/abc" : "xyz"} Note: * Invalid combinations of ID & namespaced format is not supported. For instance: {"123/environment" : "tagValues/444"} is invalid. + "a_key": "A String", + }, + }, "priority": 42, # The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one with the lowest-numbered priority value wins. The default value is `1000`. The priority value must be from `0` to `65535`, inclusive. "routeStatus": "A String", # [Output only] The status of the route. "routeType": "A String", # [Output Only] The type of this route, which can be one of the following values: - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' for a route learned from a BGP peer of this router - 'STATIC' for a static route diff --git a/docs/dyn/connectors_v1.projects.locations.connections.html b/docs/dyn/connectors_v1.projects.locations.connections.html index 7b2412ed84..1435e5d6e8 100644 --- a/docs/dyn/connectors_v1.projects.locations.connections.html +++ b/docs/dyn/connectors_v1.projects.locations.connections.html @@ -2643,7 +2643,7 @@

Method Details

"updateTime": "A String", # Output only. Updated time. } - updateMask: string, Required. The list of fields to update. Fields are specified relative to the connection. A field will be overwritten if it is in the mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. You can modify only the fields listed below. To lock/unlock a connection: * `lock_config` To suspend/resume a connection: * `suspended` To update the connection details: * `description` * `labels` * `connector_version` * `config_variables` * `auth_config` * `destination_configs` * `node_config` * `log_config` * `ssl_config` * `eventing_enablement_type` * `eventing_config` * `auth_override_enabled` + updateMask: string, Required. The list of fields to update. Fields are specified relative to the connection. A field will be overwritten if it is in the mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. You can modify only the fields listed below. To lock/unlock a connection: * `lock_config` To suspend/resume a connection: * `suspended` To update the connection details: * `description` * `labels` * `connector_version` * `config_variables` * `auth_config` * `destination_configs` * `node_config` * `log_config` * `ssl_config` * `eventing_enablement_type` * `eventing_config` * `auth_override_enabled` * `async_operations_enabled` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/connectors_v1.projects.locations.endpointAttachments.html b/docs/dyn/connectors_v1.projects.locations.endpointAttachments.html index fd738f421b..e4efe3c0bc 100644 --- a/docs/dyn/connectors_v1.projects.locations.endpointAttachments.html +++ b/docs/dyn/connectors_v1.projects.locations.endpointAttachments.html @@ -124,7 +124,7 @@

Method Details

"updateTime": "A String", # Output only. Updated time. } - endpointAttachmentId: string, Required. Identifier to assign to the EndpointAttachment. Must be unique within scope of the parent resource. + endpointAttachmentId: string, Required. Identifier to assign to the EndpointAttachment. Must be unique within scope of the parent resource. The regex is: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html index 7dec5055fd..f5e1b392e0 100644 --- a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html +++ b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html @@ -86,10 +86,10 @@

Instance Methods

fetchAuthSchema(name, view=None, x__xgafv=None)

Fetch and return the list of auth config variables required to override the connection backend auth.

- get(name, schemaView=None, view=None, x__xgafv=None)

+ get(name, view=None, x__xgafv=None)

Gets details of a single connector version.

- list(parent, pageSize=None, pageToken=None, schemaView=None, view=None, x__xgafv=None)

+ list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)

Lists Connector Versions in a given project and location.

list_next()

@@ -160,16 +160,11 @@

Method Details

- get(name, schemaView=None, view=None, x__xgafv=None) + get(name, view=None, x__xgafv=None)
Gets details of a single connector version.
 
 Args:
   name: string, Required. Resource name of the form: `projects/*/locations/*/providers/*/connectors/*/versions/*` Only global location is supported for ConnectorVersion resource. (required)
-  schemaView: string, Optional. Enum to control whether schema enrichment related fields should be included in the response.
-    Allowed values
-      CONNECTOR_VERSION_SCHEMA_VIEW_UNSPECIFIED - VIEW_UNSPECIFIED. The unset value. Defaults to BASIC View.
-      CONNECTOR_VERSION_SCHEMA_VIEW_BASIC - Return basic connector version schema.
-      CONNECTOR_VERSION_SCHEMA_VIEW_ENRICHED - Return enriched connector version schema.
   view: string, Specifies which fields of the ConnectorVersion are returned in the response. Defaults to `CUSTOMER` view.
     Allowed values
       CONNECTOR_VERSION_VIEW_UNSPECIFIED - CONNECTOR_VERSION_VIEW_UNSPECIFIED.
@@ -881,14 +876,6 @@ 

Method Details

}, "launchStage": "A String", # Output only. Flag to mark the version indicating the launch stage. "name": "A String", # Output only. Resource name of the Version. Format: projects/{project}/locations/{location}/providers/{provider}/connectors/{connector}/versions/{version} Only global location is supported for Connector resource. - "priorityEntityTypes": [ # Optional. The priority entity types for the connector version. - { # PriorityEntityType represents an entity type with its associated priority and order. - "description": "A String", # The description of the entity type. - "id": "A String", # The entity type. - "order": 42, # The order of the entity type within its priority group. - "priority": "A String", # The priority of the entity type, such as P0, P1, etc. - }, - ], "releaseVersion": "A String", # Output only. ReleaseVersion of the connector, for example: "1.0.1-alpha". "roleGrant": { # This configuration defines all the Cloud IAM roles that needs to be granted to a particular Google Cloud resource for the selected principal like service account. These configurations will let UI display to customers what IAM roles need to be granted by them. Or these configurations can be used by the UI to render a 'grant' button to do the same on behalf of the user. # Output only. Role grant configuration for this config variable. It will be DEPRECATED soon. "helperTextTemplate": "A String", # Optional. Template that UI can use to provide helper text to customers. @@ -1022,18 +1009,13 @@

Method Details

- list(parent, pageSize=None, pageToken=None, schemaView=None, view=None, x__xgafv=None) + list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)
Lists Connector Versions in a given project and location.
 
 Args:
-  parent: string, Required. Parent resource of the connectors, of the form: `projects/*/locations/*/providers/*/connectors/*` Only global location is supported for ConnectorVersion resource. (required)
+  parent: string, A parameter (required)
   pageSize: integer, Page size.
   pageToken: string, Page token.
-  schemaView: string, Optional. Enum to control whether schema enrichment related fields should be included in the response.
-    Allowed values
-      CONNECTOR_VERSION_SCHEMA_VIEW_UNSPECIFIED - VIEW_UNSPECIFIED. The unset value. Defaults to BASIC View.
-      CONNECTOR_VERSION_SCHEMA_VIEW_BASIC - Return basic connector version schema.
-      CONNECTOR_VERSION_SCHEMA_VIEW_ENRICHED - Return enriched connector version schema.
   view: string, Specifies which fields of the ConnectorVersion are returned in the response. Defaults to `BASIC` view.
     Allowed values
       CONNECTOR_VERSION_VIEW_UNSPECIFIED - CONNECTOR_VERSION_VIEW_UNSPECIFIED.
@@ -1747,14 +1729,6 @@ 

Method Details

}, "launchStage": "A String", # Output only. Flag to mark the version indicating the launch stage. "name": "A String", # Output only. Resource name of the Version. Format: projects/{project}/locations/{location}/providers/{provider}/connectors/{connector}/versions/{version} Only global location is supported for Connector resource. - "priorityEntityTypes": [ # Optional. The priority entity types for the connector version. - { # PriorityEntityType represents an entity type with its associated priority and order. - "description": "A String", # The description of the entity type. - "id": "A String", # The entity type. - "order": 42, # The order of the entity type within its priority group. - "priority": "A String", # The priority of the entity type, such as P0, P1, etc. - }, - ], "releaseVersion": "A String", # Output only. ReleaseVersion of the connector, for example: "1.0.1-alpha". "roleGrant": { # This configuration defines all the Cloud IAM roles that needs to be granted to a particular Google Cloud resource for the selected principal like service account. These configurations will let UI display to customers what IAM roles need to be granted by them. Or these configurations can be used by the UI to render a 'grant' button to do the same on behalf of the user. # Output only. Role grant configuration for this config variable. It will be DEPRECATED soon. "helperTextTemplate": "A String", # Optional. Template that UI can use to provide helper text to customers. diff --git a/docs/dyn/connectors_v2.projects.locations.connections.entityTypes.entities.html b/docs/dyn/connectors_v2.projects.locations.connections.entityTypes.entities.html index 111b8b992a..c58788f350 100644 --- a/docs/dyn/connectors_v2.projects.locations.connections.entityTypes.entities.html +++ b/docs/dyn/connectors_v2.projects.locations.connections.entityTypes.entities.html @@ -90,7 +90,7 @@

Instance Methods

get(name, x__xgafv=None)

Gets a single entity row matching the entity type and entity id specified in the request.

- list(parent, conditions=None, pageSize=None, pageToken=None, sortBy=None, x__xgafv=None)

+ list(parent, conditions=None, pageSize=None, pageToken=None, sortBy=None, sortOrder=None, x__xgafv=None)

Lists entity rows of a particular entity type contained in the request. Note: 1. Currently, at most one 'sort_by' column is supported. 2. If no 'sort_by' column is provided, the primary key of the table is used. If zero or more than one primary key is available, we default to the unpaginated list entities logic which only returns the first page. 3. The values of the 'sort_by' columns must uniquely identify an entity row, otherwise undefined behaviors may be observed during pagination. 4. Since transactions are not supported, any updates, inserts or deletes during pagination can lead to stale data being returned or other unexpected behaviors.

list_next()

@@ -199,7 +199,7 @@

Method Details

- list(parent, conditions=None, pageSize=None, pageToken=None, sortBy=None, x__xgafv=None) + list(parent, conditions=None, pageSize=None, pageToken=None, sortBy=None, sortOrder=None, x__xgafv=None)
Lists entity rows of a particular entity type contained in the request. Note: 1. Currently, at most one 'sort_by' column is supported. 2. If no 'sort_by' column is provided, the primary key of the table is used. If zero or more than one primary key is available, we default to the unpaginated list entities logic which only returns the first page. 3. The values of the 'sort_by' columns must uniquely identify an entity row, otherwise undefined behaviors may be observed during pagination. 4. Since transactions are not supported, any updates, inserts or deletes during pagination can lead to stale data being returned or other unexpected behaviors.
 
 Args:
@@ -208,6 +208,7 @@ 

Method Details

pageSize: integer, Number of entity rows to return. Default page size is 25; max page size is 200. pageToken: string, Page token value if available from a previous request. sortBy: string, List of 'sort_by' columns to use when returning the results. (repeated) + sortOrder: string, List of 'sort_order' values to use when returning the results. (repeated) x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.assessmentRules.html new file mode 100644 index 0000000000..0384c1f33f --- /dev/null +++ b/docs/dyn/contactcenterinsights_v1.projects.locations.assessmentRules.html @@ -0,0 +1,331 @@ + + + +
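A hedged sketch of paging through entity rows with the new repeated `sortOrder` parameter. The parent path and column name are placeholders; 'ASC' as a sort-order value and `entities` as the response field name are assumptions not confirmed by this page:

```python
from googleapiclient import discovery

connectors = discovery.build("connectors", "v2")
entities = connectors.projects().locations().connections().entityTypes().entities()
request = entities.list(
    parent="projects/p/locations/global/connections/c/entityTypes/orders",
    sortBy=["create_time"],   # At most one sort_by column is supported.
    sortOrder=["ASC"],        # Assumed value; pairs with sortBy.
    pageSize=25,
)
while request is not None:
    response = request.execute()
    for row in response.get("entities", []):  # Field name assumed.
        print(row)
    request = entities.list_next(request, response)
```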

Contact Center AI Insights API . projects . locations . assessmentRules

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, assessmentRuleId=None, body=None, x__xgafv=None)

+

Creates an assessment rule.

+

+ delete(name, x__xgafv=None)

+

Deletes an assessment rule.

+

+ get(name, x__xgafv=None)

+

Get an assessment rule.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists assessment rules.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Updates an assessment rule.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, assessmentRuleId=None, body=None, x__xgafv=None) +
Creates an assessment rule.
+
+Args:
+  parent: string, Required. The parent resource of the assessment rule: the location to create an assessment rule for. Format: `projects//locations/` or `projects//locations/` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The CCAI Insights project-wide assessment rule. This assessment rule will be applied to all conversations from the previous sampling cycle that match the sample rule defined in the assessment rule. One project can have multiple assessment rules.
+  "active": True or False, # If true, apply this rule to conversations. Otherwise, this rule is inactive.
+  "createTime": "A String", # Output only. The time at which this assessment rule was created.
+  "displayName": "A String", # Display Name of the assessment rule.
+  "name": "A String", # Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}
+  "sampleRule": { # Message for sampling conversations. # The sample rule for the assessment rule.
+    "conversationFilter": "A String", # To specify the filter for the conversions that should apply this sample rule. An empty filter means this sample rule applies to all conversations.
+    "dimension": "A String", # Optional. Group by dimension to sample the conversation. If no dimension is provided, the sampling will be applied to the project level. Current supported dimensions is 'quality_metadata.agent_info.agent_id'.
+    "samplePercentage": 3.14, # Percentage of conversations that we should sample based on the dimension between [0, 100].
+    "sampleRow": "A String", # Number of the conversations that we should sample based on the dimension.
+  },
+  "scheduleInfo": { # Message for schedule info. # Schedule info for the assessment rule.
+    "endTime": "A String", # End time of the schedule. If not specified, will keep scheduling new pipelines for execution util the schedule is no longer active or deleted.
+    "schedule": "A String", # The groc expression. Format: `every number [synchronized]` Time units can be: minutes, hours Synchronized is optional and indicates that the schedule should be synchronized to the start of the interval: every 5 minutes synchronized means 00:00, 00:05 ... Otherwise the start time is random within the interval. Example: `every 5 minutes` could be 00:02, 00:07, 00:12, ...
+    "startTime": "A String", # Start time of the schedule. If not specified, will start as soon as the schedule is created.
+    "timeZone": "A String", # The timezone to use for the groc expression. If not specified, defaults to UTC.
+  },
+  "updateTime": "A String", # Output only. The most recent time at which this assessment rule was updated.
+}
+
+  assessmentRuleId: string, Optional. A unique ID for the new AssessmentRule. This ID will become the final component of the AssessmentRule's resource name. If no ID is specified, a server-generated ID will be used. This value should be 4-64 characters and must match the regular expression `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The CCAI Insights project-wide assessment rule. This assessment rule will be applied to all conversations from the previous sampling cycle that match the sample rule defined in the assessment rule. One project can have multiple assessment rules.
+  "active": True or False, # If true, apply this rule to conversations. Otherwise, this rule is inactive.
+  "createTime": "A String", # Output only. The time at which this assessment rule was created.
+  "displayName": "A String", # Display Name of the assessment rule.
+  "name": "A String", # Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}
+  "sampleRule": { # Message for sampling conversations. # The sample rule for the assessment rule.
+    "conversationFilter": "A String", # To specify the filter for the conversions that should apply this sample rule. An empty filter means this sample rule applies to all conversations.
+    "dimension": "A String", # Optional. Group by dimension to sample the conversation. If no dimension is provided, the sampling will be applied to the project level. Current supported dimensions is 'quality_metadata.agent_info.agent_id'.
+    "samplePercentage": 3.14, # Percentage of conversations that we should sample based on the dimension between [0, 100].
+    "sampleRow": "A String", # Number of the conversations that we should sample based on the dimension.
+  },
+  "scheduleInfo": { # Message for schedule info. # Schedule info for the assessment rule.
+    "endTime": "A String", # End time of the schedule. If not specified, will keep scheduling new pipelines for execution util the schedule is no longer active or deleted.
+    "schedule": "A String", # The groc expression. Format: `every number [synchronized]` Time units can be: minutes, hours Synchronized is optional and indicates that the schedule should be synchronized to the start of the interval: every 5 minutes synchronized means 00:00, 00:05 ... Otherwise the start time is random within the interval. Example: `every 5 minutes` could be 00:02, 00:07, 00:12, ...
+    "startTime": "A String", # Start time of the schedule. If not specified, will start as soon as the schedule is created.
+    "timeZone": "A String", # The timezone to use for the groc expression. If not specified, defaults to UTC.
+  },
+  "updateTime": "A String", # Output only. The most recent time at which this assessment rule was updated.
+}
+
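A minimal usage sketch for create(), built only from fields documented in the body above; the project, location, and rule ID are placeholders:

```python
from googleapiclient import discovery

insights = discovery.build("contactcenterinsights", "v1")
rule = insights.projects().locations().assessmentRules().create(
    parent="projects/my-project/locations/us-central1",
    assessmentRuleId="weekly-agent-sample",  # Must match ^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$
    body={
        "active": True,
        "displayName": "Weekly agent sample",
        "sampleRule": {
            "dimension": "quality_metadata.agent_info.agent_id",
            "samplePercentage": 10.0,
        },
    },
).execute()
print(rule["name"])  # projects/.../locations/.../assessmentRules/...
```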
+ +
+ delete(name, x__xgafv=None) +
Deletes an assessment rule.
+
+Args:
+  name: string, Required. The name of the assessment rule to delete. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Get an assessment rule.
+
+Args:
+  name: string, Required. The name of the assessment rule to get. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The CCAI Insights project-wide assessment rule. This assessment rule will be applied to all conversations from the previous sampling cycle that match the sample rule defined in the assessment rule. One project can have multiple assessment rules.
+  "active": True or False, # If true, apply this rule to conversations. Otherwise, this rule is inactive.
+  "createTime": "A String", # Output only. The time at which this assessment rule was created.
+  "displayName": "A String", # Display Name of the assessment rule.
+  "name": "A String", # Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}
+  "sampleRule": { # Message for sampling conversations. # The sample rule for the assessment rule.
+    "conversationFilter": "A String", # To specify the filter for the conversions that should apply this sample rule. An empty filter means this sample rule applies to all conversations.
+    "dimension": "A String", # Optional. Group by dimension to sample the conversation. If no dimension is provided, the sampling will be applied to the project level. Current supported dimensions is 'quality_metadata.agent_info.agent_id'.
+    "samplePercentage": 3.14, # Percentage of conversations that we should sample based on the dimension between [0, 100].
+    "sampleRow": "A String", # Number of the conversations that we should sample based on the dimension.
+  },
+  "scheduleInfo": { # Message for schedule info. # Schedule info for the assessment rule.
+    "endTime": "A String", # End time of the schedule. If not specified, will keep scheduling new pipelines for execution util the schedule is no longer active or deleted.
+    "schedule": "A String", # The groc expression. Format: `every number [synchronized]` Time units can be: minutes, hours Synchronized is optional and indicates that the schedule should be synchronized to the start of the interval: every 5 minutes synchronized means 00:00, 00:05 ... Otherwise the start time is random within the interval. Example: `every 5 minutes` could be 00:02, 00:07, 00:12, ...
+    "startTime": "A String", # Start time of the schedule. If not specified, will start as soon as the schedule is created.
+    "timeZone": "A String", # The timezone to use for the groc expression. If not specified, defaults to UTC.
+  },
+  "updateTime": "A String", # Output only. The most recent time at which this assessment rule was updated.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists assessment rules.
+
+Args:
+  parent: string, Required. The parent resource of the assessment rules. (required)
+  pageSize: integer, Optional. The maximum number of assessment rules to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.
+  pageToken: string, Optional. The value returned by the last `ListAssessmentRulesResponse`; indicates that this is a continuation of a prior `ListAssessmentRules` call and the system should return the next page of data.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response of listing assessment rules.
+  "assessmentRules": [ # The assessment rules that match the request.
+    { # The CCAI Insights project-wide assessment rule. This assessment rule will be applied to all conversations from the previous sampling cycle that match the sample rule defined in the assessment rule. One project can have multiple assessment rules.
+      "active": True or False, # If true, apply this rule to conversations. Otherwise, this rule is inactive.
+      "createTime": "A String", # Output only. The time at which this assessment rule was created.
+      "displayName": "A String", # Display Name of the assessment rule.
+      "name": "A String", # Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}
+      "sampleRule": { # Message for sampling conversations. # The sample rule for the assessment rule.
+        "conversationFilter": "A String", # To specify the filter for the conversions that should apply this sample rule. An empty filter means this sample rule applies to all conversations.
+        "dimension": "A String", # Optional. Group by dimension to sample the conversation. If no dimension is provided, the sampling will be applied to the project level. Current supported dimensions is 'quality_metadata.agent_info.agent_id'.
+        "samplePercentage": 3.14, # Percentage of conversations that we should sample based on the dimension between [0, 100].
+        "sampleRow": "A String", # Number of the conversations that we should sample based on the dimension.
+      },
+      "scheduleInfo": { # Message for schedule info. # Schedule info for the assessment rule.
+        "endTime": "A String", # End time of the schedule. If not specified, will keep scheduling new pipelines for execution util the schedule is no longer active or deleted.
+        "schedule": "A String", # The groc expression. Format: `every number [synchronized]` Time units can be: minutes, hours Synchronized is optional and indicates that the schedule should be synchronized to the start of the interval: every 5 minutes synchronized means 00:00, 00:05 ... Otherwise the start time is random within the interval. Example: `every 5 minutes` could be 00:02, 00:07, 00:12, ...
+        "startTime": "A String", # Start time of the schedule. If not specified, will start as soon as the schedule is created.
+        "timeZone": "A String", # The timezone to use for the groc expression. If not specified, defaults to UTC.
+      },
+      "updateTime": "A String", # Output only. The most recent time at which this assessment rule was updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
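+As a usage illustration (not part of the generated reference), a minimal Python sketch that pages through every assessment rule; it assumes Application Default Credentials and hypothetical project/location IDs:
+
+  from googleapiclient.discovery import build
+
+  service = build("contactcenterinsights", "v1")
+  rules = service.projects().locations().assessmentRules()
+  request = rules.list(parent="projects/my-project/locations/us-central1")
+  while request is not None:
+      response = request.execute()
+      for rule in response.get("assessmentRules", []):
+          print(rule["name"], rule.get("active"))
+      # list_next() returns None once the collection is exhausted.
+      request = rules.list_next(previous_request=request, previous_response=response)
+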
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Updates an assessment rule.
+
+Args:
+  name: string, Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The CCAI Insights project-wide assessment rule. This assessment rule will be applied to all conversations from the previous sampling cycle that match the sample rule defined in the assessment rule. One project can have multiple assessment rules.
+  "active": True or False, # If true, apply this rule to conversations. Otherwise, this rule is inactive.
+  "createTime": "A String", # Output only. The time at which this assessment rule was created.
+  "displayName": "A String", # Display Name of the assessment rule.
+  "name": "A String", # Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}
+  "sampleRule": { # Message for sampling conversations. # The sample rule for the assessment rule.
+    "conversationFilter": "A String", # To specify the filter for the conversions that should apply this sample rule. An empty filter means this sample rule applies to all conversations.
+    "dimension": "A String", # Optional. Group by dimension to sample the conversation. If no dimension is provided, the sampling will be applied to the project level. Current supported dimensions is 'quality_metadata.agent_info.agent_id'.
+    "samplePercentage": 3.14, # Percentage of conversations that we should sample based on the dimension between [0, 100].
+    "sampleRow": "A String", # Number of the conversations that we should sample based on the dimension.
+  },
+  "scheduleInfo": { # Message for schedule info. # Schedule info for the assessment rule.
+    "endTime": "A String", # End time of the schedule. If not specified, will keep scheduling new pipelines for execution util the schedule is no longer active or deleted.
+    "schedule": "A String", # The groc expression. Format: `every number [synchronized]` Time units can be: minutes, hours Synchronized is optional and indicates that the schedule should be synchronized to the start of the interval: every 5 minutes synchronized means 00:00, 00:05 ... Otherwise the start time is random within the interval. Example: `every 5 minutes` could be 00:02, 00:07, 00:12, ...
+    "startTime": "A String", # Start time of the schedule. If not specified, will start as soon as the schedule is created.
+    "timeZone": "A String", # The timezone to use for the groc expression. If not specified, defaults to UTC.
+  },
+  "updateTime": "A String", # Output only. The most recent time at which this assessment rule was updated.
+}
+
+  updateMask: string, Optional. The list of fields to be updated. If the update_mask is not provided, the update will be applied to all fields.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The CCAI Insights project-wide assessment rule. This assessment rule will be applied to all conversations from the previous sampling cycle that match the sample rule defined in the assessment rule. One project can have multiple assessment rules.
+  "active": True or False, # If true, apply this rule to conversations. Otherwise, this rule is inactive.
+  "createTime": "A String", # Output only. The time at which this assessment rule was created.
+  "displayName": "A String", # Display Name of the assessment rule.
+  "name": "A String", # Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}
+  "sampleRule": { # Message for sampling conversations. # The sample rule for the assessment rule.
+    "conversationFilter": "A String", # To specify the filter for the conversions that should apply this sample rule. An empty filter means this sample rule applies to all conversations.
+    "dimension": "A String", # Optional. Group by dimension to sample the conversation. If no dimension is provided, the sampling will be applied to the project level. Current supported dimensions is 'quality_metadata.agent_info.agent_id'.
+    "samplePercentage": 3.14, # Percentage of conversations that we should sample based on the dimension between [0, 100].
+    "sampleRow": "A String", # Number of the conversations that we should sample based on the dimension.
+  },
+  "scheduleInfo": { # Message for schedule info. # Schedule info for the assessment rule.
+    "endTime": "A String", # End time of the schedule. If not specified, will keep scheduling new pipelines for execution util the schedule is no longer active or deleted.
+    "schedule": "A String", # The groc expression. Format: `every number [synchronized]` Time units can be: minutes, hours Synchronized is optional and indicates that the schedule should be synchronized to the start of the interval: every 5 minutes synchronized means 00:00, 00:05 ... Otherwise the start time is random within the interval. Example: `every 5 minutes` could be 00:02, 00:07, 00:12, ...
+    "startTime": "A String", # Start time of the schedule. If not specified, will start as soon as the schedule is created.
+    "timeZone": "A String", # The timezone to use for the groc expression. If not specified, defaults to UTC.
+  },
+  "updateTime": "A String", # Output only. The most recent time at which this assessment rule was updated.
+}
+
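+A hedged sketch of a partial update, reusing the `service` object from the listing sketch above (the rule name is a placeholder); because an omitted update_mask updates all fields, the mask below deliberately restricts the change to `active`:
+
+  rule_name = "projects/my-project/locations/us-central1/assessmentRules/my-rule"
+  updated = service.projects().locations().assessmentRules().patch(
+      name=rule_name,
+      body={"active": False},
+      updateMask="active",
+  ).execute()
+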
+ + \ No newline at end of file diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.html b/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.html new file mode 100644 index 0000000000..5f8c357016 --- /dev/null +++ b/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.html @@ -0,0 +1,393 @@ + + + +

Contact Center AI Insights API . projects . locations . authorizedViewSets . authorizedViews . conversations . assessments

+

Instance Methods

+

+ notes() +

+

Returns the notes Resource.

+ +

+ appeal(name, body=None, x__xgafv=None)

+

Appeal an Assessment.

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Create Assessment.

+

+ delete(name, force=None, x__xgafv=None)

+

Delete an Assessment.

+

+ finalize(name, body=None, x__xgafv=None)

+

Finalize an Assessment.

+

+ get(name, x__xgafv=None)

+

Get Assessment.

+

+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

List Assessments.

+

+ list_next()

+

Retrieves the next page of results.

+

+ publish(name, body=None, x__xgafv=None)

+

Publish an Assessment.

+

Method Details

+
+ appeal(name, body=None, x__xgafv=None) +
Appeal an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to appeal. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The message to appeal an assessment.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
Create Assessment.
+
+Args:
+  parent: string, Required. The parent resource of the assessment. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
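+A minimal creation sketch (all identifiers are placeholders); only client-settable fields are sent, since `createTime`, `state`, and `updateTime` are output only:
+
+  from googleapiclient.discovery import build
+
+  service = build("contactcenterinsights", "v1")
+  assessments = (service.projects().locations().authorizedViewSets()
+                 .authorizedViews().conversations().assessments())
+  parent = ("projects/my-project/locations/us-central1/authorizedViewSets/my-set"
+            "/authorizedViews/my-view/conversations/my-conversation")
+  assessment = assessments.create(
+      parent=parent,
+      body={"agentInfo": {"agentId": "agent-123", "teams": ["billing"]}},
+  ).execute()
+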
+ +
+ delete(name, force=None, x__xgafv=None) +
Delete an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to delete. (required)
+  force: boolean, Optional. If set to true, all of this assessment's notes will also be deleted. Otherwise, the request will only succeed if the assessment has no notes.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
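+A one-call sketch of a forced delete, reusing the `assessments` resource and `parent` from the create sketch above (the assessment ID is hypothetical); per the `force` flag, the assessment's notes are deleted with it:
+
+  assessments.delete(
+      name=parent + "/assessments/my-assessment",
+      force=True,
+  ).execute()
+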
+ +
+ finalize(name, body=None, x__xgafv=None) +
Finalize an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to finalize. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The message to finalize an assessment. Finalizing makes an assessment and its notes immutable.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+ +
+ get(name, x__xgafv=None) +
Get Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to get. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+ +
+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
List Assessments.
+
+Args:
+  parent: string, Required. The parent resource of the assessments. To list all assessments in a location, substitute the conversation ID with a '-' character. (required)
+  filter: string, Optional. A filter to reduce results to a specific subset. Supported filters include: * `state` - The state of the assessment * `agent_info.agent_id` - The ID of the agent the assessment is for
+  pageSize: integer, The maximum number of assessments to list. If zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.
+  pageToken: string, Optional. The value returned by the last `ListAssessmentsResponse`; indicates that this is a continuation of a prior `ListAssessments` call and the system should return the next page of data.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response of listing assessments.
+  "assessments": [ # The assessments that match the request.
+    { # The assessment resource.
+      "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+        "agentId": "A String", # A user-specified string representing the agent.
+        "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+        "displayName": "A String", # The agent's name.
+        "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+        "location": "A String", # The agent's location.
+        "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+        "teams": [ # User-specified strings representing the agent's teams.
+          "A String",
+        ],
+      },
+      "createTime": "A String", # Output only. The time at which the assessment was created.
+      "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+      "state": "A String", # Output only. The state of the assessment.
+      "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
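+A hedged listing sketch: per the `parent` description, substituting '-' for the conversation ID lists assessments across all conversations, and the filter uses the documented `agent_info.agent_id` field (the filter syntax shown is illustrative):
+
+  wildcard_parent = ("projects/my-project/locations/us-central1/authorizedViewSets"
+                     "/my-set/authorizedViews/my-view/conversations/-")
+  response = assessments.list(
+      parent=wildcard_parent,
+      filter='agent_info.agent_id = "agent-123"',
+  ).execute()
+  for a in response.get("assessments", []):
+      print(a["name"], a["state"])
+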
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ publish(name, body=None, x__xgafv=None) +
Publish an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to publish. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The message to publish an assessment. Draft and appealed assessments can be published. Publishing simply changes the state of the assessment to published, allowing the console and authorized views to filter on the state.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
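+A lifecycle sketch tying the state-changing calls together; all three take an empty request body as documented above, and the ordering shown is illustrative only:
+
+  name = assessment["name"]  # from the create sketch above
+  assessments.publish(name=name, body={}).execute()   # draft -> published
+  assessments.appeal(name=name, body={}).execute()    # contest the published assessment
+  assessments.finalize(name=name, body={}).execute()  # make assessment and notes immutable
+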
+ + \ No newline at end of file diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.notes.html b/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.notes.html new file mode 100644 index 0000000000..5b9ebaa50e --- /dev/null +++ b/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.notes.html @@ -0,0 +1,282 @@ + + + +

Contact Center AI Insights API . projects . locations . authorizedViewSets . authorizedViews . conversations . assessments . notes

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Create Note.

+

+ delete(name, x__xgafv=None)

+

Deletes a Note.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List Notes.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Update Note.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
Create Note.
+
+Args:
+  parent: string, Required. The parent resource of the note. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
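+A minimal note-creation sketch (the parent is the assessment created in the earlier sketches); it sets `content` plus a single note-target field, here a `conversationTurnNote` for turn 0:
+
+  notes = (service.projects().locations().authorizedViewSets()
+           .authorizedViews().conversations().assessments().notes())
+  note = notes.create(
+      parent=assessment["name"],
+      body={
+          "content": "Agent confirmed the customer's identity.",
+          "conversationTurnNote": {"turnIndex": 0},
+      },
+  ).execute()
+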
+ +
+ delete(name, x__xgafv=None) +
Deletes a Note.
+
+Args:
+  name: string, Required. The name of the note to delete. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List Notes.
+
+Args:
+  parent: string, Required. The parent resource of the notes. (required)
+  pageSize: integer, Optional. The maximum number of notes to return in the response. If zero, the service will select a default size. A call might return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.
+  pageToken: string, Optional. The value returned by the last `ListNotesResponse`. This value indicates that this is a continuation of a prior `ListNotes` call and that the system should return the next page of data.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response of listing notes.
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "notes": [ # The notes that match the request.
+    { # The conversation assessment note resource.
+      "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+      },
+      "content": "A String", # The note content.
+      "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+        "turnIndex": 42, # The conversation turn index that the note is associated with.
+      },
+      "createTime": "A String", # Output only. The time at which the note was created.
+      "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+      "noteCreator": { # Information about a user. # Output only. The user that created the note.
+        "username": "A String", # The user's username.
+      },
+      "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+        "qaQuestion": "A String", # The question resource that the note is associated with.
+      },
+      "updateTime": "A String", # Output only. The time at which the note was last updated.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Update Note.
+
+Args:
+  name: string, Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
+  updateMask: string, Optional. The list of fields to be updated. If the update_mask is empty, all updatable fields will be updated. Acceptable fields include: * `content`
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
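+A patch sketch restricted to the `content` field, the documented acceptable mask value, reusing the note created in the sketch above:
+
+  notes.patch(
+      name=note["name"],
+      body={"content": "Agent verified identity and resolved the billing issue."},
+      updateMask="content",
+  ).execute()
+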
+ + \ No newline at end of file diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.html b/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.html index e7bc52c555..63c34ea5a5 100644 --- a/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.html +++ b/docs/dyn/contactcenterinsights_v1.projects.locations.authorizedViewSets.authorizedViews.conversations.html @@ -79,6 +79,11 @@

Instance Methods

Returns the analyses Resource.

+

+ assessments() +

+

Returns the assessments Resource.

+

feedbackLabels()

diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.assessments.html b/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.assessments.html new file mode 100644 index 0000000000..67aa4c60e5 --- /dev/null +++ b/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.assessments.html @@ -0,0 +1,393 @@ + + + +

Contact Center AI Insights API . projects . locations . conversations . assessments

+

Instance Methods

+

+ notes() +

+

Returns the notes Resource.

+ +

+ appeal(name, body=None, x__xgafv=None)

+

Appeal an Assessment.

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Create Assessment.

+

+ delete(name, force=None, x__xgafv=None)

+

Delete an Assessment.

+

+ finalize(name, body=None, x__xgafv=None)

+

Finalize an Assessment.

+

+ get(name, x__xgafv=None)

+

Get Assessment.

+

+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

List Assessments.

+

+ list_next()

+

Retrieves the next page of results.

+

+ publish(name, body=None, x__xgafv=None)

+

Publish an Assessment.

+

Method Details

+
+ appeal(name, body=None, x__xgafv=None) +
Appeal an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to appeal. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The message to appeal an assessment.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
Create Assessment.
+
+Args:
+  parent: string, Required. The parent resource of the assessment. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+ +
+ delete(name, force=None, x__xgafv=None) +
Delete an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to delete. (required)
+  force: boolean, Optional. If set to true, all of this assessment's notes will also be deleted. Otherwise, the request will only succeed if the assessment has no notes.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ finalize(name, body=None, x__xgafv=None) +
Finalize an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to finalize. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The message to finalize an assessment. Finalizing makes an assessment and its notes immutable.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+ +
+ get(name, x__xgafv=None) +
Get Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to get. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
+ +
+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
List Assessments.
+
+Args:
+  parent: string, Required. The parent resource of the assessments. To list all assessments in a location, substitute the conversation ID with a '-' character. (required)
+  filter: string, Optional. A filter to reduce results to a specific subset. Supported filters include: * `state` - The state of the assessment * `agent_info.agent_id` - The ID of the agent the assessment is for
+  pageSize: integer, The maximum number of assessments to list. If zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.
+  pageToken: string, Optional. The value returned by the last `ListAssessmentsResponse`; indicates that this is a continuation of a prior `ListAssessments` call and the system should return the next page of data.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response of listing assessments.
+  "assessments": [ # The assessments that match the request.
+    { # The assessment resource.
+      "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+        "agentId": "A String", # A user-specified string representing the agent.
+        "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+        "displayName": "A String", # The agent's name.
+        "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+        "location": "A String", # The agent's location.
+        "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+        "teams": [ # User-specified strings representing the agent's teams.
+          "A String",
+        ],
+      },
+      "createTime": "A String", # Output only. The time at which the assessment was created.
+      "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+      "state": "A String", # Output only. The state of the assessment.
+      "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ publish(name, body=None, x__xgafv=None) +
Publish an Assessment.
+
+Args:
+  name: string, Required. The name of the assessment to publish. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The message to publish an assessment. Draft and appealed assessments can be published. Publishing simply changes the state of the assessment to published, allowing the console and authorized views to filter on the state.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The assessment resource.
+  "agentInfo": { # Information about an agent involved in the conversation. # Information about the agent the assessment is for.
+    "agentId": "A String", # A user-specified string representing the agent.
+    "agentType": "A String", # The agent type, e.g. HUMAN_AGENT.
+    "displayName": "A String", # The agent's name.
+    "dispositionCode": "A String", # A user-provided string indicating the outcome of the agent's segment of the call.
+    "location": "A String", # The agent's location.
+    "team": "A String", # A user-specified string representing the agent's team. Deprecated in favor of the `teams` field.
+    "teams": [ # User-specified strings representing the agent's teams.
+      "A String",
+    ],
+  },
+  "createTime": "A String", # Output only. The time at which the assessment was created.
+  "name": "A String", # Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}
+  "state": "A String", # Output only. The state of the assessment.
+  "updateTime": "A String", # Output only. The time at which the assessment was last updated.
+}
+
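+The methods above mirror the authorized-view assessments documented earlier in this change; only the resource path differs. A one-call sketch of the direct conversation path (identifiers are placeholders):
+
+  service.projects().locations().conversations().assessments().get(
+      name="projects/my-project/locations/us-central1/conversations"
+           "/my-conversation/assessments/my-assessment"
+  ).execute()
+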
+ + \ No newline at end of file diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.assessments.notes.html b/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.assessments.notes.html new file mode 100644 index 0000000000..b4f287e9f5 --- /dev/null +++ b/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.assessments.notes.html @@ -0,0 +1,282 @@ + + + +

Contact Center AI Insights API . projects . locations . conversations . assessments . notes

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Create Note.

+

+ delete(name, x__xgafv=None)

+

Deletes a Note.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List Notes.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Update Note.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
Create Note.
+
+Args:
+  parent: string, Required. The parent resource of the note. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a Note.
+
+Args:
+  name: string, Required. The name of the note to delete. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List Notes.
+
+Args:
+  parent: string, Required. The parent resource of the notes. (required)
+  pageSize: integer, Optional. The maximum number of notes to return in the response. If zero, the service will select a default size. A call might return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.
+  pageToken: string, Optional. The value returned by the last `ListNotesResponse`. This value indicates that this is a continuation of a prior `ListNotes` call and that the system should return the next page of data.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response of listing notes.
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "notes": [ # The notes that match the request.
+    { # The conversation assessment note resource.
+      "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+      },
+      "content": "A String", # The note content.
+      "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+        "turnIndex": 42, # The conversation turn index that the note is associated with.
+      },
+      "createTime": "A String", # Output only. The time at which the note was created.
+      "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+      "noteCreator": { # Information about a user. # Output only. The user that created the note.
+        "username": "A String", # The user's username.
+      },
+      "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+        "qaQuestion": "A String", # The question resource that the note is associated with.
+      },
+      "updateTime": "A String", # Output only. The time at which the note was last updated.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Update Note.
+
+Args:
+  name: string, Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
+  updateMask: string, Optional. The list of fields to be updated. If the update_mask is empty, all updatable fields will be updated. Acceptable fields include: * `content`
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The conversation assessment note resource.
+  "assessmentNote": { # A note about the entire parent assessment. # The note is associated to the entire parent assessment.
+  },
+  "content": "A String", # The note content.
+  "conversationTurnNote": { # A note about a conversation turn. # The note is associated with a conversation turn.
+    "turnIndex": 42, # The conversation turn index that the note is associated with.
+  },
+  "createTime": "A String", # Output only. The time at which the note was created.
+  "name": "A String", # Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}
+  "noteCreator": { # Information about a user. # Output only. The user that created the note.
+    "username": "A String", # The user's username.
+  },
+  "qaQuestionNote": { # A note about a QA question. # The note is associated with a QA question in one of the conversation's scorecard results.
+    "qaQuestion": "A String", # The question resource that the note is associated with.
+  },
+  "updateTime": "A String", # Output only. The time at which the note was last updated.
+}
+
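A matching sketch for patch(): only `content` is listed as updatable, so the mask names just that field. This reuses `notes` and `parent` from the pagination sketch above; the note ID is a placeholder:

    note = notes.patch(
        name=parent + '/notes/my-note',
        updateMask='content',
        body={'content': 'Follow-up complete; issue resolved.'},
    ).execute()
    print(note['updateTime'])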
+
\ No newline at end of file
diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.html b/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.html
index 1233577903..732479ab03 100644
--- a/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.html
+++ b/docs/dyn/contactcenterinsights_v1.projects.locations.conversations.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the analyses Resource.

+
+  assessments()
+
+Returns the assessments Resource.
+
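For orientation, a one-line sketch of reaching the new child collection with the discovery-based Python client (any IDs further down the chain would be placeholders):

    from googleapiclient.discovery import build

    assessments = (build('contactcenterinsights', 'v1')
                   .projects().locations().conversations().assessments())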

feedbackLabels()

diff --git a/docs/dyn/contactcenterinsights_v1.projects.locations.html b/docs/dyn/contactcenterinsights_v1.projects.locations.html
index 99000b0f7b..f31eb0db9f 100644
--- a/docs/dyn/contactcenterinsights_v1.projects.locations.html
+++ b/docs/dyn/contactcenterinsights_v1.projects.locations.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the analysisRules Resource.

+
+  assessmentRules()
+
+Returns the assessmentRules Resource.
+
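The analogous sketch for the new location-level collection:

    from googleapiclient.discovery import build

    assessment_rules = (build('contactcenterinsights', 'v1')
                        .projects().locations().assessmentRules())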

authorizedViewSets()

diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html
index 8394997718..e2a3aa670b 100644
--- a/docs/dyn/container_v1.projects.locations.clusters.html
+++ b/docs/dyn/container_v1.projects.locations.clusters.html
@@ -295,6 +295,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -323,6 +326,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -664,6 +672,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -963,6 +972,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1521,6 +1531,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -1549,6 +1562,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -1890,6 +1908,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2189,6 +2208,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2611,6 +2631,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -2639,6 +2662,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -2980,6 +3008,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -3279,6 +3308,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -3661,6 +3691,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -4551,6 +4584,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -4579,6 +4615,8 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "desiredAnonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # The desired authenticator groups config for the cluster. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. diff --git a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html index 605c184c03..1d2e3f893b 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html @@ -192,6 +192,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -677,6 +678,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -989,6 +991,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html index f2e52322f0..a82e898351 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.html +++ b/docs/dyn/container_v1.projects.zones.clusters.html @@ -172,6 +172,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -386,6 +389,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -414,6 +420,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -755,6 +766,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1054,6 +1066,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1612,6 +1625,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -1640,6 +1656,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -1981,6 +2002,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2280,6 +2302,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2746,6 +2769,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -2774,6 +2800,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -3115,6 +3146,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -3414,6 +3446,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -4578,6 +4611,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -4606,6 +4642,8 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "desiredAnonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # The desired authenticator groups config for the cluster. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. diff --git a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html index 8f1bcdc531..ca07818253 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html @@ -257,6 +257,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -742,6 +743,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1054,6 +1056,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.html b/docs/dyn/container_v1beta1.projects.locations.clusters.html index 69defcc481..c7e932bc94 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.html @@ -295,6 +295,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -330,6 +333,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -689,6 +697,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1010,6 +1019,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1611,6 +1621,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -1646,6 +1659,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -2005,6 +2023,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2326,6 +2345,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2791,6 +2811,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -2826,6 +2849,11 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -3185,6 +3213,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -3506,6 +3535,7 @@

Method Details

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -3931,6 +3961,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -4828,6 +4861,9 @@

Method Details

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -4863,6 +4899,8 @@

Method Details

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "desiredAnonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # AuthenticatorGroupsConfig specifies the config for the cluster security groups settings. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html index 85ac9aec28..8b949d17f4 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html @@ -193,6 +193,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -695,6 +696,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1024,6 +1026,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.html b/docs/dyn/container_v1beta1.projects.zones.clusters.html index ffb10eec12..db028ae656 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.html @@ -172,6 +172,9 @@

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -393,6 +396,9 @@

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -428,6 +434,11 @@

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -787,6 +798,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1108,6 +1120,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1709,6 +1722,9 @@

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -1744,6 +1760,11 @@

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -2103,6 +2124,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2424,6 +2446,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -2933,6 +2956,9 @@

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -2968,6 +2994,11 @@

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "alphaClusterFeatureGates": [ # The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. "featureX=true" or "featureX=false") + "A String", + ], + "anonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "authenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # Configuration controlling RBAC group membership information. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. @@ -3327,6 +3358,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -3648,6 +3680,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -4855,6 +4888,9 @@

"gkeBackupAgentConfig": { # Configuration for the Backup for GKE Agent. # Configuration for the Backup for GKE agent addon. "enabled": True or False, # Whether the Backup for GKE agent is enabled for this cluster. }, + "highScaleCheckpointingConfig": { # Configuration for the High Scale Checkpointing. # Configuration for the High Scale Checkpointing add-on. + "enabled": True or False, # Whether the High Scale Checkpointing is enabled for this cluster. + }, "horizontalPodAutoscaling": { # Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. # Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. "disabled": True or False, # Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring. }, @@ -4890,6 +4926,8 @@

"enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, }, + "desiredAnonymousAuthenticationConfig": { # AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication. # Configuration for limiting anonymous access to all endpoints except the health checks. + }, "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # AuthenticatorGroupsConfig specifies the config for the cluster security groups settings. "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html index 75b8d74bd0..17bff9ac2a 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html @@ -258,6 +258,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -760,6 +761,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption @@ -1089,6 +1091,7 @@

], "advancedMachineFeatures": { # Specifies options for controlling advanced machine features. # Advanced features for the Compute Engine VM. "enableNestedVirtualization": True or False, # Whether or not to enable nested virtualization (defaults to false). + "performanceMonitoringUnit": "A String", # Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node. "threadsPerCore": "A String", # The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. }, "bootDiskKmsKey": "A String", # The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption diff --git a/docs/dyn/datacatalog_v1.projects.locations.entryGroups.entries.tags.html b/docs/dyn/datacatalog_v1.projects.locations.entryGroups.entries.tags.html index ff5f464e78..111de585b8 100644 --- a/docs/dyn/datacatalog_v1.projects.locations.entryGroups.entries.tags.html +++ b/docs/dyn/datacatalog_v1.projects.locations.entryGroups.entries.tags.html @@ -94,7 +94,7 @@

Instance Methods

Updates an existing tag.
reconcile(parent, body=None, x__xgafv=None)
-`ReconcileTags` creates or updates a list of tags on the entry. If the ReconcileTagsRequest.force_delete_missing parameter is set, the operation deletes tags not included in the input tag list. `ReconcileTags` returns a long-running operation resource that can be queried with Operations.GetOperation to return ReconcileTagsMetadata and a ReconcileTagsResponse message.
+`ReconcileTags` creates or updates a list of tags on the entry. If the ReconcileTagsRequest.force_delete_missing parameter is set, the operation deletes tags not included in the input tag list. `ReconcileTags` returns a long-running operation resource that can be queried with Operations.GetOperation to return ReconcileTagsMetadata and a ReconcileTagsResponse message. Note: SearchCatalog might return stale search results for up to 24 hours after the `ReconcileTags` operation completes.

close() @@ -304,7 +304,7 @@

reconcile(parent, body=None, x__xgafv=None)
-`ReconcileTags` creates or updates a list of tags on the entry. If the ReconcileTagsRequest.force_delete_missing parameter is set, the operation deletes tags not included in the input tag list. `ReconcileTags` returns a long-running operation resource that can be queried with Operations.GetOperation to return ReconcileTagsMetadata and a ReconcileTagsResponse message.
+`ReconcileTags` creates or updates a list of tags on the entry. If the ReconcileTagsRequest.force_delete_missing parameter is set, the operation deletes tags not included in the input tag list. `ReconcileTags` returns a long-running operation resource that can be queried with Operations.GetOperation to return ReconcileTagsMetadata and a ReconcileTagsResponse message. Note: SearchCatalog might return stale search results for up to 24 hours after the `ReconcileTags` operation completes.
 
 Args:
   parent: string, Required. Name of Entry to be tagged. (required)
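To illustrate the documented behavior, a minimal sketch of a reconcile call; the template name and tag fields are placeholders, and the body keys assume the API's usual lowerCamelCase mapping of ReconcileTagsRequest:

from googleapiclient.discovery import build

datacatalog = build("datacatalog", "v1")
operation = (
    datacatalog.projects().locations().entryGroups().entries().tags().reconcile(
        parent="projects/PROJECT/locations/LOCATION/entryGroups/GROUP/entries/ENTRY",
        body={
            "tagTemplate": "projects/PROJECT/locations/LOCATION/tagTemplates/TEMPLATE",
            "forceDeleteMissing": True,  # delete tags missing from "tags"
            "tags": [
                {
                    "template": "projects/PROJECT/locations/LOCATION/tagTemplates/TEMPLATE",
                    "fields": {"owner": {"stringValue": "data-team"}},
                }
            ],
        },
    ).execute()
)
# Poll the returned long-running operation with Operations.GetOperation,
# and note the 24-hour staleness window for SearchCatalog mentioned above.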
diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.debug.html b/docs/dyn/dataflow_v1b3.projects.jobs.debug.html
index cef81704c6..ae7c32dee1 100644
--- a/docs/dyn/dataflow_v1b3.projects.jobs.debug.html
+++ b/docs/dyn/dataflow_v1b3.projects.jobs.debug.html
@@ -80,6 +80,9 @@ Instance Methods
getConfig(projectId, jobId, body=None, x__xgafv=None)
Get encoded debug configuration for component. Not cacheable.
+getWorkerStacktraces(projectId, jobId, body=None, x__xgafv=None)
+Get worker stacktraces from debug capture.
sendCapture(projectId, jobId, body=None, x__xgafv=None)
Send encoded debug capture data for component.

@@ -118,6 +121,46 @@ Method Details
}
+
+getWorkerStacktraces(projectId, jobId, body=None, x__xgafv=None)
+Get worker stacktraces from debug capture.
+
+Args:
+  projectId: string, The project id. (required)
+  jobId: string, The job for which to get stacktraces. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request to get worker stacktraces from debug capture.
+  "workerId": "A String", # The worker for which to get stacktraces. The returned stacktraces will be for the SDK harness running on this worker.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response to get worker stacktraces from debug capture.
+  "sdks": [ # Repeated as unified worker may have multiple SDK processes.
+    { # A structured representation of an SDK.
+      "sdkId": "A String", # The SDK harness id.
+      "stacks": [ # The stacktraces for the processes running on the SDK harness.
+        { # A structured stacktrace for a process running on the worker.
+          "stackContent": "A String", # The raw stack trace.
+          "threadCount": 42, # Java thread dumps may collapse identical stacks, e.g., N threads in stack "". Instead of copying the same stack trace N times, this int field captures the thread count.
+          "threadName": "A String", # Thread name. For example, "CommitThread-0,10,main"
+          "threadState": "A String", # The state of the thread. For example, "WAITING".
+          "timestamp": "A String", # Timestamp at which the stack was captured.
+        },
+      ],
+    },
+  ],
+}
+
+
sendCapture(projectId, jobId, body=None, x__xgafv=None)
Send encoded debug capture data for component.
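A usage sketch for the new debug method; project, job, and worker IDs are placeholders, and the response is walked according to the schema above:

from googleapiclient.discovery import build

dataflow = build("dataflow", "v1b3")
response = (
    dataflow.projects().jobs().debug().getWorkerStacktraces(
        projectId="PROJECT",
        jobId="JOB_ID",
        body={"workerId": "WORKER_ID"},  # worker whose SDK harness to sample
    ).execute()
)
for sdk in response.get("sdks", []):  # one entry per SDK process
    for stack in sdk.get("stacks", []):
        print(sdk["sdkId"], stack.get("threadName"), stack.get("threadState"))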
diff --git a/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html b/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html
index 4069be3465..873353f7ce 100644
--- a/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html
+++ b/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html
@@ -118,6 +118,7 @@ 

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -168,6 +169,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -225,6 +227,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -288,6 +291,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). diff --git a/docs/dyn/dataform_v1beta1.projects.locations.repositories.releaseConfigs.html b/docs/dyn/dataform_v1beta1.projects.locations.repositories.releaseConfigs.html index 3032d383b0..65379758b6 100644 --- a/docs/dyn/dataform_v1beta1.projects.locations.repositories.releaseConfigs.html +++ b/docs/dyn/dataform_v1beta1.projects.locations.repositories.releaseConfigs.html @@ -118,6 +118,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -168,6 +169,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -242,6 +244,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -303,6 +306,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -373,6 +377,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). @@ -423,6 +428,7 @@

"defaultDatabase": "A String", # Optional. The default database (Google Cloud project ID). "defaultLocation": "A String", # Optional. The default BigQuery location to use. Defaults to "US". See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. "defaultNotebookRuntimeOptions": { # Configures various aspects of Dataform notebook runtime. # Optional. The default notebook runtime options. + "aiPlatformNotebookRuntimeTemplate": "A String", # Optional. The resource name of the [Colab runtime template] (https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications. "gcsOutputBucket": "A String", # Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`. }, "defaultSchema": "A String", # Optional. The default schema (BigQuery dataset ID). diff --git a/docs/dyn/datamigration_v1.projects.locations.connectionProfiles.html b/docs/dyn/datamigration_v1.projects.locations.connectionProfiles.html index e8a95885eb..bb2d17c2ea 100644 --- a/docs/dyn/datamigration_v1.projects.locations.connectionProfiles.html +++ b/docs/dyn/datamigration_v1.projects.locations.connectionProfiles.html @@ -135,7 +135,7 @@

"labels": { # Labels for the AlloyDB cluster created by DMS. An object containing a list of 'key', 'value' pairs. "a_key": "A String", }, - "primaryInstanceSettings": { # Settings for the cluster's primary instance + "primaryInstanceSettings": { # Settings for the cluster's primary instance # Settings for the cluster's primary instance "databaseFlags": { # Database flags to pass to AlloyDB when DMS is creating the AlloyDB cluster and instances. See the AlloyDB documentation for how these can be used. "a_key": "A String", }, @@ -238,6 +238,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. @@ -263,6 +266,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. Username for the Oracle ASM connection. @@ -277,6 +283,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticServiceIpConnectivity": { # Static IP address connectivity configured on service project. # Static Service IP connectivity. @@ -299,6 +308,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static ip connectivity data (default, no additional details needed). @@ -337,6 +349,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static IP connectivity data (default, no additional details needed). @@ -447,7 +462,7 @@

"labels": { # Labels for the AlloyDB cluster created by DMS. An object containing a list of 'key', 'value' pairs. "a_key": "A String", }, - "primaryInstanceSettings": { # Settings for the cluster's primary instance + "primaryInstanceSettings": { # Settings for the cluster's primary instance # Settings for the cluster's primary instance "databaseFlags": { # Database flags to pass to AlloyDB when DMS is creating the AlloyDB cluster and instances. See the AlloyDB documentation for how these can be used. "a_key": "A String", }, @@ -550,6 +565,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. @@ -575,6 +593,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. Username for the Oracle ASM connection. @@ -589,6 +610,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticServiceIpConnectivity": { # Static IP address connectivity configured on service project. # Static Service IP connectivity. @@ -611,6 +635,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static ip connectivity data (default, no additional details needed). @@ -649,6 +676,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static IP connectivity data (default, no additional details needed). @@ -744,7 +774,7 @@

"labels": { # Labels for the AlloyDB cluster created by DMS. An object containing a list of 'key', 'value' pairs. "a_key": "A String", }, - "primaryInstanceSettings": { # Settings for the cluster's primary instance + "primaryInstanceSettings": { # Settings for the cluster's primary instance # Settings for the cluster's primary instance "databaseFlags": { # Database flags to pass to AlloyDB when DMS is creating the AlloyDB cluster and instances. See the AlloyDB documentation for how these can be used. "a_key": "A String", }, @@ -847,6 +877,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. @@ -872,6 +905,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. Username for the Oracle ASM connection. @@ -886,6 +922,9 @@

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticServiceIpConnectivity": { # Static IP address connectivity configured on service project. # Static Service IP connectivity. @@ -908,6 +947,9 @@

Method Details

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static ip connectivity data (default, no additional details needed). @@ -946,6 +988,9 @@

Method Details

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static IP connectivity data (default, no additional details needed). @@ -1002,7 +1047,7 @@

Method Details

"labels": { # Labels for the AlloyDB cluster created by DMS. An object containing a list of 'key', 'value' pairs. "a_key": "A String", }, - "primaryInstanceSettings": { # Settings for the cluster's primary instance + "primaryInstanceSettings": { # Settings for the cluster's primary instance # Settings for the cluster's primary instance "databaseFlags": { # Database flags to pass to AlloyDB when DMS is creating the AlloyDB cluster and instances. See the AlloyDB documentation for how these can be used. "a_key": "A String", }, @@ -1105,6 +1150,9 @@

Method Details

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. @@ -1130,6 +1178,9 @@

Method Details

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "username": "A String", # Required. Username for the Oracle ASM connection. @@ -1144,6 +1195,9 @@

Method Details

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticServiceIpConnectivity": { # Static IP address connectivity configured on service project. # Static Service IP connectivity. @@ -1166,6 +1220,9 @@

Method Details

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static ip connectivity data (default, no additional details needed). @@ -1204,6 +1261,9 @@

Method Details

"caCertificate": "A String", # Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. The replica will use this certificate to verify it's connecting to the right host. "clientCertificate": "A String", # Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server.If this field is used then the 'client_key' field is mandatory. "clientKey": "A String", # Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory. + "sslFlags": { # Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of "key": "value" pairs. Example: { "server_certificate_hostname": "server.com"}. + "a_key": "A String", + }, "type": "A String", # Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'. }, "staticIpConnectivity": { # The source database will allow incoming connections from the public IP of the destination database. You can retrieve the public IP of the Cloud SQL instance from the Cloud SQL console or using Cloud SQL APIs. No additional configuration is required. # Static IP connectivity data (default, no additional details needed). diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.html index bbb12c127f..b890b7aefc 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.html @@ -157,6 +157,7 @@

Method Details

"bigqueryPublishingConfig": { # Describes BigQuery publishing configurations. # Optional. Configuration for metadata publishing. "connection": "A String", # Optional. The BigQuery connection used to create BigLake tables. Must be in the form projects/{project_id}/locations/{location_id}/connections/{connection_id} "location": "A String", # Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations. + "project": "A String", # Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". "tableType": "A String", # Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. }, "storageConfig": { # Configurations related to Cloud Storage as the data source. # Cloud Storage related configurations. @@ -258,9 +259,22 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of a data quality scan. + "catalogPublishingStatus": { # The status of publishing the data scan result to Catalog. # Output only. The status of publishing the data scan to Catalog. + "state": "A String", # Output only. Execution state for catalog publishing. + }, "columns": [ # Output only. A list of results at the column level.A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it. { # DataQualityColumnResult provides a more detailed, per-column view of the results. "column": "A String", # Output only. The column specified in the DataQualityRule. + "dimensions": [ # Output only. The dimension-level results for this column. + { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. + "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. + "name": "A String", # Optional. The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. + }, + "passed": True or False, # Output only. Whether the dimension passed or failed. + "score": 3.14, # Output only. The dimension-level data quality score for this data scan job if and only if the 'dimension' field is set.The score ranges between 0, 100 (up to two decimal points). + }, + ], + "passed": True or False, # Output only. Whether the column passed or failed. "score": 3.14, # Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set.The score ranges between between 0, 100 (up to two decimal points). }, ], @@ -345,6 +359,7 @@

Method Details

"score": 3.14, # Output only. The overall data quality score.The score ranges between 0, 100 (up to two decimal points). }, "dataQualitySpec": { # DataQualityScan related setting. # Settings for a data quality scan. + "catalogPublishingEnabled": True or False, # Optional. If set, the latest DataScan job result will be published to Dataplex Catalog. "postScanActions": { # The configuration of post scan actions of DataQualityScan. # Optional. Actions to take upon job completion. "bigqueryExport": { # The configuration of BigQuery export post scan action. # Optional. If set, results will be exported to the provided BigQuery table. "resultsTable": "A String", # Optional. The BigQuery table to export DataQualityScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID or projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID @@ -621,6 +636,7 @@

Method Details

"bigqueryPublishingConfig": { # Describes BigQuery publishing configurations. # Optional. Configuration for metadata publishing. "connection": "A String", # Optional. The BigQuery connection used to create BigLake tables. Must be in the form projects/{project_id}/locations/{location_id}/connections/{connection_id} "location": "A String", # Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations. + "project": "A String", # Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". "tableType": "A String", # Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. }, "storageConfig": { # Configurations related to Cloud Storage as the data source. # Cloud Storage related configurations. @@ -722,9 +738,22 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of a data quality scan. + "catalogPublishingStatus": { # The status of publishing the data scan result to Catalog. # Output only. The status of publishing the data scan to Catalog. + "state": "A String", # Output only. Execution state for catalog publishing. + }, "columns": [ # Output only. A list of results at the column level.A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it. { # DataQualityColumnResult provides a more detailed, per-column view of the results. "column": "A String", # Output only. The column specified in the DataQualityRule. + "dimensions": [ # Output only. The dimension-level results for this column. + { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. + "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. + "name": "A String", # Optional. The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. + }, + "passed": True or False, # Output only. Whether the dimension passed or failed. + "score": 3.14, # Output only. The dimension-level data quality score for this data scan job if and only if the 'dimension' field is set.The score ranges between 0, 100 (up to two decimal points). + }, + ], + "passed": True or False, # Output only. Whether the column passed or failed. "score": 3.14, # Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set.The score ranges between between 0, 100 (up to two decimal points). }, ], @@ -809,6 +838,7 @@

Method Details

"score": 3.14, # Output only. The overall data quality score.The score ranges between 0, 100 (up to two decimal points). }, "dataQualitySpec": { # DataQualityScan related setting. # Settings for a data quality scan. + "catalogPublishingEnabled": True or False, # Optional. If set, the latest DataScan job result will be published to Dataplex Catalog. "postScanActions": { # The configuration of post scan actions of DataQualityScan. # Optional. Actions to take upon job completion. "bigqueryExport": { # The configuration of BigQuery export post scan action. # Optional. If set, results will be exported to the provided BigQuery table. "resultsTable": "A String", # Optional. The BigQuery table to export DataQualityScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID or projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID @@ -999,6 +1029,7 @@

Method Details

"bigqueryPublishingConfig": { # Describes BigQuery publishing configurations. # Optional. Configuration for metadata publishing. "connection": "A String", # Optional. The BigQuery connection used to create BigLake tables. Must be in the form projects/{project_id}/locations/{location_id}/connections/{connection_id} "location": "A String", # Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations. + "project": "A String", # Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". "tableType": "A String", # Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. }, "storageConfig": { # Configurations related to Cloud Storage as the data source. # Cloud Storage related configurations. @@ -1100,9 +1131,22 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of a data quality scan. + "catalogPublishingStatus": { # The status of publishing the data scan result to Catalog. # Output only. The status of publishing the data scan to Catalog. + "state": "A String", # Output only. Execution state for catalog publishing. + }, "columns": [ # Output only. A list of results at the column level.A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it. { # DataQualityColumnResult provides a more detailed, per-column view of the results. "column": "A String", # Output only. The column specified in the DataQualityRule. + "dimensions": [ # Output only. The dimension-level results for this column. + { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. + "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. + "name": "A String", # Optional. The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. + }, + "passed": True or False, # Output only. Whether the dimension passed or failed. + "score": 3.14, # Output only. The dimension-level data quality score for this data scan job if and only if the 'dimension' field is set.The score ranges between 0, 100 (up to two decimal points). + }, + ], + "passed": True or False, # Output only. Whether the column passed or failed. "score": 3.14, # Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set.The score ranges between between 0, 100 (up to two decimal points). }, ], @@ -1187,6 +1231,7 @@

Method Details

"score": 3.14, # Output only. The overall data quality score.The score ranges between 0, 100 (up to two decimal points). }, "dataQualitySpec": { # DataQualityScan related setting. # Settings for a data quality scan. + "catalogPublishingEnabled": True or False, # Optional. If set, the latest DataScan job result will be published to Dataplex Catalog. "postScanActions": { # The configuration of post scan actions of DataQualityScan. # Optional. Actions to take upon job completion. "bigqueryExport": { # The configuration of BigQuery export post scan action. # Optional. If set, results will be exported to the provided BigQuery table. "resultsTable": "A String", # Optional. The BigQuery table to export DataQualityScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID or projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID @@ -1338,6 +1383,7 @@

Method Details

"bigqueryPublishingConfig": { # Describes BigQuery publishing configurations. # Optional. Configuration for metadata publishing. "connection": "A String", # Optional. The BigQuery connection used to create BigLake tables. Must be in the form projects/{project_id}/locations/{location_id}/connections/{connection_id} "location": "A String", # Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations. + "project": "A String", # Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". "tableType": "A String", # Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. }, "storageConfig": { # Configurations related to Cloud Storage as the data source. # Cloud Storage related configurations. @@ -1439,9 +1485,22 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of a data quality scan. + "catalogPublishingStatus": { # The status of publishing the data scan result to Catalog. # Output only. The status of publishing the data scan to Catalog. + "state": "A String", # Output only. Execution state for catalog publishing. + }, "columns": [ # Output only. A list of results at the column level.A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it. { # DataQualityColumnResult provides a more detailed, per-column view of the results. "column": "A String", # Output only. The column specified in the DataQualityRule. + "dimensions": [ # Output only. The dimension-level results for this column. + { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. + "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. + "name": "A String", # Optional. The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. + }, + "passed": True or False, # Output only. Whether the dimension passed or failed. + "score": 3.14, # Output only. The dimension-level data quality score for this data scan job if and only if the 'dimension' field is set.The score ranges between 0, 100 (up to two decimal points). + }, + ], + "passed": True or False, # Output only. Whether the column passed or failed. "score": 3.14, # Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set.The score ranges between between 0, 100 (up to two decimal points). }, ], @@ -1526,6 +1585,7 @@

Method Details

"score": 3.14, # Output only. The overall data quality score.The score ranges between 0, 100 (up to two decimal points). }, "dataQualitySpec": { # DataQualityScan related setting. # Settings for a data quality scan. + "catalogPublishingEnabled": True or False, # Optional. If set, the latest DataScan job result will be published to Dataplex Catalog. "postScanActions": { # The configuration of post scan actions of DataQualityScan. # Optional. Actions to take upon job completion. "bigqueryExport": { # The configuration of BigQuery export post scan action. # Optional. If set, results will be exported to the provided BigQuery table. "resultsTable": "A String", # Optional. The BigQuery table to export DataQualityScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID or projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID @@ -1695,6 +1755,7 @@

Method Details

"bigqueryPublishingConfig": { # Describes BigQuery publishing configurations. # Optional. Configuration for metadata publishing. "connection": "A String", # Optional. The BigQuery connection used to create BigLake tables. Must be in the form projects/{project_id}/locations/{location_id}/connections/{connection_id} "location": "A String", # Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations. + "project": "A String", # Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". "tableType": "A String", # Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. }, "storageConfig": { # Configurations related to Cloud Storage as the data source. # Cloud Storage related configurations. @@ -1796,9 +1857,22 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of a data quality scan. + "catalogPublishingStatus": { # The status of publishing the data scan result to Catalog. # Output only. The status of publishing the data scan to Catalog. + "state": "A String", # Output only. Execution state for catalog publishing. + }, "columns": [ # Output only. A list of results at the column level.A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it. { # DataQualityColumnResult provides a more detailed, per-column view of the results. "column": "A String", # Output only. The column specified in the DataQualityRule. + "dimensions": [ # Output only. The dimension-level results for this column. + { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. + "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. + "name": "A String", # Optional. The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. + }, + "passed": True or False, # Output only. Whether the dimension passed or failed. + "score": 3.14, # Output only. The dimension-level data quality score for this data scan job if and only if the 'dimension' field is set.The score ranges between 0, 100 (up to two decimal points). + }, + ], + "passed": True or False, # Output only. Whether the column passed or failed. "score": 3.14, # Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set.The score ranges between between 0, 100 (up to two decimal points). }, ], @@ -1883,6 +1957,7 @@

Method Details

"score": 3.14, # Output only. The overall data quality score.The score ranges between 0, 100 (up to two decimal points). }, "dataQualitySpec": { # DataQualityScan related setting. # Output only. Settings for a data quality scan. + "catalogPublishingEnabled": True or False, # Optional. If set, the latest DataScan job result will be published to Dataplex Catalog. "postScanActions": { # The configuration of post scan actions of DataQualityScan. # Optional. Actions to take upon job completion. "bigqueryExport": { # The configuration of BigQuery export post scan action. # Optional. If set, results will be exported to the provided BigQuery table. "resultsTable": "A String", # Optional. The BigQuery table to export DataQualityScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID or projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html index 9f79544f9b..e4d68f01b6 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html @@ -206,6 +206,7 @@

Method Details

"bigqueryPublishingConfig": { # Describes BigQuery publishing configurations. # Optional. Configuration for metadata publishing. "connection": "A String", # Optional. The BigQuery connection used to create BigLake tables. Must be in the form projects/{project_id}/locations/{location_id}/connections/{connection_id} "location": "A String", # Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations. + "project": "A String", # Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". "tableType": "A String", # Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. }, "storageConfig": { # Configurations related to Cloud Storage as the data source. # Cloud Storage related configurations. @@ -307,9 +308,22 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of a data quality scan. + "catalogPublishingStatus": { # The status of publishing the data scan result to Catalog. # Output only. The status of publishing the data scan to Catalog. + "state": "A String", # Output only. Execution state for catalog publishing. + }, "columns": [ # Output only. A list of results at the column level.A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it. { # DataQualityColumnResult provides a more detailed, per-column view of the results. "column": "A String", # Output only. The column specified in the DataQualityRule. + "dimensions": [ # Output only. The dimension-level results for this column. + { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. + "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. + "name": "A String", # Optional. The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. + }, + "passed": True or False, # Output only. Whether the dimension passed or failed. + "score": 3.14, # Output only. The dimension-level data quality score for this data scan job if and only if the 'dimension' field is set.The score ranges between 0, 100 (up to two decimal points). + }, + ], + "passed": True or False, # Output only. Whether the column passed or failed. "score": 3.14, # Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set.The score ranges between between 0, 100 (up to two decimal points). }, ], @@ -394,6 +408,7 @@

Method Details

"score": 3.14, # Output only. The overall data quality score.The score ranges between 0, 100 (up to two decimal points). }, "dataQualitySpec": { # DataQualityScan related setting. # Output only. Settings for a data quality scan. + "catalogPublishingEnabled": True or False, # Optional. If set, the latest DataScan job result will be published to Dataplex Catalog. "postScanActions": { # The configuration of post scan actions of DataQualityScan. # Optional. Actions to take upon job completion. "bigqueryExport": { # The configuration of BigQuery export post scan action. # Optional. If set, results will be exported to the provided BigQuery table. "resultsTable": "A String", # Optional. The BigQuery table to export DataQualityScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID or projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID @@ -513,6 +528,7 @@

Method Details

"bigqueryPublishingConfig": { # Describes BigQuery publishing configurations. # Optional. Configuration for metadata publishing. "connection": "A String", # Optional. The BigQuery connection used to create BigLake tables. Must be in the form projects/{project_id}/locations/{location_id}/connections/{connection_id} "location": "A String", # Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations. + "project": "A String", # Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". "tableType": "A String", # Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. }, "storageConfig": { # Configurations related to Cloud Storage as the data source. # Cloud Storage related configurations. @@ -614,9 +630,22 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of a data quality scan. + "catalogPublishingStatus": { # The status of publishing the data scan result to Catalog. # Output only. The status of publishing the data scan to Catalog. + "state": "A String", # Output only. Execution state for catalog publishing. + }, "columns": [ # Output only. A list of results at the column level.A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it. { # DataQualityColumnResult provides a more detailed, per-column view of the results. "column": "A String", # Output only. The column specified in the DataQualityRule. + "dimensions": [ # Output only. The dimension-level results for this column. + { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. + "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. + "name": "A String", # Optional. The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. + }, + "passed": True or False, # Output only. Whether the dimension passed or failed. + "score": 3.14, # Output only. The dimension-level data quality score for this data scan job if and only if the 'dimension' field is set.The score ranges between 0, 100 (up to two decimal points). + }, + ], + "passed": True or False, # Output only. Whether the column passed or failed. "score": 3.14, # Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set.The score ranges between between 0, 100 (up to two decimal points). }, ], @@ -701,6 +730,7 @@

Method Details

"score": 3.14, # Output only. The overall data quality score.The score ranges between 0, 100 (up to two decimal points). }, "dataQualitySpec": { # DataQualityScan related setting. # Output only. Settings for a data quality scan. + "catalogPublishingEnabled": True or False, # Optional. If set, the latest DataScan job result will be published to Dataplex Catalog. "postScanActions": { # The configuration of post scan actions of DataQualityScan. # Optional. Actions to take upon job completion. "bigqueryExport": { # The configuration of BigQuery export post scan action. # Optional. If set, results will be exported to the provided BigQuery table. "resultsTable": "A String", # Optional. The BigQuery table to export DataQualityScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID or projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID diff --git a/docs/dyn/dataplex_v1.projects.locations.glossaries.categories.html b/docs/dyn/dataplex_v1.projects.locations.glossaries.categories.html index de641f1c13..cf250fd5b8 100644 --- a/docs/dyn/dataplex_v1.projects.locations.glossaries.categories.html +++ b/docs/dyn/dataplex_v1.projects.locations.glossaries.categories.html @@ -79,19 +79,19 @@

Instance Methods

Close httplib2 connections.

create(parent, body=None, categoryId=None, x__xgafv=None)

-

GlossaryCategory APIs are CCFE passthrough APIs. Creates a new GlossaryCategory resource.

+

Creates a new GlossaryCategory resource.

delete(name, x__xgafv=None)

-

Deletes a GlossaryCategory resource. All the categories and terms nested directly under the category will be moved one level up to the parent in the hierarchy.

+

Deletes a GlossaryCategory resource. All the GlossaryCategories and GlossaryTerms nested directly under the specified GlossaryCategory will be moved one level up to the parent in the hierarchy.

get(name, x__xgafv=None)

-

Retrieves a specified GlossaryCategory resource.

+

Gets a GlossaryCategory resource.

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists GlossaryCategory resources in a glossary.

+

Lists GlossaryCategory resources in a Glossary.

list_next()

Retrieves the next page of results.

@@ -112,27 +112,27 @@

Method Details

create(parent, body=None, categoryId=None, x__xgafv=None) -
GlossaryCategory APIs are CCFE passthrough APIs. Creates a new GlossaryCategory resource.
+  
Creates a new GlossaryCategory resource.
 
 Args:
-  parent: string, Required. The parent resource where this GlossaryCategory will be created. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} where locationId refers to a GCP region. (required)
+  parent: string, Required. The parent resource where this GlossaryCategory will be created. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} where locationId refers to a GCP region. (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # A GlossaryCategory represents a collection of categories and terms within a Glossary that are related to each other.
+{ # A GlossaryCategory represents a collection of GlossaryCategories and GlossaryTerms within a Glossary that are related to each other.
   "createTime": "A String", # Output only. The time at which the GlossaryCategory was created.
   "description": "A String", # Optional. The user-mutable description of the GlossaryCategory.
-  "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the categoryId, if not specified.
+  "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the GlossaryCategoryId, if not specified.
   "labels": { # Optional. User-defined labels for the GlossaryCategory.
     "a_key": "A String",
   },
-  "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
-  "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+  "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
+  "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
   "uid": "A String", # Output only. System generated unique id for the GlossaryCategory. This ID will be different if the GlossaryCategory is deleted and re-created with the same name.
   "updateTime": "A String", # Output only. The time at which the GlossaryCategory was last updated.
 }
 
-  categoryId: string, Required. Category ID: GlossaryCategory identifier.
+  categoryId: string, Required. GlossaryCategory identifier.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -141,15 +141,15 @@ 

Method Details

Returns: An object of the form: - { # A GlossaryCategory represents a collection of categories and terms within a Glossary that are related to each other. + { # A GlossaryCategory represents a collection of GlossaryCategories and GlossaryTerms within a Glossary that are related to each other. "createTime": "A String", # Output only. The time at which the GlossaryCategory was created. "description": "A String", # Optional. The user-mutable description of the GlossaryCategory. - "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the categoryId, if not specified. + "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the GlossaryCategoryId, if not specified. "labels": { # Optional. User-defined labels for the GlossaryCategory. "a_key": "A String", }, - "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId} - "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId} + "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id} + "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id} "uid": "A String", # Output only. System generated unique id for the GlossaryCategory. This ID will be different if the GlossaryCategory is deleted and re-created with the same name. "updateTime": "A String", # Output only. The time at which the GlossaryCategory was last updated. }
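A minimal sketch of calling this create method with google-api-python-client, matching the Args above; the project, location, glossary, and category values are placeholders.

    from googleapiclient.discovery import build

    dataplex = build("dataplex", "v1")

    glossary = "projects/my-project/locations/us-central1/glossaries/my-glossary"
    category = dataplex.projects().locations().glossaries().categories().create(
        parent=glossary,
        categoryId="finance",
        body={
            "displayName": "Finance",
            "description": "Terms used by the finance team.",
            "parent": glossary,  # required: a Glossary or another GlossaryCategory
        },
    ).execute()
    print(category["name"])  # .../glossaries/my-glossary/categories/finance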
@@ -157,10 +157,10 @@

Method Details

delete(name, x__xgafv=None) -
Deletes a GlossaryCategory resource. All the categories and terms nested directly under the category will be moved one level up to the parent in the hierarchy.
+  
Deletes a GlossaryCategory resource. All the GlossaryCategories and GlossaryTerms nested directly under the specified GlossaryCategory will be moved one level up to the parent in the hierarchy.
 
 Args:
-  name: string, Required. The name of the GlossaryCategory to delete. Format: projects/{project}/locations/{location}/glossary/{glossary}/categories/{glossary_category} (required)
+  name: string, Required. The name of the GlossaryCategory to delete. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id} (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -175,10 +175,10 @@ 

Method Details

get(name, x__xgafv=None) -
Retrieves a specified GlossaryCategory resource.
+  
Gets a GlossaryCategory resource.
 
 Args:
-  name: string, Required. The name of the GlossaryCategory to retrieve. Format: projects/{project}/locations/{location}/glossaries/{glossary}/categories/{glossary_category} (required)
+  name: string, Required. The name of the GlossaryCategory to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id} (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -187,15 +187,15 @@ 

Method Details

Returns: An object of the form: - { # A GlossaryCategory represents a collection of categories and terms within a Glossary that are related to each other. + { # A GlossaryCategory represents a collection of GlossaryCategories and GlossaryTerms within a Glossary that are related to each other. "createTime": "A String", # Output only. The time at which the GlossaryCategory was created. "description": "A String", # Optional. The user-mutable description of the GlossaryCategory. - "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the categoryId, if not specified. + "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the GlossaryCategoryId, if not specified. "labels": { # Optional. User-defined labels for the GlossaryCategory. "a_key": "A String", }, - "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId} - "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId} + "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id} + "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id} "uid": "A String", # Output only. System generated unique id for the GlossaryCategory. This ID will be different if the GlossaryCategory is deleted and re-created with the same name. "updateTime": "A String", # Output only. The time at which the GlossaryCategory was last updated. }
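A minimal sketch of the corresponding get call, assuming google-api-python-client; the resource name is a placeholder.

    from googleapiclient.discovery import build

    dataplex = build("dataplex", "v1")

    category = dataplex.projects().locations().glossaries().categories().get(
        name="projects/my-project/locations/us-central1/glossaries/my-glossary/categories/finance",
    ).execute()
    print(category.get("displayName"), "-", category.get("description", ""))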
@@ -251,13 +251,13 @@

Method Details

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists GlossaryCategory resources in a glossary.
+  Lists GlossaryCategory resources in a Glossary.
 
 Args:
-  parent: string, Required. The parent, which has this collection of categories. Format: projects/{project}/locations/{location}/glossaries/{glossary} Location is the GCP region. (required)
-  filter: string, Optional. Filter expression that filters categories listed in the response. Filters supported: List GlossaryCategories based on immediate parent in the resource hierarchy. This will only return the GlossaryCategories nested directly under the parent and no other subsequent nested categories will be returned.
-  orderBy: string, Optional. Order by expression that orders categories listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.
-  pageSize: integer, Optional. The maximum number of categories to return. The service may return fewer than this value. If unspecified, at most 50 categories will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  parent: string, Required. The parent, which has this collection of GlossaryCategories. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} Location is the GCP region. (required)
+  filter: string, Optional. Filter expression that filters GlossaryCategories listed in the response. Filters are supported on the following fields: - immediate_parent. Examples of using a filter are: - immediate_parent="projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}" - immediate_parent="projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}" This will only return the GlossaryCategories that are directly nested under the specified parent.
+  orderBy: string, Optional. Order by expression that orders GlossaryCategories listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.
+  pageSize: integer, Optional. The maximum number of GlossaryCategories to return. The service may return fewer than this value. If unspecified, at most 50 GlossaryCategories will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
   pageToken: string, Optional. A page token, received from a previous ListGlossaryCategories call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ListGlossaryCategories must match the call that provided the page token.
   x__xgafv: string, V1 error format.
     Allowed values
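
The generated client pairs list with list_next for pagination. A minimal sketch (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
parent = "projects/my-project/locations/us-central1/glossaries/my-glossary"  # hypothetical

categories = dataplex.projects().locations().glossaries().categories()
request = categories.list(parent=parent, pageSize=50)
while request is not None:
    response = request.execute()
    for category in response.get("categories", []):
        print(category["name"])
    # list_next follows nextPageToken and returns None when no pages remain.
    request = categories.list_next(previous_request=request, previous_response=response)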
@@ -268,16 +268,16 @@ 

Method Details

An object of the form:

    { # List GlossaryCategories Response
-     "categories": [ # Lists the glossaryCategories in the specified parent.
-       { # A GlossaryCategory represents a collection of categories and terms within a Glossary that are related to each other.
+     "categories": [ # Lists the GlossaryCategories in the specified parent.
+       { # A GlossaryCategory represents a collection of GlossaryCategories and GlossaryTerms within a Glossary that are related to each other.
          "createTime": "A String", # Output only. The time at which the GlossaryCategory was created.
          "description": "A String", # Optional. The user-mutable description of the GlossaryCategory.
-         "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the categoryId, if not specified.
+         "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the GlossaryCategoryId, if not specified.
          "labels": { # Optional. User-defined labels for the GlossaryCategory.
            "a_key": "A String",
          },
-         "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
-         "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+         "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
+         "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
          "uid": "A String", # Output only. System generated unique id for the GlossaryCategory. This ID will be different if the GlossaryCategory is deleted and re-created with the same name.
          "updateTime": "A String", # Output only. The time at which the GlossaryCategory was last updated.
        },

@@ -308,19 +308,19 @@

Method Details

Updates a GlossaryCategory resource.
 
 Args:
-  name: string, Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId} (required)
+  name: string, Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id} (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # A GlossaryCategory represents a collection of categories and terms within a Glossary that are related to each other.
+{ # A GlossaryCategory represents a collection of GlossaryCategories and GlossaryTerms within a Glossary that are related to each other.
   "createTime": "A String", # Output only. The time at which the GlossaryCategory was created.
   "description": "A String", # Optional. The user-mutable description of the GlossaryCategory.
-  "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the categoryId, if not specified.
+  "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the GlossaryCategoryId, if not specified.
   "labels": { # Optional. User-defined labels for the GlossaryCategory.
     "a_key": "A String",
   },
-  "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
-  "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+  "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
+  "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
   "uid": "A String", # Output only. System generated unique id for the GlossaryCategory. This ID will be different if the GlossaryCategory is deleted and re-created with the same name.
   "updateTime": "A String", # Output only. The time at which the GlossaryCategory was last updated.
 }
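
Since only fields named in updateMask are applied, a patch call pairs the mask with a sparse body. A minimal sketch (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
name = "projects/my-project/locations/us-central1/glossaries/my-glossary/categories/my-category"  # hypothetical

# Only the masked field is updated; output-only fields stay server-managed.
body = {"description": "Categories for customer-facing terminology."}
dataplex.projects().locations().glossaries().categories().patch(
    name=name, updateMask="description", body=body
).execute()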
@@ -334,15 +334,15 @@ 

Method Details

Returns:
  An object of the form:

-    { # A GlossaryCategory represents a collection of categories and terms within a Glossary that are related to each other.
+    { # A GlossaryCategory represents a collection of GlossaryCategories and GlossaryTerms within a Glossary that are related to each other.
      "createTime": "A String", # Output only. The time at which the GlossaryCategory was created.
      "description": "A String", # Optional. The user-mutable description of the GlossaryCategory.
-     "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the categoryId, if not specified.
+     "displayName": "A String", # Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the GlossaryCategoryId, if not specified.
      "labels": { # Optional. User-defined labels for the GlossaryCategory.
        "a_key": "A String",
      },
-     "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
-     "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+     "name": "A String", # Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
+     "parent": "A String", # Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
      "uid": "A String", # Output only. System generated unique id for the GlossaryCategory. This ID will be different if the GlossaryCategory is deleted and re-created with the same name.
      "updateTime": "A String", # Output only. The time at which the GlossaryCategory was last updated.
    }
diff --git a/docs/dyn/dataplex_v1.projects.locations.glossaries.html b/docs/dyn/dataplex_v1.projects.locations.glossaries.html
index 91e5351397..daad979ac1 100644
--- a/docs/dyn/dataplex_v1.projects.locations.glossaries.html
+++ b/docs/dyn/dataplex_v1.projects.locations.glossaries.html
@@ -92,10 +92,10 @@

Instance Methods

Creates a new Glossary resource.

delete(name, etag=None, x__xgafv=None)

-Deletes a Glossary resource. All the categories and terms within the glossary must be deleted before a glossary can be deleted.
+Deletes a Glossary resource. All the categories and terms within the Glossary must be deleted before the Glossary can be deleted.

get(name, x__xgafv=None)

-Retrieves a specified Glossary resource.
+Gets a Glossary resource.

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

@@ -125,27 +125,27 @@

Method Details

Creates a new Glossary resource.
 
 Args:
-  parent: string, Required. The parent resource where this Glossary will be created. Format: projects/{projectId}/locations/{locationId} where locationId refers to a GCP region. (required)
+  parent: string, Required. The parent resource where this Glossary will be created. Format: projects/{project_id_or_number}/locations/{location_id} where location_id refers to a GCP region. (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # A Glossary represents a collection of categories and terms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the categories and terms within it.
-  "categoryCount": 42, # Output only. The number of categories in the glossary.
-  "createTime": "A String", # Output only. The time at which the glossary was created.
-  "description": "A String", # Optional. The user-mutable description of the glossary.
-  "displayName": "A String", # Optional. User friendly display name of the glossary. This is user-mutable. This will be same as the glossaryId, if not specified.
+{ # A Glossary represents a collection of GlossaryCategories and GlossaryTerms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the GlossaryCategories and GlossaryTerms within it.
+  "categoryCount": 42, # Output only. The number of GlossaryCategories in the Glossary.
+  "createTime": "A String", # Output only. The time at which the Glossary was created.
+  "description": "A String", # Optional. The user-mutable description of the Glossary.
+  "displayName": "A String", # Optional. User friendly display name of the Glossary. This is user-mutable. This will be same as the GlossaryId, if not specified.
   "etag": "A String", # Optional. Needed for resource freshness validation. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
   "labels": { # Optional. User-defined labels for the Glossary.
     "a_key": "A String",
   },
-  "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}
-  "termCount": 42, # Output only. The number of terms in the glossary.
+  "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}
+  "termCount": 42, # Output only. The number of GlossaryTerms in the Glossary.
   "uid": "A String", # Output only. System generated unique id for the Glossary. This ID will be different if the Glossary is deleted and re-created with the same name.
-  "updateTime": "A String", # Output only. The time at which the glossary was last updated.
+  "updateTime": "A String", # Output only. The time at which the Glossary was last updated.
 }
 
   glossaryId: string, Required. Glossary ID: Glossary identifier.
-  validateOnly: boolean, Optional. Validates the request without actually creating the glossary. Default: false.
+  validateOnly: boolean, Optional. Validates the request without actually creating the Glossary. Default: false.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
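
A minimal sketch of a create call, using validateOnly to dry-run the request first (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
parent = "projects/my-project/locations/us-central1"  # hypothetical

body = {"displayName": "Business glossary", "description": "Terms shared across teams."}
glossaries = dataplex.projects().locations().glossaries()

# Dry run first: validateOnly=True checks the request without creating anything.
glossaries.create(parent=parent, glossaryId="business-glossary", body=body, validateOnly=True).execute()
response = glossaries.create(parent=parent, glossaryId="business-glossary", body=body).execute()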
@@ -177,10 +177,10 @@ 

Method Details

delete(name, etag=None, x__xgafv=None)
-  Deletes a Glossary resource. All the categories and terms within the glossary must be deleted before a glossary can be deleted.
+  Deletes a Glossary resource. All the categories and terms within the Glossary must be deleted before the Glossary can be deleted.
 
 Args:
-  name: string, Required. The name of the Glossary to delete. Format: projects/{project}/locations/{location}/glossary/{glossary} (required)
+  name: string, Required. The name of the Glossary to delete. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} (required)
   etag: string, Optional. The etag of the Glossary. If this is provided, it must match the server's etag. If the etag is provided and does not match the server-computed etag, the request must fail with a ABORTED error code.
   x__xgafv: string, V1 error format.
     Allowed values
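
The etag parameter gives optimistic concurrency: read the current etag, then pass it so the delete fails with ABORTED if the Glossary changed in between. A minimal sketch (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
name = "projects/my-project/locations/us-central1/glossaries/my-glossary"  # hypothetical

glossaries = dataplex.projects().locations().glossaries()
glossary = glossaries.get(name=name).execute()
# Passing the freshly read etag makes the delete conditional on freshness.
glossaries.delete(name=name, etag=glossary.get("etag")).execute()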
@@ -213,10 +213,10 @@ 

Method Details

get(name, x__xgafv=None)
-  Retrieves a specified Glossary resource.
+  Gets a Glossary resource.
 
 Args:
-  name: string, Required. The name of the Glossary to retrieve. Format: projects/{project}/locations/{location}/glossaries/{glossary} (required)
+  name: string, Required. The name of the Glossary to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -225,19 +225,19 @@ 

Method Details

Returns:
  An object of the form:

-    { # A Glossary represents a collection of categories and terms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the categories and terms within it.
-     "categoryCount": 42, # Output only. The number of categories in the glossary.
-     "createTime": "A String", # Output only. The time at which the glossary was created.
-     "description": "A String", # Optional. The user-mutable description of the glossary.
-     "displayName": "A String", # Optional. User friendly display name of the glossary. This is user-mutable. This will be same as the glossaryId, if not specified.
+    { # A Glossary represents a collection of GlossaryCategories and GlossaryTerms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the GlossaryCategories and GlossaryTerms within it.
+     "categoryCount": 42, # Output only. The number of GlossaryCategories in the Glossary.
+     "createTime": "A String", # Output only. The time at which the Glossary was created.
+     "description": "A String", # Optional. The user-mutable description of the Glossary.
+     "displayName": "A String", # Optional. User friendly display name of the Glossary. This is user-mutable. This will be same as the GlossaryId, if not specified.
      "etag": "A String", # Optional. Needed for resource freshness validation. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
      "labels": { # Optional. User-defined labels for the Glossary.
        "a_key": "A String",
      },
-     "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}
-     "termCount": 42, # Output only. The number of terms in the glossary.
+     "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}
+     "termCount": 42, # Output only. The number of GlossaryTerms in the Glossary.
      "uid": "A String", # Output only. System generated unique id for the Glossary. This ID will be different if the Glossary is deleted and re-created with the same name.
-     "updateTime": "A String", # Output only. The time at which the glossary was last updated.
+     "updateTime": "A String", # Output only. The time at which the Glossary was last updated.
    }
@@ -294,10 +294,10 @@

Method Details

Lists Glossary resources in a project and location.
 
 Args:
-  parent: string, Required. The parent, which has this collection of glossaries. Format: projects/{project}/locations/{location} Location is the GCP region. (required)
-  filter: string, Optional. Filter expression that filters glossaries listed in the response. Initially, no filter is supported.
-  orderBy: string, Optional. Order by expression that orders glossaries listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.
-  pageSize: integer, Optional. The maximum number of glossaries to return. The service may return fewer than this value. If unspecified, at most 50 glossaries will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  parent: string, Required. The parent, which has this collection of Glossaries. Format: projects/{project_id_or_number}/locations/{location_id} where location_id refers to a GCP region. (required)
+  filter: string, Optional. Filter expression that filters Glossaries listed in the response. Filters on proto fields of Glossary are supported. Examples of using a filter are: - display_name="my-glossary" - categoryCount=1 - termCount=0
+  orderBy: string, Optional. Order by expression that orders Glossaries listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.
+  pageSize: integer, Optional. The maximum number of Glossaries to return. The service may return fewer than this value. If unspecified, at most 50 Glossaries will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
   pageToken: string, Optional. A page token, received from a previous ListGlossaries call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ListGlossaries must match the call that provided the page token.
   x__xgafv: string, V1 error format.
     Allowed values
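
A minimal sketch of the list call using the filter and orderBy expressions documented above (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
parent = "projects/my-project/locations/us-central1"  # hypothetical

# Filter on proto fields of Glossary, e.g. display_name, as documented above.
response = dataplex.projects().locations().glossaries().list(
    parent=parent, filter='display_name="my-glossary"', orderBy="create_time"
).execute()
for glossary in response.get("glossaries", []):
    print(glossary["name"], glossary.get("termCount", 0))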
@@ -308,20 +308,20 @@ 

Method Details

An object of the form:

    { # List Glossaries Response
-     "glossaries": [ # Lists the glossaries in the specified parent.
-       { # A Glossary represents a collection of categories and terms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the categories and terms within it.
-         "categoryCount": 42, # Output only. The number of categories in the glossary.
-         "createTime": "A String", # Output only. The time at which the glossary was created.
-         "description": "A String", # Optional. The user-mutable description of the glossary.
-         "displayName": "A String", # Optional. User friendly display name of the glossary. This is user-mutable. This will be same as the glossaryId, if not specified.
+     "glossaries": [ # Lists the Glossaries in the specified parent.
+       { # A Glossary represents a collection of GlossaryCategories and GlossaryTerms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the GlossaryCategories and GlossaryTerms within it.
+         "categoryCount": 42, # Output only. The number of GlossaryCategories in the Glossary.
+         "createTime": "A String", # Output only. The time at which the Glossary was created.
+         "description": "A String", # Optional. The user-mutable description of the Glossary.
+         "displayName": "A String", # Optional. User friendly display name of the Glossary. This is user-mutable. This will be same as the GlossaryId, if not specified.
          "etag": "A String", # Optional. Needed for resource freshness validation. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
          "labels": { # Optional. User-defined labels for the Glossary.
            "a_key": "A String",
          },
-         "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}
-         "termCount": 42, # Output only. The number of terms in the glossary.
+         "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}
+         "termCount": 42, # Output only. The number of GlossaryTerms in the Glossary.
          "uid": "A String", # Output only. System generated unique id for the Glossary. This ID will be different if the Glossary is deleted and re-created with the same name.
-         "updateTime": "A String", # Output only. The time at which the glossary was last updated.
+         "updateTime": "A String", # Output only. The time at which the Glossary was last updated.
        },
      ],
      "nextPageToken": "A String", # A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.

@@ -350,27 +350,27 @@

Method Details

Updates a Glossary resource.
 
 Args:
-  name: string, Output only. Identifier. The resource name of the Glossary. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} (required)
+  name: string, Output only. Identifier. The resource name of the Glossary. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # A Glossary represents a collection of categories and terms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the categories and terms within it.
-  "categoryCount": 42, # Output only. The number of categories in the glossary.
-  "createTime": "A String", # Output only. The time at which the glossary was created.
-  "description": "A String", # Optional. The user-mutable description of the glossary.
-  "displayName": "A String", # Optional. User friendly display name of the glossary. This is user-mutable. This will be same as the glossaryId, if not specified.
+{ # A Glossary represents a collection of GlossaryCategories and GlossaryTerms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the GlossaryCategories and GlossaryTerms within it.
+  "categoryCount": 42, # Output only. The number of GlossaryCategories in the Glossary.
+  "createTime": "A String", # Output only. The time at which the Glossary was created.
+  "description": "A String", # Optional. The user-mutable description of the Glossary.
+  "displayName": "A String", # Optional. User friendly display name of the Glossary. This is user-mutable. This will be same as the GlossaryId, if not specified.
   "etag": "A String", # Optional. Needed for resource freshness validation. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
   "labels": { # Optional. User-defined labels for the Glossary.
     "a_key": "A String",
   },
-  "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}
-  "termCount": 42, # Output only. The number of terms in the glossary.
+  "name": "A String", # Output only. Identifier. The resource name of the Glossary. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}
+  "termCount": 42, # Output only. The number of GlossaryTerms in the Glossary.
   "uid": "A String", # Output only. System generated unique id for the Glossary. This ID will be different if the Glossary is deleted and re-created with the same name.
-  "updateTime": "A String", # Output only. The time at which the glossary was last updated.
+  "updateTime": "A String", # Output only. The time at which the Glossary was last updated.
 }
 
   updateMask: string, Required. The list of fields to update.
-  validateOnly: boolean, Optional. Validates the request without actually updating the glossary. Default: false.
+  validateOnly: boolean, Optional. Validates the request without actually updating the Glossary. Default: false.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
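
updateMask takes a comma-separated list of field paths when several fields change at once. A minimal sketch (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
name = "projects/my-project/locations/us-central1/glossaries/my-glossary"  # hypothetical

body = {"displayName": "Business glossary (v2)", "labels": {"team": "data-governance"}}
# Field paths in the mask use the proto names of the fields in the body.
dataplex.projects().locations().glossaries().patch(
    name=name, updateMask="display_name,labels", body=body
).execute()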
diff --git a/docs/dyn/dataplex_v1.projects.locations.glossaries.terms.html b/docs/dyn/dataplex_v1.projects.locations.glossaries.terms.html
index 6999f7dc62..fae25ec583 100644
--- a/docs/dyn/dataplex_v1.projects.locations.glossaries.terms.html
+++ b/docs/dyn/dataplex_v1.projects.locations.glossaries.terms.html
@@ -79,19 +79,19 @@ 

Instance Methods

Close httplib2 connections.

create(parent, body=None, termId=None, x__xgafv=None)

-GlossaryTerm APIs are CCFE passthrough APIs. Creates a new GlossaryTerm resource.
+Creates a new GlossaryTerm resource.

delete(name, x__xgafv=None)

Deletes a GlossaryTerm resource.

get(name, x__xgafv=None)

-Retrieves a specified GlossaryTerm resource.
+Gets a GlossaryTerm resource.

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

-Lists GlossaryTerm resources in a glossary.
+Lists GlossaryTerm resources in a Glossary.

list_next()

Retrieves the next page of results.

@@ -112,27 +112,27 @@

Method Details

create(parent, body=None, termId=None, x__xgafv=None)
-  GlossaryTerm APIs are CCFE passthrough APIs. Creates a new GlossaryTerm resource.
+  Creates a new GlossaryTerm resource.
 
 Args:
-  parent: string, Required. The parent resource where this GlossaryTerm will be created. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} where locationId refers to a GCP region. (required)
+  parent: string, Required. The parent resource where the GlossaryTerm will be created. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} where location_id refers to a GCP region. (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # GlossaryTerms are the core of glossary. A GlossaryTerm holds a rich text description that can be attached to entries or specific columns to enrich them.
+{ # GlossaryTerms are the core of Glossary. A GlossaryTerm holds a rich text description that can be attached to Entries or specific columns to enrich them.
   "createTime": "A String", # Output only. The time at which the GlossaryTerm was created.
   "description": "A String", # Optional. The user-mutable description of the GlossaryTerm.
-  "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the termId, if not specified.
+  "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the GlossaryTermId, if not specified.
   "labels": { # Optional. User-defined labels for the GlossaryTerm.
     "a_key": "A String",
   },
-  "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}
-  "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+  "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}
+  "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
   "uid": "A String", # Output only. System generated unique id for the GlossaryTerm. This ID will be different if the GlossaryTerm is deleted and re-created with the same name.
   "updateTime": "A String", # Output only. The time at which the GlossaryTerm was last updated.
 }
 
-  termId: string, Required. Term ID: GlossaryTerm identifier.
+  termId: string, Required. GlossaryTerm identifier.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
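
Because the body's parent field accepts either a Glossary or a GlossaryCategory, a term can be nested under a category at creation time. A minimal sketch (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
glossary = "projects/my-project/locations/us-central1/glossaries/my-glossary"  # hypothetical

body = {
    # Nest the term under a category rather than directly under the Glossary.
    "parent": glossary + "/categories/my-category",
    "description": "Revenue recognized in the reporting period.",
}
term = dataplex.projects().locations().glossaries().terms().create(
    parent=glossary, termId="net-revenue", body=body
).execute()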
@@ -141,15 +141,15 @@ 

Method Details

Returns:
  An object of the form:

-    { # GlossaryTerms are the core of glossary. A GlossaryTerm holds a rich text description that can be attached to entries or specific columns to enrich them.
+    { # GlossaryTerms are the core of Glossary. A GlossaryTerm holds a rich text description that can be attached to Entries or specific columns to enrich them.
      "createTime": "A String", # Output only. The time at which the GlossaryTerm was created.
      "description": "A String", # Optional. The user-mutable description of the GlossaryTerm.
-     "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the termId, if not specified.
+     "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the GlossaryTermId, if not specified.
      "labels": { # Optional. User-defined labels for the GlossaryTerm.
        "a_key": "A String",
      },
-     "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}
-     "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+     "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}
+     "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
      "uid": "A String", # Output only. System generated unique id for the GlossaryTerm. This ID will be different if the GlossaryTerm is deleted and re-created with the same name.
      "updateTime": "A String", # Output only. The time at which the GlossaryTerm was last updated.
    }
@@ -160,7 +160,7 @@

Method Details

Deletes a GlossaryTerm resource.
 
 Args:
-  name: string, Required. The name of the GlossaryTerm to delete. Format: projects/{project}/locations/{location}/glossary/{glossary}/terms/{glossary_term} (required)
+  name: string, Required. The name of the GlossaryTerm to delete. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id} (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -175,10 +175,10 @@ 

Method Details

get(name, x__xgafv=None)
-  Retrieves a specified GlossaryTerm resource.
+  Gets a GlossaryTerm resource.
 
 Args:
-  name: string, Required. The name of the GlossaryTerm to retrieve. Format: projects/{project}/locations/{location}/glossaries/{glossary}/terms/{glossary_term} (required)
+  name: string, Required. The name of the GlossaryTerm to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id} (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -187,15 +187,15 @@ 

Method Details

Returns:
  An object of the form:

-    { # GlossaryTerms are the core of glossary. A GlossaryTerm holds a rich text description that can be attached to entries or specific columns to enrich them.
+    { # GlossaryTerms are the core of Glossary. A GlossaryTerm holds a rich text description that can be attached to Entries or specific columns to enrich them.
      "createTime": "A String", # Output only. The time at which the GlossaryTerm was created.
      "description": "A String", # Optional. The user-mutable description of the GlossaryTerm.
-     "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the termId, if not specified.
+     "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the GlossaryTermId, if not specified.
      "labels": { # Optional. User-defined labels for the GlossaryTerm.
        "a_key": "A String",
      },
-     "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}
-     "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+     "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}
+     "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
      "uid": "A String", # Output only. System generated unique id for the GlossaryTerm. This ID will be different if the GlossaryTerm is deleted and re-created with the same name.
      "updateTime": "A String", # Output only. The time at which the GlossaryTerm was last updated.
    }
@@ -251,13 +251,13 @@

Method Details

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists GlossaryTerm resources in a glossary.
+  Lists GlossaryTerm resources in a Glossary.
 
 Args:
-  parent: string, Required. The parent, which has this collection of terms. Format: projects/{project}/locations/{location}/glossaries/{glossary} Location is the GCP region. (required)
-  filter: string, Optional. Filter expression that filters terms listed in the response. Filters supported: List GlossaryTerms based on immediate parent in the resource hierarchy. This will only return the terms nested directly under the parent and no other subsequent nested terms will be returned.
-  orderBy: string, Optional. Order by expression that orders terms listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.
-  pageSize: integer, Optional. The maximum number of terms to return. The service may return fewer than this value. If unspecified, at most 50 terms will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  parent: string, Required. The parent, which has this collection of GlossaryTerms. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} where location_id refers to a GCP region. (required)
+  filter: string, Optional. Filter expression that filters GlossaryTerms listed in the response. Filters are supported on the following fields: - immediate_parent. Examples of using a filter are: - immediate_parent="projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}" - immediate_parent="projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}" This will only return the GlossaryTerms that are directly nested under the specified parent.
+  orderBy: string, Optional. Order by expression that orders GlossaryTerms listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.
+  pageSize: integer, Optional. The maximum number of GlossaryTerms to return. The service may return fewer than this value. If unspecified, at most 50 GlossaryTerms will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
   pageToken: string, Optional. A page token, received from a previous ListGlossaryTerms call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ListGlossaryTerms must match the call that provided the page token.
   x__xgafv: string, V1 error format.
     Allowed values
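
A minimal sketch of the immediate_parent filter described above, which restricts results to terms nested directly under one parent (hypothetical IDs; assumes Application Default Credentials):

from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
glossary = "projects/my-project/locations/us-central1/glossaries/my-glossary"  # hypothetical

# Only terms directly under the named category are returned; deeper nesting is excluded.
response = dataplex.projects().locations().glossaries().terms().list(
    parent=glossary,
    filter='immediate_parent="%s/categories/my-category"' % glossary,
).execute()
for term in response.get("terms", []):
    print(term["displayName"])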
@@ -269,16 +269,16 @@ 

Method Details

    { # List GlossaryTerms Response
      "nextPageToken": "A String", # A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
-     "terms": [ # Lists the terms in the specified parent.
-       { # GlossaryTerms are the core of glossary. A GlossaryTerm holds a rich text description that can be attached to entries or specific columns to enrich them.
+     "terms": [ # Lists the GlossaryTerms in the specified parent.
+       { # GlossaryTerms are the core of Glossary. A GlossaryTerm holds a rich text description that can be attached to Entries or specific columns to enrich them.
          "createTime": "A String", # Output only. The time at which the GlossaryTerm was created.
          "description": "A String", # Optional. The user-mutable description of the GlossaryTerm.
-         "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the termId, if not specified.
+         "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the GlossaryTermId, if not specified.
          "labels": { # Optional. User-defined labels for the GlossaryTerm.
            "a_key": "A String",
          },
-         "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}
-         "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+         "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}
+         "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
          "uid": "A String", # Output only. System generated unique id for the GlossaryTerm. This ID will be different if the GlossaryTerm is deleted and re-created with the same name.
          "updateTime": "A String", # Output only. The time at which the GlossaryTerm was last updated.
        },

@@ -308,19 +308,19 @@

Method Details

Updates a GlossaryTerm resource.
 
 Args:
-  name: string, Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId} (required)
+  name: string, Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id} (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # GlossaryTerms are the core of glossary. A GlossaryTerm holds a rich text description that can be attached to entries or specific columns to enrich them.
+{ # GlossaryTerms are the core of Glossary. A GlossaryTerm holds a rich text description that can be attached to Entries or specific columns to enrich them.
   "createTime": "A String", # Output only. The time at which the GlossaryTerm was created.
   "description": "A String", # Optional. The user-mutable description of the GlossaryTerm.
-  "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the termId, if not specified.
+  "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the GlossaryTermId, if not specified.
   "labels": { # Optional. User-defined labels for the GlossaryTerm.
     "a_key": "A String",
   },
-  "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}
-  "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+  "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}
+  "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
   "uid": "A String", # Output only. System generated unique id for the GlossaryTerm. This ID will be different if the GlossaryTerm is deleted and re-created with the same name.
   "updateTime": "A String", # Output only. The time at which the GlossaryTerm was last updated.
 }
@@ -334,15 +334,15 @@ 

Method Details

Returns:
  An object of the form:

-    { # GlossaryTerms are the core of glossary. A GlossaryTerm holds a rich text description that can be attached to entries or specific columns to enrich them.
+    { # GlossaryTerms are the core of Glossary. A GlossaryTerm holds a rich text description that can be attached to Entries or specific columns to enrich them.
      "createTime": "A String", # Output only. The time at which the GlossaryTerm was created.
      "description": "A String", # Optional. The user-mutable description of the GlossaryTerm.
-     "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the termId, if not specified.
+     "displayName": "A String", # Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the GlossaryTermId, if not specified.
      "labels": { # Optional. User-defined labels for the GlossaryTerm.
        "a_key": "A String",
      },
-     "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}
-     "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}
+     "name": "A String", # Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}
+     "parent": "A String", # Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}
      "uid": "A String", # Output only. System generated unique id for the GlossaryTerm. This ID will be different if the GlossaryTerm is deleted and re-created with the same name.
      "updateTime": "A String", # Output only. The time at which the GlossaryTerm was last updated.
    }
diff --git a/docs/dyn/dataproc_v1.projects.locations.autoscalingPolicies.html b/docs/dyn/dataproc_v1.projects.locations.autoscalingPolicies.html
index 66e4a49e1f..4a9ea76f08 100644
--- a/docs/dyn/dataproc_v1.projects.locations.autoscalingPolicies.html
+++ b/docs/dyn/dataproc_v1.projects.locations.autoscalingPolicies.html
@@ -138,6 +138,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -182,6 +183,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -251,6 +253,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -350,6 +353,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -500,6 +504,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -544,6 +549,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.html b/docs/dyn/dataproc_v1.projects.locations.batches.html index ae1ea6149a..c711a4396c 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.html @@ -248,6 +248,15 @@

diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.html b/docs/dyn/dataproc_v1.projects.locations.batches.html
index ae1ea6149a..c711a4396c 100644
--- a/docs/dyn/dataproc_v1.projects.locations.batches.html
+++ b/docs/dyn/dataproc_v1.projects.locations.batches.html
@@ -248,6 +248,15 @@

Method Details

"a_key": "A String", }, "outputUri": "A String", # Output only. A URI pointing to the location of the stdout and stderr of the workload. + "propertiesInfo": { # Properties of the workload organized by origin. # Optional. Properties of the workload organized by origin. + "autotuningProperties": { # Output only. Properties set by autotuning engine. + "a_key": { # Annotatated property value. + "annotation": "A String", # Annotation, comment or explanation why the property was set. + "overriddenValue": "A String", # Optional. Value which was replaced by the corresponding component. + "value": "A String", # Property value. + }, + }, + }, }, "sparkBatch": { # A configuration for running an Apache Spark (https://spark.apache.org/) batch workload. # Optional. Spark batch config. "archiveUris": [ # Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. @@ -453,6 +462,15 @@

Method Details

"a_key": "A String", }, "outputUri": "A String", # Output only. A URI pointing to the location of the stdout and stderr of the workload. + "propertiesInfo": { # Properties of the workload organized by origin. # Optional. Properties of the workload organized by origin. + "autotuningProperties": { # Output only. Properties set by autotuning engine. + "a_key": { # Annotatated property value. + "annotation": "A String", # Annotation, comment or explanation why the property was set. + "overriddenValue": "A String", # Optional. Value which was replaced by the corresponding component. + "value": "A String", # Property value. + }, + }, + }, }, "sparkBatch": { # A configuration for running an Apache Spark (https://spark.apache.org/) batch workload. # Optional. Spark batch config. "archiveUris": [ # Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. @@ -616,6 +634,15 @@

Method Details

"a_key": "A String", }, "outputUri": "A String", # Output only. A URI pointing to the location of the stdout and stderr of the workload. + "propertiesInfo": { # Properties of the workload organized by origin. # Optional. Properties of the workload organized by origin. + "autotuningProperties": { # Output only. Properties set by autotuning engine. + "a_key": { # Annotatated property value. + "annotation": "A String", # Annotation, comment or explanation why the property was set. + "overriddenValue": "A String", # Optional. Value which was replaced by the corresponding component. + "value": "A String", # Property value. + }, + }, + }, }, "sparkBatch": { # A configuration for running an Apache Spark (https://spark.apache.org/) batch workload. # Optional. Spark batch config. "archiveUris": [ # Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html b/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html index 79b4131b45..79ecb99a60 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html @@ -83,12 +83,6 @@

Instance Methods

accessJob(name, jobId=None, parent=None, x__xgafv=None)

Obtain data corresponding to a spark job for a Spark Application.

-accessNativeBuildInfo(name, parent=None, x__xgafv=None)

-Obtain build data for Native Job

-accessNativeSqlQuery(name, executionId=None, parent=None, x__xgafv=None)

-Obtain data corresponding to a particular Native SQL Query for a Spark Application.

accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None)

Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.

@@ -125,12 +119,6 @@

Instance Methods

searchJobs_next()

Retrieves the next page of results.

-searchNativeSqlQueries(name, pageSize=None, pageToken=None, parent=None, x__xgafv=None)

-Obtain data corresponding to Native SQL Queries for a Spark Application.

-searchNativeSqlQueries_next()

-Retrieves the next page of results.

searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None)

Obtain data corresponding to SQL Queries for a Spark Application.

@@ -325,67 +313,6 @@

Method Details

}
-
-accessNativeBuildInfo(name, parent=None, x__xgafv=None)
-Obtain build data for Native Job
-
-Args:
-  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
-  parent: string, Required. Parent (Batch) resource reference.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Details of Native Build Info for a Spark Application
-  "buildInfo": { # Native Build Info Data
-    "buildClass": "A String", # Optional. Build class of Native.
-    "buildInfo": [ # Optional. Build related details.
-      { # Native Build Info
-        "buildKey": "A String", # Optional. Build key.
-        "buildValue": "A String", # Optional. Build value.
-      },
-    ],
-  },
-}
-
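For reference, a minimal sketch of calling this method through the discovery client before its removal in this change; the client build and the placeholder IDs in the resource name are assumptions:

    from googleapiclient.discovery import build

    dataproc = build('dataproc', 'v1')
    name = ('projects/PROJECT_ID/locations/DATAPROC_REGION/'
            'batches/BATCH_ID/sparkApplications/APPLICATION_ID')
    resp = dataproc.projects().locations().batches().sparkApplications() \
        .accessNativeBuildInfo(name=name).execute()
    # The outer buildInfo holds the build class; the inner list holds key/value details.
    for entry in resp.get('buildInfo', {}).get('buildInfo', []):
        print(entry.get('buildKey'), '=', entry.get('buildValue'))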
-
-accessNativeSqlQuery(name, executionId=None, parent=None, x__xgafv=None)
-Obtain data corresponding to a particular Native SQL Query for a Spark Application.
-
-Args:
-  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
-  executionId: string, Required. Execution ID
-  parent: string, Required. Parent (Batch) resource reference.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Details of a query for a Spark Application
-  "executionData": { # Native SQL Execution Data # Native SQL Execution Data
-    "description": "A String", # Optional. Description of the execution.
-    "executionId": "A String", # Required. Execution ID of the Native SQL Execution.
-    "fallbackDescription": "A String", # Optional. Description of the fallback.
-    "fallbackNodeToReason": [ # Optional. Fallback node to reason.
-      { # Native SQL Execution Data
-        "fallbackNode": "A String", # Optional. Fallback node information.
-        "fallbackReason": "A String", # Optional. Fallback to Spark reason.
-      },
-    ],
-    "numFallbackNodes": 42, # Optional. Number of nodes fallen back to Spark.
-    "numNativeNodes": 42, # Optional. Number of nodes in Native.
-  },
-}
-
-
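Likewise, a minimal illustrative sketch of fetching one native SQL execution before this method's removal; the execution ID and resource name values are placeholders:

    from googleapiclient.discovery import build

    dataproc = build('dataproc', 'v1')
    name = ('projects/PROJECT_ID/locations/DATAPROC_REGION/'
            'batches/BATCH_ID/sparkApplications/APPLICATION_ID')
    query = dataproc.projects().locations().batches().sparkApplications() \
        .accessNativeSqlQuery(name=name, executionId='1').execute()
    data = query.get('executionData', {})
    print(data.get('numNativeNodes'), 'native nodes;',
          data.get('numFallbackNodes'), 'fell back to Spark')
    for node in data.get('fallbackNodeToReason', []):
        print(node.get('fallbackNode'), '->', node.get('fallbackReason'))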
accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None)
Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.
@@ -1501,57 +1428,6 @@ 

Method Details

-
-searchNativeSqlQueries(name, pageSize=None, pageToken=None, parent=None, x__xgafv=None)
-Obtain data corresponding to Native SQL Queries for a Spark Application.
-
-Args:
-  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
-  pageSize: integer, Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
-  pageToken: string, Optional. A page token received from a previous SearchSparkApplicationNativeSqlQueries call. Provide this token to retrieve the subsequent page.
-  parent: string, Required. Parent (Batch) resource reference.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # List of all Native SQL queries details for a Spark Application.
-  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationNativeSqlQueriesRequest.
-  "sparkApplicationNativeSqlQueries": [ # Output only. Native SQL Execution Data
-    { # Native SQL Execution Data
-      "description": "A String", # Optional. Description of the execution.
-      "executionId": "A String", # Required. Execution ID of the Native SQL Execution.
-      "fallbackDescription": "A String", # Optional. Description of the fallback.
-      "fallbackNodeToReason": [ # Optional. Fallback node to reason.
-        { # Native SQL Execution Data
-          "fallbackNode": "A String", # Optional. Fallback node information.
-          "fallbackReason": "A String", # Optional. Fallback to Spark reason.
-        },
-      ],
-      "numFallbackNodes": 42, # Optional. Number of nodes fallen back to Spark.
-      "numNativeNodes": 42, # Optional. Number of nodes in Native.
-    },
-  ],
-}
-
-
-searchNativeSqlQueries_next()
-Retrieves the next page of results.
-
-        Args:
-          previous_request: The request for the previous page. (required)
-          previous_response: The response from the request for the previous page. (required)
-
-        Returns:
-          A request object that you can call 'execute()' on to request the next
-          page. Returns None if there are no more items in the collection.
-        
-
-
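The two removed methods above followed the standard discovery-client pagination pattern documented in their Args and Returns sections; a minimal sketch, with placeholder IDs:

    from googleapiclient.discovery import build

    dataproc = build('dataproc', 'v1')
    spark_apps = dataproc.projects().locations().batches().sparkApplications()
    name = ('projects/PROJECT_ID/locations/DATAPROC_REGION/'
            'batches/BATCH_ID/sparkApplications/APPLICATION_ID')

    # Each *_next call builds the request for the following page from the
    # previous request/response pair, returning None when pages are exhausted.
    request = spark_apps.searchNativeSqlQueries(name=name, pageSize=50)
    while request is not None:
        response = request.execute()
        for query in response.get('sparkApplicationNativeSqlQueries', []):
            print(query.get('executionId'), query.get('description'))
        request = spark_apps.searchNativeSqlQueries_next(request, response)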
searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None)
Obtain data corresponding to SQL Queries for a Spark Application.
diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.html b/docs/dyn/dataproc_v1.projects.locations.sessions.html
index e3e4fdde44..229aa6b8ee 100644
--- a/docs/dyn/dataproc_v1.projects.locations.sessions.html
+++ b/docs/dyn/dataproc_v1.projects.locations.sessions.html
@@ -191,6 +191,15 @@ 

Method Details

"a_key": "A String", }, "outputUri": "A String", # Output only. A URI pointing to the location of the stdout and stderr of the workload. + "propertiesInfo": { # Properties of the workload organized by origin. # Optional. Properties of the workload organized by origin. + "autotuningProperties": { # Output only. Properties set by autotuning engine. + "a_key": { # Annotatated property value. + "annotation": "A String", # Annotation, comment or explanation why the property was set. + "overriddenValue": "A String", # Optional. Value which was replaced by the corresponding component. + "value": "A String", # Property value. + }, + }, + }, }, "sessionTemplate": "A String", # Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session. "sparkConnectSession": { # Spark connect configuration for an interactive session. # Optional. Spark connect session config. @@ -366,6 +375,15 @@

Method Details

"a_key": "A String", }, "outputUri": "A String", # Output only. A URI pointing to the location of the stdout and stderr of the workload. + "propertiesInfo": { # Properties of the workload organized by origin. # Optional. Properties of the workload organized by origin. + "autotuningProperties": { # Output only. Properties set by autotuning engine. + "a_key": { # Annotatated property value. + "annotation": "A String", # Annotation, comment or explanation why the property was set. + "overriddenValue": "A String", # Optional. Value which was replaced by the corresponding component. + "value": "A String", # Property value. + }, + }, + }, }, "sessionTemplate": "A String", # Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session. "sparkConnectSession": { # Spark connect configuration for an interactive session. # Optional. Spark connect session config. @@ -481,6 +499,15 @@

Method Details

"a_key": "A String", }, "outputUri": "A String", # Output only. A URI pointing to the location of the stdout and stderr of the workload. + "propertiesInfo": { # Properties of the workload organized by origin. # Optional. Properties of the workload organized by origin. + "autotuningProperties": { # Output only. Properties set by autotuning engine. + "a_key": { # Annotatated property value. + "annotation": "A String", # Annotation, comment or explanation why the property was set. + "overriddenValue": "A String", # Optional. Value which was replaced by the corresponding component. + "value": "A String", # Property value. + }, + }, + }, }, "sessionTemplate": "A String", # Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session. "sparkConnectSession": { # Spark connect configuration for an interactive session. # Optional. Spark connect session config. diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html b/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html index a5de9a955c..a135f3d6d5 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html @@ -83,12 +83,6 @@

Instance Methods

accessJob(name, jobId=None, parent=None, x__xgafv=None)

Obtain data corresponding to a spark job for a Spark Application.

-accessNativeBuildInfo(name, parent=None, x__xgafv=None)

-Obtain data corresponding to Native Build Information for a Spark Application.

-accessNativeSqlQuery(name, executionId=None, parent=None, x__xgafv=None)

-Obtain data corresponding to a particular Native SQL Query for a Spark Application.

accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None)

Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.

@@ -125,12 +119,6 @@

Instance Methods

searchJobs_next()

Retrieves the next page of results.

-searchNativeSqlQueries(name, pageSize=None, pageToken=None, parent=None, x__xgafv=None)

-Obtain data corresponding to Native SQL Queries for a Spark Application.

-searchNativeSqlQueries_next()

-Retrieves the next page of results.

searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None)

Obtain data corresponding to SQL Queries for a Spark Application.

@@ -325,67 +313,6 @@

Method Details

}
-
-accessNativeBuildInfo(name, parent=None, x__xgafv=None)
-Obtain data corresponding to Native Build Information for a Spark Application.
-
-Args:
-  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
-  parent: string, Required. Parent (Session) resource reference.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Details of a native build info for a Spark Application
-  "executionData": { # Native SQL Execution Data
-    "buildClass": "A String", # Optional. Build class of Native.
-    "buildInfo": [ # Optional. Build related details.
-      { # Native Build Info
-        "buildKey": "A String", # Optional. Build key.
-        "buildValue": "A String", # Optional. Build value.
-      },
-    ],
-  },
-}
-
-
-accessNativeSqlQuery(name, executionId=None, parent=None, x__xgafv=None)
-Obtain data corresponding to a particular Native SQL Query for a Spark Application.
-
-Args:
-  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
-  executionId: string, Required. Execution ID
-  parent: string, Required. Parent (Session) resource reference.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Details of a native query for a Spark Application
-  "executionData": { # Native SQL Execution Data # Native SQL Execution Data
-    "description": "A String", # Optional. Description of the execution.
-    "executionId": "A String", # Required. Execution ID of the Native SQL Execution.
-    "fallbackDescription": "A String", # Optional. Description of the fallback.
-    "fallbackNodeToReason": [ # Optional. Fallback node to reason.
-      { # Native SQL Execution Data
-        "fallbackNode": "A String", # Optional. Fallback node information.
-        "fallbackReason": "A String", # Optional. Fallback to Spark reason.
-      },
-    ],
-    "numFallbackNodes": 42, # Optional. Number of nodes fallen back to Spark.
-    "numNativeNodes": 42, # Optional. Number of nodes in Native.
-  },
-}
-
-
accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None)
Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.
@@ -1501,57 +1428,6 @@ 

Method Details

-
-searchNativeSqlQueries(name, pageSize=None, pageToken=None, parent=None, x__xgafv=None)
-Obtain data corresponding to Native SQL Queries for a Spark Application.
-
-Args:
-  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
-  pageSize: integer, Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
-  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.
-  parent: string, Required. Parent (Session) resource reference.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # List of all Native queries for a Spark Application.
-  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest.
-  "sparkApplicationNativeSqlQueries": [ # Output only. Native SQL Execution Data
-    { # Native SQL Execution Data
-      "description": "A String", # Optional. Description of the execution.
-      "executionId": "A String", # Required. Execution ID of the Native SQL Execution.
-      "fallbackDescription": "A String", # Optional. Description of the fallback.
-      "fallbackNodeToReason": [ # Optional. Fallback node to reason.
-        { # Native SQL Execution Data
-          "fallbackNode": "A String", # Optional. Fallback node information.
-          "fallbackReason": "A String", # Optional. Fallback to Spark reason.
-        },
-      ],
-      "numFallbackNodes": 42, # Optional. Number of nodes fallen back to Spark.
-      "numNativeNodes": 42, # Optional. Number of nodes in Native.
-    },
-  ],
-}
-
-
-searchNativeSqlQueries_next()
-Retrieves the next page of results.
-
-        Args:
-          previous_request: The request for the previous page. (required)
-          previous_response: The response from the request for the previous page. (required)
-
-        Returns:
-          A request object that you can call 'execute()' on to request the next
-          page. Returns None if there are no more items in the collection.
-        
-
-
searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None)
Obtain data corresponding to SQL Queries for a Spark Application.
diff --git a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
index 5b91b2929b..1f6ba669d7 100644
--- a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
+++ b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
@@ -417,7 +417,7 @@ 

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -479,6 +479,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -586,8 +587,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -596,7 +600,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -661,7 +665,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -757,7 +761,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1122,7 +1126,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1184,6 +1188,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -1291,8 +1296,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -1301,7 +1309,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1366,7 +1374,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1462,7 +1470,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1854,7 +1862,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1916,6 +1924,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -2023,8 +2032,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -2033,7 +2045,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2098,7 +2110,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2194,7 +2206,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2650,7 +2662,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2712,6 +2724,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -2819,8 +2832,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -2829,7 +2845,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2894,7 +2910,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2990,7 +3006,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3396,7 +3412,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3458,6 +3474,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -3565,8 +3582,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -3575,7 +3595,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3640,7 +3660,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3736,7 +3756,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4210,7 +4230,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4272,6 +4292,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -4379,8 +4400,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -4389,7 +4413,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4454,7 +4478,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4550,7 +4574,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4915,7 +4939,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4977,6 +5001,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -5084,8 +5109,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -5094,7 +5122,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -5159,7 +5187,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -5255,7 +5283,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). diff --git a/docs/dyn/dataproc_v1.projects.regions.autoscalingPolicies.html b/docs/dyn/dataproc_v1.projects.regions.autoscalingPolicies.html index 0c652ddb15..be15d5ffd2 100644 --- a/docs/dyn/dataproc_v1.projects.regions.autoscalingPolicies.html +++ b/docs/dyn/dataproc_v1.projects.regions.autoscalingPolicies.html @@ -138,6 +138,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -182,6 +183,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -251,6 +253,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -350,6 +353,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -500,6 +504,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", @@ -544,6 +549,7 @@

Method Details

"scaleUpMinWorkerFraction": 3.14, # Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. }, }, + "clusterType": "A String", # Optional. The type of the clusters for which this autoscaling policy is to be configured. "id": "A String", # Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. "labels": { # Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. "a_key": "A String", diff --git a/docs/dyn/dataproc_v1.projects.regions.clusters.html b/docs/dyn/dataproc_v1.projects.regions.clusters.html index f8c31cd53a..15139e8cf1 100644 --- a/docs/dyn/dataproc_v1.projects.regions.clusters.html +++ b/docs/dyn/dataproc_v1.projects.regions.clusters.html @@ -161,7 +161,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -223,6 +223,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -330,8 +331,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -340,7 +344,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -405,7 +409,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -501,7 +505,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -815,7 +819,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -877,6 +881,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -984,8 +989,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -994,7 +1002,7 @@

Method Details

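Continuing the sketch above: the new autoStopTime, autoStopTtl, and idleStopTtl fields take the same JSON encodings as their auto-delete counterparts, i.e. RFC 3339 timestamps and proto3 JSON durations (seconds with an "s" suffix). The values below are illustrative only:

cluster_body["config"]["lifecycleConfig"] = {
    "autoDeleteTtl": "86400s",  # delete the cluster 24 hours after creation (10m to 14d allowed)
    "autoStopTtl": "14400s",    # stop it 4 hours after the create/update request (10m to 14d)
    "idleStopTtl": "1800s",     # stop it after 30 idle minutes (5m to 14d)
    # An absolute deadline can be used instead of a TTL:
    # "autoStopTime": "2025-01-01T00:00:00Z",
}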
"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1059,7 +1067,7 @@

Method Details

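The diskConfig hunk documents that provisioned IOPS and throughput apply only when boot_disk_type is hyperdisk-balanced, and that both fields are typed as strings (int64 values serialized as JSON strings). A sketch with illustrative numbers, continuing the cluster config above; the bootDiskType field name is taken from the boot_disk_type reference in the descriptions:

cluster_body["config"]["masterConfig"] = {
    "diskConfig": {
        "bootDiskType": "hyperdisk-balanced",    # referenced as boot_disk_type above
        "bootDiskSizeGb": 500,                   # matches the documented default
        "bootDiskProvisionedIops": "3000",       # string-typed int64, I/O operations per second
        "bootDiskProvisionedThroughput": "140",  # MB per second, must be >= 1
    },
}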
"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1155,7 +1163,7 @@

Method Details

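Per the acceleratorTypeUri note above, the short name is mandatory under Auto Zone Placement, so a sketch would use it rather than the full URL. The acceleratorCount field name follows the AcceleratorConfig schema and does not appear in the excerpt, so treat it as an assumption:

cluster_body["config"]["workerConfig"] = {
    "accelerators": [
        {
            # Short name; full or partial URIs also work outside Auto Zone Placement.
            "acceleratorTypeUri": "nvidia-tesla-t4",
            "acceleratorCount": 1,  # assumed field name, illustrative count
        }
    ],
}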
"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1430,7 +1438,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1492,6 +1500,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -1599,8 +1608,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -1609,7 +1621,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1674,7 +1686,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1770,7 +1782,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1965,7 +1977,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2027,6 +2039,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -2134,8 +2147,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -2144,7 +2160,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2209,7 +2225,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2305,7 +2321,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). diff --git a/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html b/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html index 12320e178f..4c5e78f740 100644 --- a/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html +++ b/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html @@ -116,7 +116,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -234,7 +234,7 @@

Method Details

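The same diskConfig change also lands in the node group document. A sketch of reading it back through the nodeGroups resource, reusing the service object from the first sketch; the resource name components are hypothetical, and nodeGroupConfig as the field holding the instance group config is an assumption not shown in this excerpt:

name = ("projects/my-project/regions/us-central1/"
        "clusters/example-cluster/nodeGroups/driver-pool")
node_group = (
    dataproc.projects().regions().clusters().nodeGroups().get(name=name).execute()
)
# Inspect the boot disk settings documented in the hunk above.
print(node_group.get("nodeGroupConfig", {}).get("diskConfig"))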
"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). diff --git a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html index 1c3423ef48..7fa5019cb2 100644 --- a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html +++ b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html @@ -417,7 +417,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -479,6 +479,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -586,8 +587,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -596,7 +600,7 @@

Method Details

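Because a workflow template's managed cluster embeds a full ClusterConfig, the clusterType, lifecycleConfig, and diskConfig fields shown in this file's hunks are set exactly as for a standalone cluster. A sketch reusing the config built in the earlier snippets; the template ID and parent are hypothetical, and a real template would also need at least one job:

template_body = {
    "id": "example-template",
    "placement": {
        "managedCluster": {
            "clusterName": "managed-cluster",
            "config": cluster_body["config"],  # same ClusterConfig as above
        }
    },
    "jobs": [],  # populate with OrderedJob entries before using
}

dataproc.projects().regions().workflowTemplates().create(
    parent="projects/my-project/regions/us-central1", body=template_body
).execute()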
"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -661,7 +665,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -757,7 +761,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1122,7 +1126,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1184,6 +1188,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -1291,8 +1296,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -1301,7 +1309,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1366,7 +1374,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1462,7 +1470,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1854,7 +1862,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -1916,6 +1924,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -2023,8 +2032,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -2033,7 +2045,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2098,7 +2110,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2194,7 +2206,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2650,7 +2662,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2712,6 +2724,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -2819,8 +2832,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -2829,7 +2845,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2894,7 +2910,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -2990,7 +3006,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3396,7 +3412,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3458,6 +3474,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -3565,8 +3582,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -3575,7 +3595,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3640,7 +3660,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -3736,7 +3756,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4210,7 +4230,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4272,6 +4292,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -4379,8 +4400,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -4389,7 +4413,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4454,7 +4478,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4550,7 +4574,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4915,7 +4939,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -4977,6 +5001,7 @@

Method Details

"nodeGroupId": "A String", # Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. }, ], + "clusterType": "A String", # Optional. The type of the cluster. "configBucket": "A String", # Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. "dataprocMetricConfig": { # Dataproc metric config. # Optional. The config for Dataproc metrics. "metrics": [ # Required. Metrics sources to enable. @@ -5084,8 +5109,11 @@

Method Details

"lifecycleConfig": { # Specifies the cluster auto-delete schedule configuration. # Optional. Lifecycle setting for the cluster. "autoDeleteTime": "A String", # Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). "autoDeleteTtl": "A String", # Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTime": "A String", # Optional. The time when cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "autoStopTtl": "A String", # Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleDeleteTtl": "A String", # Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). "idleStartTime": "A String", # Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + "idleStopTtl": "A String", # Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). }, "masterConfig": { # The config settings for Compute Engine resources in an instance group, such as a master or worker group. # Optional. The Compute Engine config settings for the cluster's master instance. "accelerators": [ # Optional. The Compute Engine accelerator configuration for these instances. @@ -5094,7 +5122,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -5159,7 +5187,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). @@ -5255,7 +5283,7 @@

Method Details

"acceleratorTypeUri": "A String", # Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. }, ], - "diskConfig": { # Specifies the config of disk options for a group of VM instances. # Optional. Disk option config settings. + "diskConfig": { # Specifies the config of boot disk and attached disk options for a group of VM instances. # Optional. Disk option config settings. "bootDiskProvisionedIops": "A String", # Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskProvisionedThroughput": "A String", # Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. This field is supported only if boot_disk_type is hyperdisk-balanced. "bootDiskSizeGb": 42, # Optional. Size in GB of the boot disk (default is 500GB). diff --git a/docs/dyn/datastream_v1.projects.locations.connectionProfiles.html b/docs/dyn/datastream_v1.projects.locations.connectionProfiles.html index 836bceacfe..1fefa0a5c6 100644 --- a/docs/dyn/datastream_v1.projects.locations.connectionProfiles.html +++ b/docs/dyn/datastream_v1.projects.locations.connectionProfiles.html @@ -144,6 +144,15 @@

Method Details

"secretManagerStoredPassword": "A String", # Optional. A reference to a Secret Manager resource name storing the SQLServer connection password. Mutually exclusive with the `password` field. "srvConnectionFormat": { # Srv connection format. # Srv connection format. }, + "sslConfig": { # MongoDB SSL configuration information. # Optional. SSL configuration for the MongoDB connection. + "caCertificate": "A String", # Optional. Input only. PEM-encoded certificate of the CA that signed the source database server's certificate. + "caCertificateSet": True or False, # Output only. Indicates whether the ca_certificate field is set. + "clientCertificate": "A String", # Optional. Input only. PEM-encoded certificate that will be used by the replica to authenticate against the source database server. If this field is used then the 'client_key' and the 'ca_certificate' fields are mandatory. + "clientCertificateSet": True or False, # Output only. Indicates whether the client_certificate field is set. + "clientKey": "A String", # Optional. Input only. PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. + "clientKeySet": True or False, # Output only. Indicates whether the client_key field is set. + "secretManagerStoredClientKey": "A String", # Optional. Input only. A reference to a Secret Manager resource name storing the PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. Mutually exclusive with the `client_key` field. + }, "standardConnectionFormat": { # Standard connection format. # Standard connection format. "directConnection": True or False, # Optional. Specifies whether the client connects directly to the host[:port] in the connection URI. }, @@ -356,6 +365,15 @@

Method Details

"secretManagerStoredPassword": "A String", # Optional. A reference to a Secret Manager resource name storing the SQLServer connection password. Mutually exclusive with the `password` field. "srvConnectionFormat": { # Srv connection format. # Srv connection format. }, + "sslConfig": { # MongoDB SSL configuration information. # Optional. SSL configuration for the MongoDB connection. + "caCertificate": "A String", # Optional. Input only. PEM-encoded certificate of the CA that signed the source database server's certificate. + "caCertificateSet": True or False, # Output only. Indicates whether the ca_certificate field is set. + "clientCertificate": "A String", # Optional. Input only. PEM-encoded certificate that will be used by the replica to authenticate against the source database server. If this field is used then the 'client_key' and the 'ca_certificate' fields are mandatory. + "clientCertificateSet": True or False, # Output only. Indicates whether the client_certificate field is set. + "clientKey": "A String", # Optional. Input only. PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. + "clientKeySet": True or False, # Output only. Indicates whether the client_key field is set. + "secretManagerStoredClientKey": "A String", # Optional. Input only. A reference to a Secret Manager resource name storing the PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. Mutually exclusive with the `client_key` field. + }, "standardConnectionFormat": { # Standard connection format. # Standard connection format. "directConnection": True or False, # Optional. Specifies whether the client connects directly to the host[:port] in the connection URI. }, @@ -777,6 +795,15 @@

Method Details

"secretManagerStoredPassword": "A String", # Optional. A reference to a Secret Manager resource name storing the SQLServer connection password. Mutually exclusive with the `password` field. "srvConnectionFormat": { # Srv connection format. # Srv connection format. }, + "sslConfig": { # MongoDB SSL configuration information. # Optional. SSL configuration for the MongoDB connection. + "caCertificate": "A String", # Optional. Input only. PEM-encoded certificate of the CA that signed the source database server's certificate. + "caCertificateSet": True or False, # Output only. Indicates whether the ca_certificate field is set. + "clientCertificate": "A String", # Optional. Input only. PEM-encoded certificate that will be used by the replica to authenticate against the source database server. If this field is used then the 'client_key' and the 'ca_certificate' fields are mandatory. + "clientCertificateSet": True or False, # Output only. Indicates whether the client_certificate field is set. + "clientKey": "A String", # Optional. Input only. PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. + "clientKeySet": True or False, # Output only. Indicates whether the client_key field is set. + "secretManagerStoredClientKey": "A String", # Optional. Input only. A reference to a Secret Manager resource name storing the PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. Mutually exclusive with the `client_key` field. + }, "standardConnectionFormat": { # Standard connection format. # Standard connection format. "directConnection": True or False, # Optional. Specifies whether the client connects directly to the host[:port] in the connection URI. }, @@ -931,6 +958,15 @@

Method Details

"secretManagerStoredPassword": "A String", # Optional. A reference to a Secret Manager resource name storing the SQLServer connection password. Mutually exclusive with the `password` field. "srvConnectionFormat": { # Srv connection format. # Srv connection format. }, + "sslConfig": { # MongoDB SSL configuration information. # Optional. SSL configuration for the MongoDB connection. + "caCertificate": "A String", # Optional. Input only. PEM-encoded certificate of the CA that signed the source database server's certificate. + "caCertificateSet": True or False, # Output only. Indicates whether the ca_certificate field is set. + "clientCertificate": "A String", # Optional. Input only. PEM-encoded certificate that will be used by the replica to authenticate against the source database server. If this field is used then the 'client_key' and the 'ca_certificate' fields are mandatory. + "clientCertificateSet": True or False, # Output only. Indicates whether the client_certificate field is set. + "clientKey": "A String", # Optional. Input only. PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. + "clientKeySet": True or False, # Output only. Indicates whether the client_key field is set. + "secretManagerStoredClientKey": "A String", # Optional. Input only. A reference to a Secret Manager resource name storing the PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. Mutually exclusive with the `client_key` field. + }, "standardConnectionFormat": { # Standard connection format. # Standard connection format. "directConnection": True or False, # Optional. Specifies whether the client connects directly to the host[:port] in the connection URI. }, @@ -1094,6 +1130,15 @@

Method Details

"secretManagerStoredPassword": "A String", # Optional. A reference to a Secret Manager resource name storing the SQLServer connection password. Mutually exclusive with the `password` field. "srvConnectionFormat": { # Srv connection format. # Srv connection format. }, + "sslConfig": { # MongoDB SSL configuration information. # Optional. SSL configuration for the MongoDB connection. + "caCertificate": "A String", # Optional. Input only. PEM-encoded certificate of the CA that signed the source database server's certificate. + "caCertificateSet": True or False, # Output only. Indicates whether the ca_certificate field is set. + "clientCertificate": "A String", # Optional. Input only. PEM-encoded certificate that will be used by the replica to authenticate against the source database server. If this field is used then the 'client_key' and the 'ca_certificate' fields are mandatory. + "clientCertificateSet": True or False, # Output only. Indicates whether the client_certificate field is set. + "clientKey": "A String", # Optional. Input only. PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. + "clientKeySet": True or False, # Output only. Indicates whether the client_key field is set. + "secretManagerStoredClientKey": "A String", # Optional. Input only. A reference to a Secret Manager resource name storing the PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. Mutually exclusive with the `client_key` field. + }, "standardConnectionFormat": { # Standard connection format. # Standard connection format. "directConnection": True or False, # Optional. Specifies whether the client connects directly to the host[:port] in the connection URI. }, diff --git a/docs/dyn/datastream_v1.projects.locations.privateConnections.html b/docs/dyn/datastream_v1.projects.locations.privateConnections.html index 0a7c9c01aa..965782ab42 100644 --- a/docs/dyn/datastream_v1.projects.locations.privateConnections.html +++ b/docs/dyn/datastream_v1.projects.locations.privateConnections.html @@ -129,7 +129,7 @@

Method Details

}, "name": "A String", # Output only. Identifier. The resource's name. "pscInterfaceConfig": { # The PSC Interface configuration is used to create PSC Interface between Datastream and the consumer's PSC. # PSC Interface Config. - "networkAttachment": "A String", # Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}` + "networkAttachment": "A String", # Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{project}/regions/{region}/networkAttachments/{name}` }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. @@ -242,7 +242,7 @@

Method Details

}, "name": "A String", # Output only. Identifier. The resource's name. "pscInterfaceConfig": { # The PSC Interface configuration is used to create PSC Interface between Datastream and the consumer's PSC. # PSC Interface Config. - "networkAttachment": "A String", # Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}` + "networkAttachment": "A String", # Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{project}/regions/{region}/networkAttachments/{name}` }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. @@ -293,7 +293,7 @@

Method Details

}, "name": "A String", # Output only. Identifier. The resource's name. "pscInterfaceConfig": { # The PSC Interface configuration is used to create PSC Interface between Datastream and the consumer's PSC. # PSC Interface Config. - "networkAttachment": "A String", # Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}` + "networkAttachment": "A String", # Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{project}/regions/{region}/networkAttachments/{name}` }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html index 87dba79e91..f964e9de42 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html @@ -358,6 +358,15 @@

diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html
index 87dba79e91..f964e9de42 100644
--- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html
+++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.conversations.html
@@ -358,6 +358,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html index b9a18a52b5..41e5e3f6ea 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html @@ -269,6 +269,9 @@

diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html
index b9a18a52b5..41e5e3f6ea 100644
--- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html
+++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html
@@ -269,6 +269,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -294,6 +297,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -471,6 +477,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -496,6 +505,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -629,6 +641,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -654,6 +669,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -775,6 +793,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -800,6 +821,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -903,6 +927,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -928,6 +955,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html index 32c320d985..a04a8d5cbc 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html @@ -1204,6 +1204,15 @@

diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html
index 32c320d985..a04a8d5cbc 100644
--- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html
@@ -1204,6 +1204,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk @@ -1564,6 +1573,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.conversations.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.conversations.html index 27f52f1e32..a0f6630f3f 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.conversations.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.conversations.html @@ -358,6 +358,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html index 654b0bf317..e26f2de8dc 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html @@ -158,6 +158,9 @@

Method Details

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -289,6 +292,9 @@

Method Details

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -361,6 +367,9 @@

Method Details

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -440,6 +449,9 @@

Method Details

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -501,6 +513,9 @@

Method Details

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html index 327dcfa464..3b32a66f08 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html @@ -1204,6 +1204,15 @@

diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html
index 327dcfa464..3b32a66f08 100644
--- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html
@@ -1204,6 +1204,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk @@ -1564,6 +1573,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.conversations.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.conversations.html index 8518db46e1..5f2ec11d84 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.conversations.html @@ -358,6 +358,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html index 54aebf1249..0cde06ab93 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html @@ -261,6 +261,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -286,6 +289,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -463,6 +469,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -488,6 +497,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -621,6 +633,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -646,6 +661,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -767,6 +785,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -792,6 +813,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -895,6 +919,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -920,6 +947,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html index 91934213cd..a41651e0b2 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html @@ -1204,6 +1204,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk @@ -1564,6 +1573,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1Chunk diff --git a/docs/dyn/discoveryengine_v1.projects.locations.userStores.html b/docs/dyn/discoveryengine_v1.projects.locations.userStores.html index dad65b657c..c865ddcf57 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.userStores.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.userStores.html @@ -112,7 +112,6 @@

Method Details

"licenseAssignmentState": "A String", # Output only. License assignment state of the user. If the user is assigned with a license config, the user loggin will be assigned with the license; If the user's license assignment state is unassigned or unspecified, no license config will be associated to the user; "licenseConfig": "A String", # Optional. The full resource name of the Subscription(LicenseConfig) assigned to the user. "updateTime": "A String", # Output only. User update timestamp. - "user": "A String", # Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created. "userPrincipal": "A String", # Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal. "userProfile": "A String", # Optional. The user profile. We user user full name(First name + Last name) as user profile. }, diff --git a/docs/dyn/discoveryengine_v1.projects.locations.userStores.userLicenses.html b/docs/dyn/discoveryengine_v1.projects.locations.userStores.userLicenses.html index 97c3a83856..fc976f5c64 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.userStores.userLicenses.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.userStores.userLicenses.html @@ -115,7 +115,6 @@

Method Details

"licenseAssignmentState": "A String", # Output only. License assignment state of the user. If the user is assigned with a license config, the user loggin will be assigned with the license; If the user's license assignment state is unassigned or unspecified, no license config will be associated to the user; "licenseConfig": "A String", # Optional. The full resource name of the Subscription(LicenseConfig) assigned to the user. "updateTime": "A String", # Output only. User update timestamp. - "user": "A String", # Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created. "userPrincipal": "A String", # Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal. "userProfile": "A String", # Optional. The user profile. We user user full name(First name + Last name) as user profile. }, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.connectorRuns.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.connectorRuns.html index f08d0f9314..51978eb6d7 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.connectorRuns.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.connectorRuns.html @@ -125,13 +125,14 @@

Method Details

"message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. }, ], - "extractedRecordCount": "A String", # The number of documents extracted from connector source, ready to be ingested to UCS. + "extractedRecordCount": "A String", # The number of documents extracted from connector source, ready to be ingested to VAIS. "indexedRecordCount": "A String", # The number of documents indexed. "progress": { # Represents the progress of a sync run. # Metadata to generate the progress bar. "currentCount": "A String", # The current progress. "percentile": 3.14, # Derived. The percentile of the progress.current_count / total_count. The value is between [0, 1.0] inclusive. "totalCount": "A String", # The total. }, + "scheduledRecordCount": "A String", # The number of documents scheduled to be crawled/extracted from connector source. This only applies to third party connectors. "sourceApiRequestCount": "A String", # The number of requests sent to 3p API. "state": "A String", # The state of the entity's sync run. "stateUpdateTime": "A String", # Timestamp at which the entity sync state was last updated. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.html index dbaca23a75..75300dc43f 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataConnector.html @@ -170,13 +170,14 @@

Method Details

"message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. }, ], - "extractedRecordCount": "A String", # The number of documents extracted from connector source, ready to be ingested to UCS. + "extractedRecordCount": "A String", # The number of documents extracted from connector source, ready to be ingested to VAIS. "indexedRecordCount": "A String", # The number of documents indexed. "progress": { # Represents the progress of a sync run. # Metadata to generate the progress bar. "currentCount": "A String", # The current progress. "percentile": 3.14, # Derived. The percentile of the progress.current_count / total_count. The value is between [0, 1.0] inclusive. "totalCount": "A String", # The total. }, + "scheduledRecordCount": "A String", # The number of documents scheduled to be crawled/extracted from connector source. This only applies to third party connectors. "sourceApiRequestCount": "A String", # The number of requests sent to 3p API. "state": "A String", # The state of the entity's sync run. "stateUpdateTime": "A String", # Timestamp at which the entity sync state was last updated. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html index d95e3daed5..dd45e22752 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html @@ -110,6 +110,15 @@

diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html
index d95e3daed5..dd45e22752 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html
@@ -110,6 +110,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -164,6 +173,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html index 4b16023d41..7a9b882aa1 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html @@ -427,6 +427,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html index b4879dad06..e77aa73025 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html @@ -285,6 +285,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -310,6 +313,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -522,6 +528,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -547,6 +556,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -659,6 +671,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -684,6 +699,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -788,6 +806,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -813,6 +834,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -969,6 +993,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -994,6 +1021,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1132,6 +1162,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1157,6 +1190,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1316,6 +1352,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1341,6 +1380,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1383,6 +1425,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1408,6 +1453,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html index 16960d0f3d..4a1f3bc7fc 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html @@ -1650,6 +1650,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -1750,6 +1759,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -2211,6 +2229,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -2311,6 +2338,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html index 0911d6bcc7..1e27fd2df7 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html @@ -285,6 +285,9 @@

"enableSafeSearch": True or False, # Whether to enable safe search. "enableSearchAsYouType": True or False, # Whether to enable search-as-you-type behavior for the search widget. "enableVisualContentSummary": True or False, # If set to true, the widget will enable visual content summary on applicable search requests. Only used by healthcare search. + "features": { # Output only. Feature config for the engine to opt in or opt out of features. Supported keys: * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "generativeAnswerConfig": { # Describes configuration for generative answer. # Describes generative answer configuration. "disableRelatedQuestions": True or False, # Whether generated answer contains suggested related questions. "ignoreAdversarialQuery": True or False, # Optional. Specifies whether to filter out queries that are adversarial. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html index 6aac927ed6..49c7010116 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html @@ -427,6 +427,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html index 2c36dd6d9d..d280397428 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html @@ -177,6 +177,9 @@

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -198,6 +201,7 @@

"name": "A String", # Immutable. Identifier. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. "recommendationMetadata": { # Additional information of a recommendation engine. # Output only. Additional information of a recommendation engine. Only applicable if solution_type is SOLUTION_TYPE_RECOMMENDATION. "dataState": "A String", # Output only. The state of data requirements for this engine: `DATA_OK` and `DATA_ERROR`. Engine cannot be trained if the data is in `DATA_ERROR` state. Engine can have `DATA_ERROR` state even if serving state is `ACTIVE`: engines were trained successfully before, but cannot be refreshed because the underlying engine no longer has sufficient data for training. + "lastTrainTime": "A String", # Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines. "lastTuneTime": "A String", # Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines. "servingState": "A String", # Output only. The serving state of the engine: `ACTIVE`, `NOT_ACTIVE`. "tuningOperation": "A String", # Output only. The latest tune operation id associated with the engine. Only applicable on Media Recommendation engines. If present, this operation id can be used to determine if there is an ongoing tune for this engine. To check the operation status, send the GetOperation request with this operation id in the engine resource format. If no tuning has happened for this engine, the string is empty. @@ -316,6 +320,9 @@

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -337,6 +344,7 @@

"name": "A String", # Immutable. Identifier. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. "recommendationMetadata": { # Additional information of a recommendation engine. # Output only. Additional information of a recommendation engine. Only applicable if solution_type is SOLUTION_TYPE_RECOMMENDATION. "dataState": "A String", # Output only. The state of data requirements for this engine: `DATA_OK` and `DATA_ERROR`. Engine cannot be trained if the data is in `DATA_ERROR` state. Engine can have `DATA_ERROR` state even if serving state is `ACTIVE`: engines were trained successfully before, but cannot be refreshed because the underlying engine no longer has sufficient data for training. + "lastTrainTime": "A String", # Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines. "lastTuneTime": "A String", # Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines. "servingState": "A String", # Output only. The serving state of the engine: `ACTIVE`, `NOT_ACTIVE`. "tuningOperation": "A String", # Output only. The latest tune operation id associated with the engine. Only applicable on Media Recommendation engines. If present, this operation id can be used to determine if there is an ongoing tune for this engine. To check the operation status, send the GetOperation request with this operation id in the engine resource format. If no tuning has happened for this engine, the string is empty. @@ -396,6 +404,9 @@

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -417,6 +428,7 @@

"name": "A String", # Immutable. Identifier. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. "recommendationMetadata": { # Additional information of a recommendation engine. # Output only. Additional information of a recommendation engine. Only applicable if solution_type is SOLUTION_TYPE_RECOMMENDATION. "dataState": "A String", # Output only. The state of data requirements for this engine: `DATA_OK` and `DATA_ERROR`. Engine cannot be trained if the data is in `DATA_ERROR` state. Engine can have `DATA_ERROR` state even if serving state is `ACTIVE`: engines were trained successfully before, but cannot be refreshed because the underlying engine no longer has sufficient data for training. + "lastTrainTime": "A String", # Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines. "lastTuneTime": "A String", # Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines. "servingState": "A String", # Output only. The serving state of the engine: `ACTIVE`, `NOT_ACTIVE`. "tuningOperation": "A String", # Output only. The latest tune operation id associated with the engine. Only applicable on Media Recommendation engines. If present, this operation id can be used to determine if there is an ongoing tune for this engine. To check the operation status, send the GetOperation request with this operation id in the engine resource format. If no tuning has happened for this engine, the string is empty. @@ -483,6 +495,9 @@

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -504,6 +519,7 @@

"name": "A String", # Immutable. Identifier. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. "recommendationMetadata": { # Additional information of a recommendation engine. # Output only. Additional information of a recommendation engine. Only applicable if solution_type is SOLUTION_TYPE_RECOMMENDATION. "dataState": "A String", # Output only. The state of data requirements for this engine: `DATA_OK` and `DATA_ERROR`. Engine cannot be trained if the data is in `DATA_ERROR` state. Engine can have `DATA_ERROR` state even if serving state is `ACTIVE`: engines were trained successfully before, but cannot be refreshed because the underlying engine no longer has sufficient data for training. + "lastTrainTime": "A String", # Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines. "lastTuneTime": "A String", # Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines. "servingState": "A String", # Output only. The serving state of the engine: `ACTIVE`, `NOT_ACTIVE`. "tuningOperation": "A String", # Output only. The latest tune operation id associated with the engine. Only applicable on Media Recommendation engines. If present, this operation id can be used to determine if there is an ongoing tune for this engine. To check the operation status, send the GetOperation request with this operation id in the engine resource format. If no tuning has happened for this engine, the string is empty. @@ -552,6 +568,9 @@

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -573,6 +592,7 @@

"name": "A String", # Immutable. Identifier. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. "recommendationMetadata": { # Additional information of a recommendation engine. # Output only. Additional information of a recommendation engine. Only applicable if solution_type is SOLUTION_TYPE_RECOMMENDATION. "dataState": "A String", # Output only. The state of data requirements for this engine: `DATA_OK` and `DATA_ERROR`. Engine cannot be trained if the data is in `DATA_ERROR` state. Engine can have `DATA_ERROR` state even if serving state is `ACTIVE`: engines were trained successfully before, but cannot be refreshed because the underlying engine no longer has sufficient data for training. + "lastTrainTime": "A String", # Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines. "lastTuneTime": "A String", # Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines. "servingState": "A String", # Output only. The serving state of the engine: `ACTIVE`, `NOT_ACTIVE`. "tuningOperation": "A String", # Output only. The latest tune operation id associated with the engine. Only applicable on Media Recommendation engines. If present, this operation id can be used to determine if there is an ongoing tune for this engine. To check the operation status, send the GetOperation request with this operation id in the engine resource format. If no tuning has happened for this engine, the string is empty. @@ -633,6 +653,9 @@

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -654,6 +677,7 @@

"name": "A String", # Immutable. Identifier. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. "recommendationMetadata": { # Additional information of a recommendation engine. # Output only. Additional information of a recommendation engine. Only applicable if solution_type is SOLUTION_TYPE_RECOMMENDATION. "dataState": "A String", # Output only. The state of data requirements for this engine: `DATA_OK` and `DATA_ERROR`. Engine cannot be trained if the data is in `DATA_ERROR` state. Engine can have `DATA_ERROR` state even if serving state is `ACTIVE`: engines were trained successfully before, but cannot be refreshed because the underlying engine no longer has sufficient data for training. + "lastTrainTime": "A String", # Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines. "lastTuneTime": "A String", # Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines. "servingState": "A String", # Output only. The serving state of the engine: `ACTIVE`, `NOT_ACTIVE`. "tuningOperation": "A String", # Output only. The latest tune operation id associated with the engine. Only applicable on Media Recommendation engines. If present, this operation id can be used to determine if there is an ongoing tune for this engine. To check the operation status, send the GetOperation request with this operation id in the engine resource format. If no tuning has happened for this engine, the string is empty. @@ -714,6 +738,9 @@

], "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. + "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config. @@ -735,6 +762,7 @@

"name": "A String", # Immutable. Identifier. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. "recommendationMetadata": { # Additional information of a recommendation engine. # Output only. Additional information of a recommendation engine. Only applicable if solution_type is SOLUTION_TYPE_RECOMMENDATION. "dataState": "A String", # Output only. The state of data requirements for this engine: `DATA_OK` and `DATA_ERROR`. Engine cannot be trained if the data is in `DATA_ERROR` state. Engine can have `DATA_ERROR` state even if serving state is `ACTIVE`: engines were trained successfully before, but cannot be refreshed because the underlying engine no longer has sufficient data for training. + "lastTrainTime": "A String", # Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines. "lastTuneTime": "A String", # Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines. "servingState": "A String", # Output only. The serving state of the engine: `ACTIVE`, `NOT_ACTIVE`. "tuningOperation": "A String", # Output only. The latest tune operation id associated with the engine. Only applicable on Media Recommendation engines. If present, this operation id can be used to determine if there is an ongoing tune for this engine. To check the operation status, send the GetOperation request with this operation id in the engine resource format. If no tuning has happened for this engine, the string is empty. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html index 52cdef3f1c..16de7988f7 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html @@ -1650,6 +1650,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -1750,6 +1759,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -2211,6 +2229,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -2311,6 +2338,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html index 88bde5e4aa..f6ea85ca89 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html @@ -285,6 +285,9 @@

"enableSafeSearch": True or False, # Whether to enable safe search. "enableSearchAsYouType": True or False, # Whether to enable search-as-you-type behavior for the search widget. "enableVisualContentSummary": True or False, # If set to true, the widget will enable visual content summary on applicable search requests. Only used by healthcare search. + "features": { # Output only. Feature config for the engine to opt in or opt out of features. Supported keys: * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "generativeAnswerConfig": { # Describes configuration for generative answer. # Describes generative answer configuration. "disableRelatedQuestions": True or False, # Whether generated answer contains suggested related questions. "ignoreAdversarialQuery": True or False, # Optional. Specifies whether to filter out queries that are adversarial. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html index 3cdbdc26a2..7289f77752 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html @@ -110,6 +110,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -164,6 +173,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html index d7fa95feaa..688a5c6eba 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html @@ -427,6 +427,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html index 1ffe4a338b..74be0d29b9 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html @@ -277,6 +277,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -302,6 +305,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -514,6 +520,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -539,6 +548,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -651,6 +663,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -676,6 +691,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -780,6 +798,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -805,6 +826,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -961,6 +985,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -986,6 +1013,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1124,6 +1154,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1149,6 +1182,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1256,6 +1292,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1281,6 +1320,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1323,6 +1365,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -1348,6 +1393,9 @@

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html index e5d4fecd1e..acb9263833 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html @@ -1650,6 +1650,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -1750,6 +1759,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -2211,6 +2229,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk @@ -2311,6 +2338,15 @@

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html index dd85580d6f..d28e28dc8b 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html @@ -285,6 +285,9 @@

"enableSafeSearch": True or False, # Whether to enable safe search. "enableSearchAsYouType": True or False, # Whether to enable search-as-you-type behavior for the search widget. "enableVisualContentSummary": True or False, # If set to true, the widget will enable visual content summary on applicable search requests. Only used by healthcare search. + "features": { # Output only. Feature config for the engine to opt in or opt out of features. Supported keys: * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback` + "a_key": "A String", + }, "generativeAnswerConfig": { # Describes configuration for generative answer. # Describes generative answer configuration. "disableRelatedQuestions": True or False, # Whether generated answer contains suggested related questions. "ignoreAdversarialQuery": True or False, # Optional. Specifies whether to filter out queries that are adversarial. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.html index d83ee16c14..fa56725d4f 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.html @@ -117,7 +117,6 @@

"licenseAssignmentState": "A String", # Output only. License assignment state of the user. If the user is assigned with a license config, the user loggin will be assigned with the license; If the user's license assignment state is unassigned or unspecified, no license config will be associated to the user; "licenseConfig": "A String", # Optional. The full resource name of the Subscription(LicenseConfig) assigned to the user. "updateTime": "A String", # Output only. User update timestamp. - "user": "A String", # Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created. "userPrincipal": "A String", # Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal. "userProfile": "A String", # Optional. The user profile. We user user full name(First name + Last name) as user profile. }, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.userLicenses.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.userLicenses.html index 8cfabfcb2e..69c12d286c 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.userLicenses.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.userStores.userLicenses.html @@ -115,7 +115,6 @@
"licenseAssignmentState": "A String", # Output only. License assignment state of the user. If the user is assigned with a license config, the user loggin will be assigned with the license; If the user's license assignment state is unassigned or unspecified, no license config will be associated to the user; "licenseConfig": "A String", # Optional. The full resource name of the Subscription(LicenseConfig) assigned to the user. "updateTime": "A String", # Output only. User update timestamp. - "user": "A String", # Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created. "userPrincipal": "A String", # Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal. "userProfile": "A String", # Optional. The user profile. We user user full name(First name + Last name) as user profile. }, diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html index af321d237f..790018672b 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html @@ -427,6 +427,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html index a1f4a6e0c0..c7f01c496f 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html @@ -274,6 +274,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -299,6 +302,9 @@
"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -485,6 +491,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -510,6 +519,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -652,6 +664,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -677,6 +692,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -807,6 +825,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -832,6 +853,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -944,6 +968,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -969,6 +996,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html index 91f8bd4a0d..97a61122c1 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html @@ -1612,6 +1612,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -1712,6 +1721,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -2170,6 +2188,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -2270,6 +2297,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html index 305d669824..e7ce560787 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html @@ -427,6 +427,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html index 3091e70ead..3abdaeea2b 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html @@ -172,6 +172,9 @@

Method Details

       ],
       "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine.
       "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+      "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`
+        "a_key": "A String",
+      },
       "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.
       "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical.
         "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config.
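Unlike the widget copy of this map, `features` on Engine is writable, so it can be sent in a patch with an update mask. A hedged sketch (the engine name and `FEATURE_STATE_*` strings are assumptions; per the description above, a `*` key would override the individual entries):

    from googleapiclient.discovery import build

    client = build("discoveryengine", "v1beta")
    engine_name = (  # placeholder resource name
        "projects/my-project/locations/global/collections/default_collection/engines/my-engine"
    )
    client.projects().locations().collections().engines().patch(
        name=engine_name,
        updateMask="features",
        body={
            "name": engine_name,
            # Illustrative state strings; the diff does not list the enum values.
            "features": {"agent-gallery": "FEATURE_STATE_ON", "feedback": "FEATURE_STATE_OFF"},
        },
    ).execute()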
@@ -303,6 +306,9 @@ Method Details
       ],
       "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine.
       "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+      "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`
+        "a_key": "A String",
+      },
       "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.
       "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical.
         "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config.
@@ -375,6 +381,9 @@ Method Details
       ],
       "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine.
       "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+      "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`
+        "a_key": "A String",
+      },
       "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.
       "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical.
         "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config.
@@ -454,6 +463,9 @@ Method Details
       ],
       "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine.
       "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+      "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`
+        "a_key": "A String",
+      },
       "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.
       "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical.
         "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config.
@@ -515,6 +527,9 @@ Method Details
       ],
       "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine.
       "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+      "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`
+        "a_key": "A String",
+      },
       "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.
       "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical.
         "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config.
@@ -588,6 +603,9 @@ Method Details
       ],
       "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine.
       "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+      "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`
+        "a_key": "A String",
+      },
       "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.
       "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical.
         "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config.
@@ -661,6 +679,9 @@ Method Details
       ],
       "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine.
       "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+      "features": { # Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`
+        "a_key": "A String",
+      },
       "industryVertical": "A String", # Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.
       "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical.
         "engineFeaturesConfig": { # More feature configs of the selected engine type. # Optional. Additional engine features config.
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
index b5a04c2618..924be7db4d 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
@@ -1612,6 +1612,15 @@ Method Details
"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -1712,6 +1721,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -2170,6 +2188,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -2270,6 +2297,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html index 2d64439bd8..7031941db5 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html @@ -427,6 +427,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html index f0c7ab2db0..e92524771d 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html @@ -266,6 +266,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -291,6 +294,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -477,6 +483,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -502,6 +511,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -644,6 +656,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -669,6 +684,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -799,6 +817,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -824,6 +845,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -936,6 +960,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. @@ -961,6 +988,9 @@

Method Details

"excludeHtmlIds": [ # Optional. List of HTML ids to exclude from the parsed content. "A String", ], + "structuredContentTypes": [ # Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure` + "A String", + ], }, "ocrParsingConfig": { # The OCR parsing configurations for documents. # Configurations applied to OCR parser. Currently it only applies to PDFs. "enhancedDocumentElements": [ # [DEPRECATED] This field is deprecated. To use the additional enhanced document elements processing, please switch to `layout_parsing_config`. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html index 3a9b18eacc..61f0b58654 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html @@ -1612,6 +1612,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -1712,6 +1721,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -2170,6 +2188,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk @@ -2270,6 +2297,15 @@

Method Details

"annotationContents": [ # Output only. Annotation contents if the current chunk contains annotations. "A String", ], + "annotationMetadata": [ # Output only. The annotation metadata includes structured content in the current chunk. + { # The annotation metadata includes structured content in the current chunk. + "imageId": "A String", # Output only. Image id is provided if the structured content is based on an image. + "structuredContent": { # The structured content information. # Output only. The structured content information. + "content": "A String", # Output only. The content of the structured content. + "structureType": "A String", # Output only. The structure type of the structured content. + }, + }, + ], "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.html index 48018c7d77..189a45deaf 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.html @@ -112,7 +112,6 @@

Method Details

"licenseAssignmentState": "A String", # Output only. License assignment state of the user. If the user is assigned with a license config, the user loggin will be assigned with the license; If the user's license assignment state is unassigned or unspecified, no license config will be associated to the user; "licenseConfig": "A String", # Optional. The full resource name of the Subscription(LicenseConfig) assigned to the user. "updateTime": "A String", # Output only. User update timestamp. - "user": "A String", # Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created. "userPrincipal": "A String", # Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal. "userProfile": "A String", # Optional. The user profile. We user user full name(First name + Last name) as user profile. }, diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.userLicenses.html b/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.userLicenses.html index 46ec0658dd..ee314a3b03 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.userLicenses.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.userStores.userLicenses.html @@ -115,7 +115,6 @@

Method Details

"licenseAssignmentState": "A String", # Output only. License assignment state of the user. If the user is assigned with a license config, the user loggin will be assigned with the license; If the user's license assignment state is unassigned or unspecified, no license config will be associated to the user; "licenseConfig": "A String", # Optional. The full resource name of the Subscription(LicenseConfig) assigned to the user. "updateTime": "A String", # Output only. User update timestamp. - "user": "A String", # Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created. "userPrincipal": "A String", # Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal. "userProfile": "A String", # Optional. The user profile. We user user full name(First name + Last name) as user profile. }, diff --git a/docs/dyn/documentai_v1.projects.locations.processors.html b/docs/dyn/documentai_v1.projects.locations.processors.html index 03b295c025..3d1f236a7b 100644 --- a/docs/dyn/documentai_v1.projects.locations.processors.html +++ b/docs/dyn/documentai_v1.projects.locations.processors.html @@ -208,7 +208,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. @@ -1548,7 +1548,7 @@
"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. diff --git a/docs/dyn/documentai_v1.projects.locations.processors.humanReviewConfig.html b/docs/dyn/documentai_v1.projects.locations.processors.humanReviewConfig.html index 563269d019..ef72defc4d 100644 --- a/docs/dyn/documentai_v1.projects.locations.processors.humanReviewConfig.html +++ b/docs/dyn/documentai_v1.projects.locations.processors.humanReviewConfig.html @@ -114,7 +114,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. diff --git a/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html b/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html index 586636426b..52aa1b2938 100644 --- a/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html +++ b/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html @@ -203,7 +203,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. @@ -425,7 +425,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. @@ -535,7 +535,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. @@ -1642,7 +1642,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. @@ -2667,7 +2667,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. @@ -2739,7 +2739,7 @@

Method Details

"properties": [ # Description the nested structure, or composition of an entity. { # Defines properties that can be part of the entity type. "displayName": "A String", # User defined name for the property. - "groundingConfig": "A String", # Grounding config of the entity type. + "method": "A String", # Specifies how the entity's value is obtained. "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name. "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document. "valueType": "A String", # A reference to the value type of the property. This type is subject to the same conventions as the `Entity.base_types` field. diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html index af25bf44c9..bb11396c84 100644 --- a/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html +++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html @@ -205,7 +205,7 @@

Method Details

         { # Defines properties that can be part of the entity type.
           "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
           "displayName": "A String", # User defined name for the property.
-          "groundingConfig": "A String", # Grounding config of the entity type.
+          "method": "A String", # Specifies how the entity's value is obtained.
           "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
           "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
           "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -1418,7 +1418,7 @@ Method Details
         { # Defines properties that can be part of the entity type.
           "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
           "displayName": "A String", # User defined name for the property.
-          "groundingConfig": "A String", # Grounding config of the entity type.
+          "method": "A String", # Specifies how the entity's value is obtained.
           "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
           "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
           "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -1480,7 +1480,7 @@ Method Details
{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.html
index 2455507980..0c025491ec 100644
--- a/docs/dyn/documentai_v1beta3.projects.locations.processors.html
+++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.html
@@ -236,7 +236,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -2622,7 +2622,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html
index 5284ca441a..5af413f1e5 100644
--- a/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html
+++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html
@@ -1108,7 +1108,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html
index 3ae885068b..b4ee7b731b 100644
--- a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html
+++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html
@@ -226,7 +226,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -462,7 +462,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -632,7 +632,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -2785,7 +2785,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -3862,7 +3862,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
@@ -3948,7 +3948,7 @@

Method Details

{ # Defines properties that can be part of the entity type.
  "description": "A String", # The description of the property. Could be used to provide more information about the property for model calls.
  "displayName": "A String", # User defined name for the property.
- "groundingConfig": "A String", # Grounding config of the entity type.
+ "method": "A String", # Specifies how the entity's value is obtained.
  "name": "A String", # The name of the property. Follows the same guidelines as the EntityType name.
  "occurrenceType": "A String", # Occurrence type limits the number of instances an entity type appears in the document.
  "propertyMetadata": { # Metadata about a property. # Any additional metadata about the property can be added here.
diff --git a/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html b/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html
index 618346d9d7..5dbcd816d2 100644
--- a/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html
+++ b/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html
@@ -106,11 +106,11 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn.
  "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types.
    { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.
-     "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode].
+     "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode].
        "outcome": "A String", # Required. Outcome of the code execution.
        "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
      },
-     "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed.
+     "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed.
        "code": "A String", # Required. The code to be executed.
        "language": "A String", # Required. Programming language of the `code`.
      },
@@ -152,6 +152,7 @@
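The revised comments tie `executableCode` and `codeExecutionResult` to the [CodeExecution] tool rather than function calling. A hedged sketch of enabling that tool on a `generateContent` call through this surface; the model resource name is a placeholder, and the empty `codeExecution: {}` tool object is an assumption inferred from the schema above.

```python
from googleapiclient.discovery import build

fml = build("firebaseml", "v2beta")

body = {
    "contents": [{
        "role": "user",
        "parts": [{"text": "Compute the 20th Fibonacci number by running code."}],
    }],
    # Assumed: the code-execution tool takes no parameters.
    "tools": [{"codeExecution": {}}],
}

response = fml.projects().locations().publishers().models().generateContent(
    model="projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash",
    body=body,
).execute()
```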

Method Details

"generationConfig": { # Generation config. # Optional. Generation config that the model will use to generate the response. "audioTimestamp": True or False, # Optional. If enabled, audio timestamp will be included in the request to the model. "candidateCount": 42, # Optional. Number of candidates to generate. + "enableAffectiveDialog": True or False, # Optional. If enabled, the model will detect emotions and adapt its responses accordingly. "frequencyPenalty": 3.14, # Optional. Frequency penalties. "logprobs": 42, # Optional. Logit probabilities. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. @@ -160,6 +161,7 @@

Method Details

"featureSelectionPreference": "A String", # Required. Feature selection preference. }, "presencePenalty": 3.14, # Optional. Positive penalties. + "responseJsonSchema": "", # Optional. Output schema of the generated response. This is an alternative to `response_schema` that accepts [JSON Schema](https://json-schema.org/). If set, `response_schema` must be omitted, but `response_mime_type` is required. While the full JSON Schema may be sent, not all features are supported. Specifically, only the following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor` - `type` - `format` - `title` - `description` - `enum` (for strings and numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` - `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) - `properties` - `additionalProperties` - `required` The non-standard `propertyOrdering` property may also be set. Cyclic references are unrolled to a limited degree and, as such, may only be used within non-required properties. (Nullable properties are not sufficient.) If `$ref` is set on a sub-schema, no other properties, except for than those starting as a `$`, may be set. "responseLogprobs": True or False, # Optional. If true, export the logprobs results in response. "responseMimeType": "A String", # Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. "responseModalities": [ # Optional. The modalities of the response. @@ -239,11 +241,11 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. - "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. "outcome": "A String", # Required. Outcome of the code execution. "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. }, - "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed. "code": "A String", # Required. The code to be executed. "language": "A String", # Required. Programming language of the `code`. }, @@ -330,6 +332,7 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "parametersJsonSchema": "", # Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { "type": "object", "properties": { "name": { "type": "string" }, "age": { "type": "integer" } }, "additionalProperties": false, "required": ["name", "age"], "propertyOrdering": ["name", "age"] } ``` This field is mutually exclusive with `parameters`. "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema-object). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. "additionalProperties": "", # Optional. Can either be a boolean or an object; controls the presence of additional properties. "anyOf": [ # Optional. The value should be validated against any (one or more) of the subschemas in the list. @@ -369,6 +372,7 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "responseJsonSchema": "", # Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`. }, ], "googleSearch": { # GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. # Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. @@ -381,7 +385,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -461,11 +511,11 @@
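A hedged sketch of the new `externalApi` retrieval source in its Elasticsearch flavor. The endpoint, index, template, and secret names are hypothetical, and the `apiSpec`, `authType`, and `httpElementLocation` enum values are assumptions inferred from the spec names in the schema above, not values confirmed by this diff.

```python
# Retrieval tool grounding against an external Elasticsearch-backed API.
retrieval_tool = {
    "retrieval": {
        "externalApi": {
            "apiSpec": "ELASTIC_SEARCH",  # assumed enum value
            "endpoint": "https://acme.com:443/search",
            "elasticSearchParams": {
                "index": "support-docs",
                "searchTemplate": "default-template",
                "numHits": 5,
            },
            "authConfig": {
                "authType": "API_KEY_AUTH",  # assumed enum value
                "apiKeyConfig": {
                    "apiKeySecret": "projects/my-project/secrets/es-key/versions/1",
                    "httpElementLocation": "HTTP_IN_HEADER",  # assumed enum value
                    "name": "Authorization",
                },
            },
        },
    },
}
```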

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn.
  "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types.
    { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.
-     "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode].
+     "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode].
        "outcome": "A String", # Required. Outcome of the code execution.
        "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
      },
-     "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed.
+     "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed.
        "code": "A String", # Required. The code to be executed.
        "language": "A String", # Required. Programming language of the `code`.
      },
@@ -507,6 +557,7 @@

Method Details

"generationConfig": { # Generation config. # Optional. Generation config. "audioTimestamp": True or False, # Optional. If enabled, audio timestamp will be included in the request to the model. "candidateCount": 42, # Optional. Number of candidates to generate. + "enableAffectiveDialog": True or False, # Optional. If enabled, the model will detect emotions and adapt its responses accordingly. "frequencyPenalty": 3.14, # Optional. Frequency penalties. "logprobs": 42, # Optional. Logit probabilities. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. @@ -515,6 +566,7 @@

Method Details

"featureSelectionPreference": "A String", # Required. Feature selection preference. }, "presencePenalty": 3.14, # Optional. Positive penalties. + "responseJsonSchema": "", # Optional. Output schema of the generated response. This is an alternative to `response_schema` that accepts [JSON Schema](https://json-schema.org/). If set, `response_schema` must be omitted, but `response_mime_type` is required. While the full JSON Schema may be sent, not all features are supported. Specifically, only the following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor` - `type` - `format` - `title` - `description` - `enum` (for strings and numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` - `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) - `properties` - `additionalProperties` - `required` The non-standard `propertyOrdering` property may also be set. Cyclic references are unrolled to a limited degree and, as such, may only be used within non-required properties. (Nullable properties are not sufficient.) If `$ref` is set on a sub-schema, no other properties, except for than those starting as a `$`, may be set. "responseLogprobs": True or False, # Optional. If true, export the logprobs results in response. "responseMimeType": "A String", # Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. "responseModalities": [ # Optional. The modalities of the response. @@ -600,11 +652,11 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. - "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. "outcome": "A String", # Required. Outcome of the code execution. "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. }, - "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed. "code": "A String", # Required. The code to be executed. "language": "A String", # Required. Programming language of the `code`. }, @@ -706,6 +758,7 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "parametersJsonSchema": "", # Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { "type": "object", "properties": { "name": { "type": "string" }, "age": { "type": "integer" } }, "additionalProperties": false, "required": ["name", "age"], "propertyOrdering": ["name", "age"] } ``` This field is mutually exclusive with `parameters`. "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema-object). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. "additionalProperties": "", # Optional. Can either be a boolean or an object; controls the presence of additional properties. "anyOf": [ # Optional. The value should be validated against any (one or more) of the subschemas in the list. @@ -745,6 +798,7 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "responseJsonSchema": "", # Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`. }, ], "googleSearch": { # GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. # Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. @@ -757,7 +811,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -833,11 +933,11 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. - "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. "outcome": "A String", # Required. Outcome of the code execution. "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. }, - "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed. "code": "A String", # Required. The code to be executed. "language": "A String", # Required. Programming language of the `code`. }, @@ -1030,11 +1130,11 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn.
  "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types.
    { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.
-     "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode].
+     "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode].
        "outcome": "A String", # Required. Outcome of the code execution.
        "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
      },
-     "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed.
+     "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed.
        "code": "A String", # Required. The code to be executed.
        "language": "A String", # Required. Programming language of the `code`.
      },
@@ -1076,6 +1176,7 @@

Method Details

"generationConfig": { # Generation config. # Optional. Generation config. "audioTimestamp": True or False, # Optional. If enabled, audio timestamp will be included in the request to the model. "candidateCount": 42, # Optional. Number of candidates to generate. + "enableAffectiveDialog": True or False, # Optional. If enabled, the model will detect emotions and adapt its responses accordingly. "frequencyPenalty": 3.14, # Optional. Frequency penalties. "logprobs": 42, # Optional. Logit probabilities. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. @@ -1084,6 +1185,7 @@

Method Details

"featureSelectionPreference": "A String", # Required. Feature selection preference. }, "presencePenalty": 3.14, # Optional. Positive penalties. + "responseJsonSchema": "", # Optional. Output schema of the generated response. This is an alternative to `response_schema` that accepts [JSON Schema](https://json-schema.org/). If set, `response_schema` must be omitted, but `response_mime_type` is required. While the full JSON Schema may be sent, not all features are supported. Specifically, only the following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor` - `type` - `format` - `title` - `description` - `enum` (for strings and numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` - `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) - `properties` - `additionalProperties` - `required` The non-standard `propertyOrdering` property may also be set. Cyclic references are unrolled to a limited degree and, as such, may only be used within non-required properties. (Nullable properties are not sufficient.) If `$ref` is set on a sub-schema, no other properties, except for than those starting as a `$`, may be set. "responseLogprobs": True or False, # Optional. If true, export the logprobs results in response. "responseMimeType": "A String", # Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. "responseModalities": [ # Optional. The modalities of the response. @@ -1169,11 +1271,11 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. - "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. "outcome": "A String", # Required. Outcome of the code execution. "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. }, - "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed. "code": "A String", # Required. The code to be executed. "language": "A String", # Required. Programming language of the `code`. }, @@ -1275,6 +1377,7 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "parametersJsonSchema": "", # Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { "type": "object", "properties": { "name": { "type": "string" }, "age": { "type": "integer" } }, "additionalProperties": false, "required": ["name", "age"], "propertyOrdering": ["name", "age"] } ``` This field is mutually exclusive with `parameters`. "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema-object). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. "additionalProperties": "", # Optional. Can either be a boolean or an object; controls the presence of additional properties. "anyOf": [ # Optional. The value should be validated against any (one or more) of the subschemas in the list. @@ -1314,6 +1417,7 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "responseJsonSchema": "", # Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`. }, ], "googleSearch": { # GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. # Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. @@ -1326,7 +1430,53 @@

Method Details

}, "retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. "disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported. + "externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding. + "apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead. + "apiKeyConfig": { # The API secret. # The API secret. + "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} + "apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set. + }, + }, + "apiSpec": "A String", # The API spec that the external API implements. + "authConfig": { # Auth configuration to run the extension. # The authentication config to access the API. + "apiKeyConfig": { # Config for authentication with API key. # Config for API key auth. + "apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + "apiKeyString": "A String", # Optional. The API key to be used in the request directly. + "httpElementLocation": "A String", # Optional. The location of the API key. + "name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + }, + "authType": "A String", # Type of auth scheme. + "googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth. + "serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + }, + "httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth. + "credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. 
+ }, + "oauthConfig": { # Config for user oauth. # Config for user oauth. + "accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + }, + "oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth. + "idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + "serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + }, + }, + "elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API. + "index": "A String", # The ElasticSearch index to use. + "numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param. + "searchTemplate": "A String", # The ElasticSearch search template to use. + }, + "endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search + "simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API. + }, + }, "vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search. + "dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used. + { # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec + "dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` + "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) + }, + ], "datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` "engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` "filter": "A String", # Optional. Filter strings to be passed to the search API. @@ -1402,11 +1552,11 @@
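For orientation, here is a minimal sketch (not part of the generated reference) of how the `externalApi` retrieval fields above might be wired into a `generateContent` call with the discovery-based Python client. The project, model, secret path, and Elasticsearch values are hypothetical placeholders.

    from googleapiclient.discovery import build

    # Regional endpoints may require client_options in practice.
    service = build("aiplatform", "v1")
    body = {
        "contents": [{"role": "user", "parts": [{"text": "What is our refund policy?"}]}],
        "tools": [{
            "retrieval": {
                "externalApi": {
                    "apiSpec": "ELASTIC_SEARCH",
                    "endpoint": "https://acme.com:443/search",
                    "authConfig": {
                        "authType": "API_KEY_AUTH",
                        "apiKeyConfig": {
                            # Hypothetical Secret Manager path holding the API key.
                            "apiKeySecret": "projects/my-project/secrets/search-key/versions/1",
                            "httpElementLocation": "HTTP_IN_HEADER",
                            "name": "x-api-key",
                        },
                    },
                    "elasticSearchParams": {
                        "index": "support-docs",
                        "searchTemplate": "default-template",
                        "numHits": 5,  # passed to Elasticsearch as `num_hits`
                    },
                },
            },
        }],
    }
    response = service.endpoints().generateContent(
        model="projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-pro",
        body=body,
    ).execute()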

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. - "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. "outcome": "A String", # Required. Outcome of the code execution. "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. }, - "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated. # Optional. Code generated by the model that is meant to be executed. "code": "A String", # Required. The code to be executed. "language": "A String", # Required. Programming language of the `code`. }, diff --git a/docs/dyn/gkebackup_v1.projects.locations.backupChannels.html b/docs/dyn/gkebackup_v1.projects.locations.backupChannels.html index 1129945c20..26bf965c04 100644 --- a/docs/dyn/gkebackup_v1.projects.locations.backupChannels.html +++ b/docs/dyn/gkebackup_v1.projects.locations.backupChannels.html @@ -118,7 +118,7 @@

Method Details

{ # A BackupChannel imposes constraints on where clusters can be backed up. The BackupChannel should be in the same project and region as the cluster being backed up. The backup can be created only in destination_project. "createTime": "A String", # Output only. The timestamp when this BackupChannel resource was created. "description": "A String", # Optional. User specified descriptive string for this BackupChannel. - "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where Backups are allowed to be stored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a BackupChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform BackupChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetBackupChannel`, and systems are expected to put that etag in the request to `UpdateBackupChannel` or `DeleteBackupChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. @@ -213,7 +213,7 @@

Method Details

{ # A BackupChannel imposes constraints on where clusters can be backed up. The BackupChannel should be in the same project and region as the cluster being backed up. The backup can be created only in destination_project. "createTime": "A String", # Output only. The timestamp when this BackupChannel resource was created. "description": "A String", # Optional. User specified descriptive string for this BackupChannel. - "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where Backups are allowed to be stored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a BackupChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform BackupChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetBackupChannel`, and systems are expected to put that etag in the request to `UpdateBackupChannel` or `DeleteBackupChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. @@ -248,7 +248,7 @@

Method Details

{ # A BackupChannel imposes constraints on where clusters can be backed up. The BackupChannel should be in the same project and region as the cluster being backed up. The backup can be created only in destination_project. "createTime": "A String", # Output only. The timestamp when this BackupChannel resource was created. "description": "A String", # Optional. User specified descriptive string for this BackupChannel. - "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where Backups are allowed to be stored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a BackupChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform BackupChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetBackupChannel`, and systems are expected to put that etag in the request to `UpdateBackupChannel` or `DeleteBackupChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. @@ -292,7 +292,7 @@

Method Details

{ # A BackupChannel imposes constraints on where clusters can be backed up. The BackupChannel should be in the same project and region as the cluster being backed up. The backup can be created only in destination_project. "createTime": "A String", # Output only. The timestamp when this BackupChannel resource was created. "description": "A String", # Optional. User specified descriptive string for this BackupChannel. - "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where Backups are allowed to be stored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a BackupChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform BackupChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetBackupChannel`, and systems are expected to put that etag in the request to `UpdateBackupChannel` or `DeleteBackupChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. diff --git a/docs/dyn/gkebackup_v1.projects.locations.restoreChannels.html b/docs/dyn/gkebackup_v1.projects.locations.restoreChannels.html index cc988d0042..35399cc1bf 100644 --- a/docs/dyn/gkebackup_v1.projects.locations.restoreChannels.html +++ b/docs/dyn/gkebackup_v1.projects.locations.restoreChannels.html @@ -118,7 +118,7 @@
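Per the updated `destinationProject` comment, either form of project reference is now accepted. A minimal sketch (project and channel names hypothetical, parameter names as in the generated client):

    from googleapiclient.discovery import build

    gkebackup = build("gkebackup", "v1")
    channel = {
        # Now either projects/{projectId} or projects/{projectNumber}.
        "destinationProject": "projects/my-backup-project",
        "description": "Channel for prod cluster backups",
        "labels": {"env": "prod"},
    }
    operation = gkebackup.projects().locations().backupChannels().create(
        parent="projects/my-project/locations/us-central1",
        backupChannelId="prod-channel",
        body=channel,
    ).execute()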

Method Details

{ # A RestoreChannel imposes constraints on where backups can be restored. The RestoreChannel should be in the same project and region as the backups. The backups can only be restored in the `destination_project`. "createTime": "A String", # Output only. The timestamp when this RestoreChannel was created. "description": "A String", # Optional. User specified descriptive string for this RestoreChannel. - "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where backups will be restored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a RestoreChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform RestoreChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetRestoreChannel`, and systems are expected to put that etag in the request to `UpdateRestoreChannel` or `DeleteRestoreChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. @@ -212,7 +212,7 @@

Method Details

{ # A RestoreChannel imposes constraints on where backups can be restored. The RestoreChannel should be in the same project and region as the backups. The backups can only be restored in the `destination_project`. "createTime": "A String", # Output only. The timestamp when this RestoreChannel was created. "description": "A String", # Optional. User specified descriptive string for this RestoreChannel. - "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where backups will be restored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a RestoreChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform RestoreChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetRestoreChannel`, and systems are expected to put that etag in the request to `UpdateRestoreChannel` or `DeleteRestoreChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. @@ -248,7 +248,7 @@

Method Details

{ # A RestoreChannel imposes constraints on where backups can be restored. The RestoreChannel should be in the same project and region as the backups. The backups can only be restored in the `destination_project`. "createTime": "A String", # Output only. The timestamp when this RestoreChannel was created. "description": "A String", # Optional. User specified descriptive string for this RestoreChannel. - "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where backups will be restored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a RestoreChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform RestoreChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetRestoreChannel`, and systems are expected to put that etag in the request to `UpdateRestoreChannel` or `DeleteRestoreChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. @@ -291,7 +291,7 @@

Method Details

{ # A RestoreChannel imposes constraints on where backups can be restored. The RestoreChannel should be in the same project and region as the backups. The backups can only be restored in the `destination_project`. "createTime": "A String", # Output only. The timestamp when this RestoreChannel was created. "description": "A String", # Optional. User specified descriptive string for this RestoreChannel. - "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future. + "destinationProject": "A String", # Required. Immutable. The project into which the backups will be restored. The format is `projects/{projectId}` or `projects/{projectNumber}`. "destinationProjectId": "A String", # Output only. The project_id where backups will be restored. Example Project ID: "my-project-id". This will be an OUTPUT_ONLY field to return the project_id of the destination project. "etag": "A String", # Output only. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a RestoreChannel from overwriting each other. It is strongly suggested that systems make use of the 'etag' in the read-modify-write cycle to perform RestoreChannel updates in order to avoid race conditions: An `etag` is returned in the response to `GetRestoreChannel`, and systems are expected to put that etag in the request to `UpdateRestoreChannel` or `DeleteRestoreChannel` to ensure that their change will be applied to the same version of the resource. "labels": { # Optional. A set of custom labels supplied by user. diff --git a/docs/dyn/gkeonprem_v1.projects.locations.bareMetalClusters.html b/docs/dyn/gkeonprem_v1.projects.locations.bareMetalClusters.html index 2852231141..a64ee0a300 100644 --- a/docs/dyn/gkeonprem_v1.projects.locations.bareMetalClusters.html +++ b/docs/dyn/gkeonprem_v1.projects.locations.bareMetalClusters.html @@ -506,6 +506,7 @@
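The same `destinationProject` relaxation applies to RestoreChannels. A short sketch, reusing the `gkebackup` client from the previous example (names hypothetical):

    restore_channel = {
        "destinationProject": "projects/my-restore-project",  # project ID now accepted
        "description": "Restores allowed into the staging project",
    }
    operation = gkebackup.projects().locations().restoreChannels().create(
        parent="projects/my-project/locations/us-central1",
        restoreChannelId="staging-channel",
        body=restore_channel,
    ).execute()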

Method Details

"adminClusterMembership": "A String", # Required. The admin cluster this bare metal user cluster belongs to. This is the full resource name of the admin cluster's fleet membership. In the future, references to other resource types might be allowed if admin clusters are modeled as their own resources. "bareMetalClusterId": "A String", # User provided OnePlatform identifier that is used as part of the resource name. This must be unique among all bare metal clusters within a project and location and will return a 409 if the cluster already exists. (https://tools.ietf.org/html/rfc1123) format. "localName": "A String", # Optional. The object name of the bare metal cluster custom resource on the associated admin cluster. This field is used to support conflicting resource names when enrolling existing clusters to the API. When not provided, this field will resolve to the bare_metal_cluster_id. Otherwise, it must match the object name of the bare metal cluster custom resource. It is not modifiable outside / beyond the enrollment operation. + "localNamespace": "A String", # Optional. The namespace of the cluster. } x__xgafv: string, V1 error format. diff --git a/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html b/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html index d9cba84211..b7585b547b 100644 --- a/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html +++ b/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html @@ -312,6 +312,10 @@

Method Details

"preparedSecrets": { # VmwareAdminPreparedSecretsConfig represents configuration for admin cluster prepared secrets. # Output only. The VMware admin cluster prepared secrets configuration. It should always be enabled by the Central API, instead of letting users set it. "enabled": True or False, # Whether prepared secrets is enabled. }, + "privateRegistryConfig": { # VmwareAdminPrivateRegistryConfig represents configuration for admin cluster registry. # Configuration for registry. + "address": "A String", # The registry address. + "caCert": "A String", # When the container runtime pulls an image from private registry, the registry must prove its identity by presenting a certificate. The registry's certificate is signed by a certificate authority (CA). The container runtime uses the CA's certificate to validate the registry's certificate. + }, "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware admin cluster. "state": "A String", # Output only. The current state of VMware admin cluster. "status": { # ResourceStatus describes why a cluster or node pool has a certain status. (e.g., ERROR or DEGRADED). # Output only. ResourceStatus representing detailed cluster state. @@ -645,6 +649,10 @@

Method Details

"preparedSecrets": { # VmwareAdminPreparedSecretsConfig represents configuration for admin cluster prepared secrets. # Output only. The VMware admin cluster prepared secrets configuration. It should always be enabled by the Central API, instead of letting users set it. "enabled": True or False, # Whether prepared secrets is enabled. }, + "privateRegistryConfig": { # VmwareAdminPrivateRegistryConfig represents configuration for admin cluster registry. # Configuration for registry. + "address": "A String", # The registry address. + "caCert": "A String", # When the container runtime pulls an image from private registry, the registry must prove its identity by presenting a certificate. The registry's certificate is signed by a certificate authority (CA). The container runtime uses the CA's certificate to validate the registry's certificate. + }, "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware admin cluster. "state": "A String", # Output only. The current state of VMware admin cluster. "status": { # ResourceStatus describes why a cluster or node pool has a certain status. (e.g., ERROR or DEGRADED). # Output only. ResourceStatus representing detailed cluster state. @@ -947,6 +955,10 @@

Method Details

"preparedSecrets": { # VmwareAdminPreparedSecretsConfig represents configuration for admin cluster prepared secrets. # Output only. The VMware admin cluster prepared secrets configuration. It should always be enabled by the Central API, instead of letting users set it. "enabled": True or False, # Whether prepared secrets is enabled. }, + "privateRegistryConfig": { # VmwareAdminPrivateRegistryConfig represents configuration for admin cluster registry. # Configuration for registry. + "address": "A String", # The registry address. + "caCert": "A String", # When the container runtime pulls an image from private registry, the registry must prove its identity by presenting a certificate. The registry's certificate is signed by a certificate authority (CA). The container runtime uses the CA's certificate to validate the registry's certificate. + }, "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware admin cluster. "state": "A String", # Output only. The current state of VMware admin cluster. "status": { # ResourceStatus describes why a cluster or node pool has a certain status. (e.g., ERROR or DEGRADED). # Output only. ResourceStatus representing detailed cluster state. @@ -1211,6 +1223,10 @@

Method Details

"preparedSecrets": { # VmwareAdminPreparedSecretsConfig represents configuration for admin cluster prepared secrets. # Output only. The VMware admin cluster prepared secrets configuration. It should always be enabled by the Central API, instead of letting users set it. "enabled": True or False, # Whether prepared secrets is enabled. }, + "privateRegistryConfig": { # VmwareAdminPrivateRegistryConfig represents configuration for admin cluster registry. # Configuration for registry. + "address": "A String", # The registry address. + "caCert": "A String", # When the container runtime pulls an image from private registry, the registry must prove its identity by presenting a certificate. The registry's certificate is signed by a certificate authority (CA). The container runtime uses the CA's certificate to validate the registry's certificate. + }, "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware admin cluster. "state": "A String", # Output only. The current state of VMware admin cluster. "status": { # ResourceStatus describes why a cluster or node pool has a certain status. (e.g., ERROR or DEGRADED). # Output only. ResourceStatus representing detailed cluster state. diff --git a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html index 13d916dafa..057ca00d3b 100644 --- a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html +++ b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html @@ -147,12 +147,12 @@

Method Details

"a_key": "A String", }, "keyAlgorithm": "A String", # Optional. Key algorithm to use when generating the key pair. This key pair will be used to create the certificate. If not specified, this will default to ECDSA_P256. - "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 10 hours and 30 days. If not specified, this will be defaulted to 24 hours. - "rotationWindowPercentage": 42, # Optional. Rotation window percentage indicating when certificate rotation should be initiated based on remaining lifetime. Must be between 10 and 80. If not specified, this will be defaulted to 50. + "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 24 hours and 30 days. If not specified, this will be defaulted to 24 hours. + "rotationWindowPercentage": 42, # Optional. Rotation window percentage, the percentage of remaining lifetime after which certificate rotation is initiated. Must be between 50 and 80. If no value is specified, rotation window percentage is defaulted to 50. }, "inlineTrustConfig": { # Defines configuration for extending trust to additional trust domains. By establishing trust with another domain, the current domain will recognize and accept certificates issued by entities within the trusted domains. Note that a trust domain automatically trusts itself, eliminating the need for explicit configuration. # Optional. Represents config to add additional trusted trust domains. "additionalTrustBundles": { # Optional. Maps specific trust domains (e.g., "example.com") to their corresponding TrustStore, which contain the trusted root certificates for that domain. There can be a maximum of 10 trust domain entries in this map. Note that a trust domain automatically trusts itself and don't need to be specified here. If however, this WorkloadIdentityPool's trust domain contains any trust anchors in the additional_trust_bundles map, those trust anchors will be *appended to* the trust bundle automatically derived from your InlineCertificateIssuanceConfig's ca_pools. - "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. + "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. @@ -260,12 +260,12 @@

Method Details

"a_key": "A String", }, "keyAlgorithm": "A String", # Optional. Key algorithm to use when generating the key pair. This key pair will be used to create the certificate. If not specified, this will default to ECDSA_P256. - "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 10 hours and 30 days. If not specified, this will be defaulted to 24 hours. - "rotationWindowPercentage": 42, # Optional. Rotation window percentage indicating when certificate rotation should be initiated based on remaining lifetime. Must be between 10 and 80. If not specified, this will be defaulted to 50. + "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 24 hours and 30 days. If not specified, this will be defaulted to 24 hours. + "rotationWindowPercentage": 42, # Optional. Rotation window percentage, the percentage of remaining lifetime after which certificate rotation is initiated. Must be between 50 and 80. If no value is specified, rotation window percentage is defaulted to 50. }, "inlineTrustConfig": { # Defines configuration for extending trust to additional trust domains. By establishing trust with another domain, the current domain will recognize and accept certificates issued by entities within the trusted domains. Note that a trust domain automatically trusts itself, eliminating the need for explicit configuration. # Optional. Represents config to add additional trusted trust domains. "additionalTrustBundles": { # Optional. Maps specific trust domains (e.g., "example.com") to their corresponding TrustStore, which contain the trusted root certificates for that domain. There can be a maximum of 10 trust domain entries in this map. Note that a trust domain automatically trusts itself and don't need to be specified here. If however, this WorkloadIdentityPool's trust domain contains any trust anchors in the additional_trust_bundles map, those trust anchors will be *appended to* the trust bundle automatically derived from your InlineCertificateIssuanceConfig's ca_pools. - "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. + "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. @@ -371,12 +371,12 @@

Method Details

"a_key": "A String", }, "keyAlgorithm": "A String", # Optional. Key algorithm to use when generating the key pair. This key pair will be used to create the certificate. If not specified, this will default to ECDSA_P256. - "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 10 hours and 30 days. If not specified, this will be defaulted to 24 hours. - "rotationWindowPercentage": 42, # Optional. Rotation window percentage indicating when certificate rotation should be initiated based on remaining lifetime. Must be between 10 and 80. If not specified, this will be defaulted to 50. + "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 24 hours and 30 days. If not specified, this will be defaulted to 24 hours. + "rotationWindowPercentage": 42, # Optional. Rotation window percentage, the percentage of remaining lifetime after which certificate rotation is initiated. Must be between 50 and 80. If no value is specified, rotation window percentage is defaulted to 50. }, "inlineTrustConfig": { # Defines configuration for extending trust to additional trust domains. By establishing trust with another domain, the current domain will recognize and accept certificates issued by entities within the trusted domains. Note that a trust domain automatically trusts itself, eliminating the need for explicit configuration. # Optional. Represents config to add additional trusted trust domains. "additionalTrustBundles": { # Optional. Maps specific trust domains (e.g., "example.com") to their corresponding TrustStore, which contain the trusted root certificates for that domain. There can be a maximum of 10 trust domain entries in this map. Note that a trust domain automatically trusts itself and don't need to be specified here. If however, this WorkloadIdentityPool's trust domain contains any trust anchors in the additional_trust_bundles map, those trust anchors will be *appended to* the trust bundle automatically derived from your InlineCertificateIssuanceConfig's ca_pools. - "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. + "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. @@ -431,12 +431,12 @@

Method Details

"a_key": "A String", }, "keyAlgorithm": "A String", # Optional. Key algorithm to use when generating the key pair. This key pair will be used to create the certificate. If not specified, this will default to ECDSA_P256. - "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 10 hours and 30 days. If not specified, this will be defaulted to 24 hours. - "rotationWindowPercentage": 42, # Optional. Rotation window percentage indicating when certificate rotation should be initiated based on remaining lifetime. Must be between 10 and 80. If not specified, this will be defaulted to 50. + "lifetime": "A String", # Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 24 hours and 30 days. If not specified, this will be defaulted to 24 hours. + "rotationWindowPercentage": 42, # Optional. Rotation window percentage, the percentage of remaining lifetime after which certificate rotation is initiated. Must be between 50 and 80. If no value is specified, rotation window percentage is defaulted to 50. }, "inlineTrustConfig": { # Defines configuration for extending trust to additional trust domains. By establishing trust with another domain, the current domain will recognize and accept certificates issued by entities within the trusted domains. Note that a trust domain automatically trusts itself, eliminating the need for explicit configuration. # Optional. Represents config to add additional trusted trust domains. "additionalTrustBundles": { # Optional. Maps specific trust domains (e.g., "example.com") to their corresponding TrustStore, which contain the trusted root certificates for that domain. There can be a maximum of 10 trust domain entries in this map. Note that a trust domain automatically trusts itself and don't need to be specified here. If however, this WorkloadIdentityPool's trust domain contains any trust anchors in the additional_trust_bundles map, those trust anchors will be *appended to* the trust bundle automatically derived from your InlineCertificateIssuanceConfig's ca_pools. - "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. + "a_key": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. diff --git a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html index a40129cb7e..2acddb6d77 100644 --- a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html +++ b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html @@ -148,7 +148,7 @@

Method Details

}, "state": "A String", # Output only. The state of the provider. "x509": { # An X.509-type identity provider represents a CA. It is trusted to assert a client identity if the client has a certificate that chains up to this CA. # An X.509-type identity provider. - "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. + "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. @@ -267,7 +267,7 @@

Method Details

}, "state": "A String", # Output only. The state of the provider. "x509": { # An X.509-type identity provider represents a CA. It is trusted to assert a client identity if the client has a certificate that chains up to this CA. # An X.509-type identity provider. - "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. + "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. @@ -328,7 +328,7 @@

Method Details

}, "state": "A String", # Output only. The state of the provider. "x509": { # An X.509-type identity provider represents a CA. It is trusted to assert a client identity if the client has a certificate that chains up to this CA. # An X.509-type identity provider. - "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. + "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. @@ -394,7 +394,7 @@

Method Details

}, "state": "A String", # Output only. The state of the provider. "x509": { # An X.509-type identity provider represents a CA. It is trusted to assert a client identity if the client has a certificate that chains up to this CA. # An X.509-type identity provider. - "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. + "trustStore": { # Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity. # Required. A TrustStore. Use this trust store as a wrapper to config the trust anchor and optional intermediate cas to help build the trust chain for the incoming end entity certificate. Follow the X.509 guidelines to define those PEM encoded certs. Only one trust store is currently supported. "intermediateCas": [ # Optional. Set of intermediate CA certificates used for building the trust chain to the trust anchor. Important: Intermediate CAs are only supported for X.509 federation. { # Intermediate CA certificates used for building the trust chain to trust anchor "pemCertificate": "A String", # PEM certificate of the PKI used for validation. Must only contain one ca certificate. diff --git a/docs/dyn/integrations_v1.projects.locations.clients.html b/docs/dyn/integrations_v1.projects.locations.clients.html index b7029f277c..53e8822ab7 100644 --- a/docs/dyn/integrations_v1.projects.locations.clients.html +++ b/docs/dyn/integrations_v1.projects.locations.clients.html @@ -83,6 +83,9 @@

Instance Methods

provision(parent, body=None, x__xgafv=None)

Perform the provisioning steps to enable a user GCP project to use IP. If the GCP project is already registered on the IP end via Apigee Integration, provisioning will fail.

+

+ provisionClientPostProcessor(parent, body=None, x__xgafv=None)

+

Perform post-provisioning steps after the client is provisioned.

replace(parent, body=None, x__xgafv=None)

Update run-as service account for provisioned client

@@ -144,6 +147,7 @@

Method Details

}, "createSampleWorkflows": True or False, # Optional. Indicates if sample workflow should be created along with provisioning "enableHttpCall": True or False, # Optional. Indicates if the client should be allowed to make HTTP calls. + "enableManagedAiFeatures": True or False, # Optional. Indicates if the client should be allowed to use managed AI features, i.e. using Cloud Companion APIs of the tenant project. This will allow the customers to use features like Troubleshooting, OpenAPI spec enrichment, etc. for free. "provisionGmek": True or False, # Optional. Deprecated. Indicates provision with GMEK or CMEK. This field is deprecated and the provision would always be GMEK if cloud_kms_config is not present in the request. "runAsServiceAccount": "A String", # Optional. User input run-as service account, if empty, will bring up a new default service account "skipCpProvision": True or False, # Optional. Indicates if skip CP provision or not @@ -161,6 +165,33 @@

Method Details

}
+
+ provisionClientPostProcessor(parent, body=None, x__xgafv=None) +
Perform post-provisioning steps after the client is provisioned.
+
+Args:
+  parent: string, Required. The ID of the GCP Project to be provisioned. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for PostProvisioning rpc call.
+  "workflows": [ # Optional. Indicate which workflows to create
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for PostProvisioning rpc call.
+}
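Called through the generated client, the new method looks like this; a sketch with a hypothetical parent and workflow name:

    from googleapiclient.discovery import build

    integrations = build("integrations", "v1")
    response = integrations.projects().locations().clients().provisionClientPostProcessor(
        parent="projects/my-project/locations/us-central1",
        body={"workflows": ["ExampleSampleWorkflow"]},
    ).execute()
    # The response body is currently an empty object.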
+
+
replace(parent, body=None, x__xgafv=None)
Update run-as service account for provisioned client
diff --git a/docs/dyn/integrations_v1.projects.locations.html b/docs/dyn/integrations_v1.projects.locations.html
index d6af5cae69..fac96a433a 100644
--- a/docs/dyn/integrations_v1.projects.locations.html
+++ b/docs/dyn/integrations_v1.projects.locations.html
@@ -207,6 +207,7 @@ 

Method Details

"description": "A String", # Description of what the client is used for "enableHttpCall": True or False, # Optional. Indicates the client enables making HTTP call. "enableInternalIp": True or False, # Optional. Indicates the client enables internal IP feature, this is applicable for internal clients only. + "enableManagedAiFeatures": True or False, # Optional. Indicates if the Cloud Companion APIs will be used in the tenant project, i.e. if customer can use the managed AI features for free. "enableVariableMasking": True or False, # Optional. True if variable masking feature should be turned on for this region "id": "A String", # Globally unique ID (project_id + region) "isGmek": True or False, # Optional. Indicates the client is provisioned with CMEK or GMEK. diff --git a/docs/dyn/logging_v2.billingAccounts.logs.html b/docs/dyn/logging_v2.billingAccounts.logs.html index 80ce8b4502..1421a0d490 100644 --- a/docs/dyn/logging_v2.billingAccounts.logs.html +++ b/docs/dyn/logging_v2.billingAccounts.logs.html @@ -79,7 +79,7 @@

Instance Methods

Close httplib2 connections.

delete(logName, x__xgafv=None)

-

Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

+

Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

list(parent, pageSize=None, pageToken=None, resourceNames=None, x__xgafv=None)

Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.

@@ -94,7 +94,7 @@

Method Details

delete(logName, x__xgafv=None) -
Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
+  
Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
 
 Args:
logName: string, Required. The resource name of the log to delete: projects/[PROJECT_ID]/logs/[LOG_ID] organizations/[ORGANIZATION_ID]/logs/[LOG_ID] billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID] folders/[FOLDER_ID]/logs/[LOG_ID]. [LOG_ID] must be URL-encoded. For example, "projects/my-project-id/logs/syslog", "organizations/123/logs/cloudaudit.googleapis.com%2Factivity". For more information about log names, see LogEntry. (required)
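A sketch of a delete call; the billing account is a placeholder, and only the [LOG_ID] portion needs URL-encoding:

    import urllib.parse
    from googleapiclient.discovery import build

    logging_service = build("logging", "v2")
    log_id = urllib.parse.quote("cloudaudit.googleapis.com/activity", safe="")
    logging_service.billingAccounts().logs().delete(
        logName=f"billingAccounts/0123AB-CDEF45-67890A/logs/{log_id}",
    ).execute()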
diff --git a/docs/dyn/logging_v2.folders.logs.html b/docs/dyn/logging_v2.folders.logs.html
index 58055c0824..469e8c929f 100644
--- a/docs/dyn/logging_v2.folders.logs.html
+++ b/docs/dyn/logging_v2.folders.logs.html
@@ -79,7 +79,7 @@ 

Instance Methods

Close httplib2 connections.

delete(logName, x__xgafv=None)

-

Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

+

Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

list(parent, pageSize=None, pageToken=None, resourceNames=None, x__xgafv=None)

Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.

@@ -94,7 +94,7 @@

Method Details

delete(logName, x__xgafv=None) -
Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
+  
Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
 
 Args:
logName: string, Required. The resource name of the log to delete: projects/[PROJECT_ID]/logs/[LOG_ID] organizations/[ORGANIZATION_ID]/logs/[LOG_ID] billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID] folders/[FOLDER_ID]/logs/[LOG_ID]. [LOG_ID] must be URL-encoded. For example, "projects/my-project-id/logs/syslog", "organizations/123/logs/cloudaudit.googleapis.com%2Factivity". For more information about log names, see LogEntry. (required)
diff --git a/docs/dyn/logging_v2.logs.html b/docs/dyn/logging_v2.logs.html
index c9f7b756b5..4b668cabbf 100644
--- a/docs/dyn/logging_v2.logs.html
+++ b/docs/dyn/logging_v2.logs.html
@@ -79,7 +79,7 @@ 

Instance Methods

Close httplib2 connections.

delete(logName, x__xgafv=None)

-

Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

+

Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

list(parent, pageSize=None, pageToken=None, resourceNames=None, x__xgafv=None)

Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.

@@ -94,7 +94,7 @@

Method Details

delete(logName, x__xgafv=None) -
Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
+  
Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
 
 Args:
logName: string, Required. The resource name of the log to delete: projects/[PROJECT_ID]/logs/[LOG_ID] organizations/[ORGANIZATION_ID]/logs/[LOG_ID] billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID] folders/[FOLDER_ID]/logs/[LOG_ID]. [LOG_ID] must be URL-encoded. For example, "projects/my-project-id/logs/syslog", "organizations/123/logs/cloudaudit.googleapis.com%2Factivity". For more information about log names, see LogEntry. (required)
diff --git a/docs/dyn/logging_v2.organizations.logs.html b/docs/dyn/logging_v2.organizations.logs.html
index d1221442aa..620ff02bb6 100644
--- a/docs/dyn/logging_v2.organizations.logs.html
+++ b/docs/dyn/logging_v2.organizations.logs.html
@@ -79,7 +79,7 @@ 

Instance Methods

Close httplib2 connections.

delete(logName, x__xgafv=None)

-

Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

+

Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

list(parent, pageSize=None, pageToken=None, resourceNames=None, x__xgafv=None)

Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.

@@ -94,7 +94,7 @@

Method Details

delete(logName, x__xgafv=None) -
Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
+  
Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
 
 Args:
logName: string, Required. The resource name of the log to delete: projects/[PROJECT_ID]/logs/[LOG_ID] organizations/[ORGANIZATION_ID]/logs/[LOG_ID] billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID] folders/[FOLDER_ID]/logs/[LOG_ID]. [LOG_ID] must be URL-encoded. For example, "projects/my-project-id/logs/syslog", "organizations/123/logs/cloudaudit.googleapis.com%2Factivity". For more information about log names, see LogEntry. (required)
diff --git a/docs/dyn/logging_v2.projects.logs.html b/docs/dyn/logging_v2.projects.logs.html
index 642e351bae..1b6082cf0e 100644
--- a/docs/dyn/logging_v2.projects.logs.html
+++ b/docs/dyn/logging_v2.projects.logs.html
@@ -79,7 +79,7 @@ 

Instance Methods

Close httplib2 connections.

delete(logName, x__xgafv=None)

-

Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

+

Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.

list(parent, pageSize=None, pageToken=None, resourceNames=None, x__xgafv=None)

Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.

@@ -94,7 +94,7 @@

Method Details

delete(logName, x__xgafv=None) -
Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
+  
Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.
 
 Args:
  logName: string, Required. The resource name of the log to delete: projects/[PROJECT_ID]/logs/[LOG_ID] organizations/[ORGANIZATION_ID]/logs/[LOG_ID] billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID] folders/[FOLDER_ID]/logs/[LOG_ID]. [LOG_ID] must be URL-encoded. For example, "projects/my-project-id/logs/syslog", "organizations/123/logs/cloudaudit.googleapis.com%2Factivity". For more information about log names, see LogEntry. (required)
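
For context, a minimal sketch of invoking this method through google-api-python-client; the project ID and log name below are placeholders, and the client is assumed to be authorized for Cloud Logging:

```python
from googleapiclient.discovery import build

# Build the Logging v2 client; by default this picks up
# Application Default Credentials from the environment.
logging_service = build("logging", "v2")

# The [LOG_ID] portion must be URL-encoded,
# e.g. "cloudaudit.googleapis.com%2Factivity".
log_name = "projects/my-project-id/logs/syslog"  # placeholder

# Returns an empty body on success. Per the description above, the
# log reappears if it receives new entries after the delete.
logging_service.projects().logs().delete(logName=log_name).execute()
```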
diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html
index f49ba24b1a..a7bc625ae7 100644
--- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html
+++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html
@@ -534,12 +534,6 @@ 

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -1029,12 +1023,6 @@

@@ -1029,12 +1023,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -1403,12 +1391,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -1898,12 +1880,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -2304,12 +2280,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -2799,12 +2769,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -3186,12 +3150,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -3681,12 +3639,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -4070,12 +4022,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -4565,12 +4511,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -4934,12 +4874,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -5429,12 +5363,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -5693,12 +5621,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -6143,12 +6065,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html index 1348bcc815..821234d015 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html @@ -323,12 +323,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -773,12 +767,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -1059,12 +1047,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, @@ -1509,12 +1491,6 @@

Method Details

"status": "A String", # Disk status (e.g. online). "totalCapacityBytes": "A String", # Disk capacity. "totalFreeBytes": "A String", # Disk free space. - "vmwareConfig": { # VMware disk config details. # VMware disk details. Deprecated - "backingType": "A String", # VMDK backing type. - "rdmCompatibilityMode": "A String", # RDM compatibility mode. - "shared": True or False, # Is VMDK shared with other VMs. - "vmdkDiskMode": "A String", # VMDK disk mode. - }, }, ], }, diff --git a/docs/dyn/monitoring_v3.projects.snoozes.html b/docs/dyn/monitoring_v3.projects.snoozes.html index 5a10e93289..3ec6f4f533 100644 --- a/docs/dyn/monitoring_v3.projects.snoozes.html +++ b/docs/dyn/monitoring_v3.projects.snoozes.html @@ -109,7 +109,7 @@

Method Details

{ # A Snooze will prevent any alerts from being opened, and close any that are already open. The Snooze will work on alerts that match the criteria defined in the Snooze. The Snooze will be active from interval.start_time through interval.end_time.
  "criteria": { # Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied. # Required. This defines the criteria for applying the Snooze. See Criteria for more information.
-   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of "1234567890", a metric label with an instance name of "group", a metadata user label with a key of "foo" and a value of "bar", and a metadata system label with a key of "region" and a value of "us-central1": "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
+   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
    "policies": [ # The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation. Exactly 1 alert policy is required if filter is specified at the same time.
      "A String",
    ],
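
As a usage sketch of the changed field: creating a Snooze whose criteria pairs a filter with exactly one alert policy, via google-api-python-client. All resource names and timestamps below are placeholders:

```python
from googleapiclient.discovery import build

monitoring = build("monitoring", "v3")  # uses Application Default Credentials

# Exactly one alert policy is required whenever "filter" is set.
snooze_body = {
    "displayName": "DB maintenance window",  # placeholder
    "criteria": {
        "policies": ["projects/my-project-id/alertPolicies/1234567890"],  # placeholder
        # AIP-160 syntax; multiple labels must be joined with AND.
        "filter": 'resource.labels.instance_id="1234567890" AND metadata.user_labels.foo="bar"',
    },
    "interval": {
        "startTime": "2025-06-01T00:00:00Z",
        "endTime": "2025-06-01T04:00:00Z",
    },
}

snooze = monitoring.projects().snoozes().create(
    parent="projects/my-project-id", body=snooze_body
).execute()
print(snooze["name"])
```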

@@ -132,7 +132,7 @@

Method Details

{ # A Snooze will prevent any alerts from being opened, and close any that are already open. The Snooze will work on alerts that match the criteria defined in the Snooze. The Snooze will be active from interval.start_time through interval.end_time.
  "criteria": { # Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied. # Required. This defines the criteria for applying the Snooze. See Criteria for more information.
-   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of "1234567890", a metric label with an instance name of "group", a metadata user label with a key of "foo" and a value of "bar", and a metadata system label with a key of "region" and a value of "us-central1": "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
+   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
    "policies": [ # The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation. Exactly 1 alert policy is required if filter is specified at the same time.
      "A String",
    ],
@@ -162,7 +162,7 @@

Method Details

{ # A Snooze will prevent any alerts from being opened, and close any that are already open. The Snooze will work on alerts that match the criteria defined in the Snooze. The Snooze will be active from interval.start_time through interval.end_time.
  "criteria": { # Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied. # Required. This defines the criteria for applying the Snooze. See Criteria for more information.
-   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of "1234567890", a metric label with an instance name of "group", a metadata user label with a key of "foo" and a value of "bar", and a metadata system label with a key of "region" and a value of "us-central1": "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
+   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
    "policies": [ # The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation. Exactly 1 alert policy is required if filter is specified at the same time.
      "A String",
    ],
@@ -198,7 +198,7 @@

Method Details

"snoozes": [ # Snoozes matching this list call. { # A Snooze will prevent any alerts from being opened, and close any that are already open. The Snooze will work on alerts that match the criteria defined in the Snooze. The Snooze will be active from interval.start_time through interval.end_time. "criteria": { # Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied. # Required. This defines the criteria for applying the Snooze. See Criteria for more information. - "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of "1234567890", a metric label with an instance name of "group", a metadata user label with a key of "foo" and a value of "bar", and a metadata system label with a key of "region" and a value of "us-central1": "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\"" + "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\"" "policies": [ # The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation. Exactly 1 alert policy is required if filter is specified at the same time. "A String", ], @@ -239,7 +239,7 @@

Method Details

{ # A Snooze will prevent any alerts from being opened, and close any that are already open. The Snooze will work on alerts that match the criteria defined in the Snooze. The Snooze will be active from interval.start_time through interval.end_time.
  "criteria": { # Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied. # Required. This defines the criteria for applying the Snooze. See Criteria for more information.
-   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of "1234567890", a metric label with an instance name of "group", a metadata user label with a key of "foo" and a value of "bar", and a metadata system label with a key of "region" and a value of "us-central1": "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
+   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
    "policies": [ # The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation. Exactly 1 alert policy is required if filter is specified at the same time.
      "A String",
    ],
@@ -263,7 +263,7 @@

Method Details

{ # A Snooze will prevent any alerts from being opened, and close any that are already open. The Snooze will work on alerts that match the criteria defined in the Snooze. The Snooze will be active from interval.start_time through interval.end_time.
  "criteria": { # Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied. # Required. This defines the criteria for applying the Snooze. See Criteria for more information.
-   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of "1234567890", a metric label with an instance name of "group", a metadata user label with a key of "foo" and a value of "bar", and a metadata system label with a key of "region" and a value of "us-central1": "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
+   "filter": "A String", # Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\""
    "policies": [ # The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation. Exactly 1 alert policy is required if filter is specified at the same time.
      "A String",
    ],
diff --git a/docs/dyn/netapp_v1.projects.locations.volumes.replications.html b/docs/dyn/netapp_v1.projects.locations.volumes.replications.html
index 7f6e35b014..c558dc5d45 100644
--- a/docs/dyn/netapp_v1.projects.locations.volumes.replications.html
+++ b/docs/dyn/netapp_v1.projects.locations.volumes.replications.html
@@ -142,13 +142,13 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. "labels": { # Resource labels to represent user provided metadata. @@ -316,13 +316,13 @@

@@ -316,13 +316,13 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. "labels": { # Resource labels to represent user provided metadata. @@ -386,13 +386,13 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. "labels": { # Resource labels to represent user provided metadata. @@ -463,13 +463,13 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. "labels": { # Resource labels to represent user provided metadata. diff --git a/docs/dyn/netapp_v1beta1.projects.locations.volumes.html b/docs/dyn/netapp_v1beta1.projects.locations.volumes.html index 1b63055d6d..48ec5b05c1 100644 --- a/docs/dyn/netapp_v1beta1.projects.locations.volumes.html +++ b/docs/dyn/netapp_v1beta1.projects.locations.volumes.html @@ -164,9 +164,11 @@

Method Details

"hybridReplicationParameters": { # The Hybrid Replication parameters for the volume. # Optional. The Hybrid Replication parameters for the volume. "clusterLocation": "A String", # Optional. Name of source cluster location associated with the Hybrid replication. This is a free-form field for the display purpose only. "description": "A String", # Optional. Description of the replication. + "hybridReplicationType": "A String", # Optional. Type of the hybrid replication. "labels": { # Optional. Labels to be added to the replication as the key value pairs. "a_key": "A String", }, + "largeVolumeConstituentCount": 42, # Optional. Constituent volume count for large volume. "peerClusterName": "A String", # Required. Name of the user's local source cluster to be peered with the destination cluster. "peerIpAddresses": [ # Required. List of node ip addresses to be peered with. "A String", @@ -174,6 +176,7 @@

Method Details

"peerSvmName": "A String", # Required. Name of the user's local source vserver svm to be peered with the destination vserver svm. "peerVolumeName": "A String", # Required. Name of the user's local source volume to be peered with the destination volume. "replication": "A String", # Required. Desired name for the replication of this volume. + "replicationSchedule": "A String", # Optional. Replication Schedule for the replication created. }, "kerberosEnabled": True or False, # Optional. Flag indicating if the volume is a kerberos volume or not, export policy rules control kerberos security modes (krb5, krb5i, krb5p). "kmsConfig": "A String", # Output only. Specifies the KMS config to be used for volume encryption. @@ -367,9 +370,11 @@

@@ -367,9 +370,11 @@

Method Details

"hybridReplicationParameters": { # The Hybrid Replication parameters for the volume. # Optional. The Hybrid Replication parameters for the volume. "clusterLocation": "A String", # Optional. Name of source cluster location associated with the Hybrid replication. This is a free-form field for the display purpose only. "description": "A String", # Optional. Description of the replication. + "hybridReplicationType": "A String", # Optional. Type of the hybrid replication. "labels": { # Optional. Labels to be added to the replication as the key value pairs. "a_key": "A String", }, + "largeVolumeConstituentCount": 42, # Optional. Constituent volume count for large volume. "peerClusterName": "A String", # Required. Name of the user's local source cluster to be peered with the destination cluster. "peerIpAddresses": [ # Required. List of node ip addresses to be peered with. "A String", @@ -377,6 +382,7 @@

Method Details

"peerSvmName": "A String", # Required. Name of the user's local source vserver svm to be peered with the destination vserver svm. "peerVolumeName": "A String", # Required. Name of the user's local source volume to be peered with the destination volume. "replication": "A String", # Required. Desired name for the replication of this volume. + "replicationSchedule": "A String", # Optional. Replication Schedule for the replication created. }, "kerberosEnabled": True or False, # Optional. Flag indicating if the volume is a kerberos volume or not, export policy rules control kerberos security modes (krb5, krb5i, krb5p). "kmsConfig": "A String", # Output only. Specifies the KMS config to be used for volume encryption. @@ -515,9 +521,11 @@

Method Details

"hybridReplicationParameters": { # The Hybrid Replication parameters for the volume. # Optional. The Hybrid Replication parameters for the volume. "clusterLocation": "A String", # Optional. Name of source cluster location associated with the Hybrid replication. This is a free-form field for the display purpose only. "description": "A String", # Optional. Description of the replication. + "hybridReplicationType": "A String", # Optional. Type of the hybrid replication. "labels": { # Optional. Labels to be added to the replication as the key value pairs. "a_key": "A String", }, + "largeVolumeConstituentCount": 42, # Optional. Constituent volume count for large volume. "peerClusterName": "A String", # Required. Name of the user's local source cluster to be peered with the destination cluster. "peerIpAddresses": [ # Required. List of node ip addresses to be peered with. "A String", @@ -525,6 +533,7 @@

Method Details

"peerSvmName": "A String", # Required. Name of the user's local source vserver svm to be peered with the destination vserver svm. "peerVolumeName": "A String", # Required. Name of the user's local source volume to be peered with the destination volume. "replication": "A String", # Required. Desired name for the replication of this volume. + "replicationSchedule": "A String", # Optional. Replication Schedule for the replication created. }, "kerberosEnabled": True or False, # Optional. Flag indicating if the volume is a kerberos volume or not, export policy rules control kerberos security modes (krb5, krb5i, krb5p). "kmsConfig": "A String", # Output only. Specifies the KMS config to be used for volume encryption. @@ -664,9 +673,11 @@

Method Details

"hybridReplicationParameters": { # The Hybrid Replication parameters for the volume. # Optional. The Hybrid Replication parameters for the volume. "clusterLocation": "A String", # Optional. Name of source cluster location associated with the Hybrid replication. This is a free-form field for the display purpose only. "description": "A String", # Optional. Description of the replication. + "hybridReplicationType": "A String", # Optional. Type of the hybrid replication. "labels": { # Optional. Labels to be added to the replication as the key value pairs. "a_key": "A String", }, + "largeVolumeConstituentCount": 42, # Optional. Constituent volume count for large volume. "peerClusterName": "A String", # Required. Name of the user's local source cluster to be peered with the destination cluster. "peerIpAddresses": [ # Required. List of node ip addresses to be peered with. "A String", @@ -674,6 +685,7 @@

Method Details

"peerSvmName": "A String", # Required. Name of the user's local source vserver svm to be peered with the destination vserver svm. "peerVolumeName": "A String", # Required. Name of the user's local source volume to be peered with the destination volume. "replication": "A String", # Required. Desired name for the replication of this volume. + "replicationSchedule": "A String", # Optional. Replication Schedule for the replication created. }, "kerberosEnabled": True or False, # Optional. Flag indicating if the volume is a kerberos volume or not, export policy rules control kerberos security modes (krb5, krb5i, krb5p). "kmsConfig": "A String", # Output only. Specifies the KMS config to be used for volume encryption. diff --git a/docs/dyn/netapp_v1beta1.projects.locations.volumes.replications.html b/docs/dyn/netapp_v1beta1.projects.locations.volumes.replications.html index bd760937fb..9131dabf4d 100644 --- a/docs/dyn/netapp_v1beta1.projects.locations.volumes.replications.html +++ b/docs/dyn/netapp_v1beta1.projects.locations.volumes.replications.html @@ -143,15 +143,20 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. + "hybridReplicationUserCommands": { # UserCommands contains the commands to be executed by the customer. # Output only. Copy pastable snapmirror commands to be executed on onprem cluster by the customer. + "commands": [ # Output only. List of commands to be executed by the customer. + "A String", + ], + }, "labels": { # Resource labels to represent user provided metadata. "a_key": "A String", }, @@ -318,15 +323,20 @@

@@ -318,15 +323,20 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. + "hybridReplicationUserCommands": { # UserCommands contains the commands to be executed by the customer. # Output only. Copy pastable snapmirror commands to be executed on onprem cluster by the customer. + "commands": [ # Output only. List of commands to be executed by the customer. + "A String", + ], + }, "labels": { # Resource labels to represent user provided metadata. "a_key": "A String", }, @@ -389,15 +399,20 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. + "hybridReplicationUserCommands": { # UserCommands contains the commands to be executed by the customer. # Output only. Copy pastable snapmirror commands to be executed on onprem cluster by the customer. + "commands": [ # Output only. List of commands to be executed by the customer. + "A String", + ], + }, "labels": { # Resource labels to represent user provided metadata. "a_key": "A String", }, @@ -467,15 +482,20 @@

Method Details

}, "healthy": True or False, # Output only. Condition of the relationship. Can be one of the following: - true: The replication relationship is healthy. It has not missed the most recent scheduled transfer. - false: The replication relationship is not healthy. It has missed the most recent scheduled transfer. "hybridPeeringDetails": { # HybridPeeringDetails contains details about the hybrid peering. # Output only. Hybrid peering details. - "command": "A String", # Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. - "commandExpiryTime": "A String", # Optional. Expiration time for the peering command to be executed on user's ONTAP. - "passphrase": "A String", # Optional. Temporary passphrase generated to accept cluster peering command. - "peerClusterName": "A String", # Optional. Name of the user's local source cluster to be peered with the destination cluster. - "peerSvmName": "A String", # Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. - "peerVolumeName": "A String", # Optional. Name of the user's local source volume to be peered with the destination volume. - "subnetIp": "A String", # Optional. IP address of the subnet. + "command": "A String", # Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. + "commandExpiryTime": "A String", # Output only. Expiration time for the peering command to be executed on user's ONTAP. + "passphrase": "A String", # Output only. Temporary passphrase generated to accept cluster peering command. + "peerClusterName": "A String", # Output only. Name of the user's local source cluster to be peered with the destination cluster. + "peerSvmName": "A String", # Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm. + "peerVolumeName": "A String", # Output only. Name of the user's local source volume to be peered with the destination volume. + "subnetIp": "A String", # Output only. IP address of the subnet. }, "hybridReplicationType": "A String", # Output only. Type of the hybrid replication. + "hybridReplicationUserCommands": { # UserCommands contains the commands to be executed by the customer. # Output only. Copy pastable snapmirror commands to be executed on onprem cluster by the customer. + "commands": [ # Output only. List of commands to be executed by the customer. + "A String", + ], + }, "labels": { # Resource labels to represent user provided metadata. "a_key": "A String", }, diff --git a/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html b/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html index dcbf8cef73..00fc23f0ef 100644 --- a/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html +++ b/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html @@ -277,6 +277,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -414,7 +415,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -590,6 +591,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -727,7 +729,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -1102,6 +1104,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -1239,7 +1242,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -1415,6 +1418,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -1552,7 +1556,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -1918,6 +1922,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -2055,7 +2060,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -2231,6 +2236,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -2368,7 +2374,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -2693,6 +2699,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -2830,7 +2837,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -3006,6 +3013,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -3143,7 +3151,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. diff --git a/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html b/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html index e2c129d402..ae6061110e 100644 --- a/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html +++ b/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html @@ -278,6 +278,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -415,7 +416,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -591,6 +592,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -728,7 +730,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -1105,6 +1107,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -1242,7 +1245,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -1418,6 +1421,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -1555,7 +1559,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -1923,6 +1927,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -2060,7 +2065,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -2236,6 +2241,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -2373,7 +2379,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -2700,6 +2706,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -2837,7 +2844,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. @@ -3013,6 +3020,7 @@

Method Details

"uri": "A String", # URI of a Cloud SQL instance. }, "deliver": { # Details of the final state "deliver" and associated resource. # Display information of the final state "deliver" and reason. + "googleServiceType": "A String", # Recognized type of a Google Service the packet is delivered to (if applicable). "ipAddress": "A String", # IP address of the target (if applicable). "pscGoogleApiTarget": "A String", # PSC Google API target the packet is delivered to (if applicable). "resourceUri": "A String", # URI of the resource that the packet is delivered to. @@ -3150,7 +3158,7 @@

Method Details

"routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. "type": "A String", # Type of NAT. }, - "network": { # For display only. Metadata associated with a Compute Engine network. Next ID: 7 # Display information of a Google Cloud network. + "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range of the subnet matching the source IP address of the test. "matchedSubnetUri": "A String", # URI of the subnet matching the source IP address of the test. diff --git a/docs/dyn/notebooks_v2.projects.locations.instances.html b/docs/dyn/notebooks_v2.projects.locations.instances.html index 5964350f21..e7f92ea6d3 100644 --- a/docs/dyn/notebooks_v2.projects.locations.instances.html +++ b/docs/dyn/notebooks_v2.projects.locations.instances.html @@ -268,7 +268,7 @@

Method Details

}, "healthState": "A String", # Output only. Instance health_state. "id": "A String", # Output only. Unique ID of the resource. - "instanceOwners": [ # Optional. Input only. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. + "instanceOwners": [ # Optional. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. "A String", ], "labels": { # Optional. Labels to apply to this instance. These can be later modified by the UpdateInstance method. @@ -517,7 +517,7 @@

Method Details

}, "healthState": "A String", # Output only. Instance health_state. "id": "A String", # Output only. Unique ID of the resource. - "instanceOwners": [ # Optional. Input only. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. + "instanceOwners": [ # Optional. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. "A String", ], "labels": { # Optional. Labels to apply to this instance. These can be later modified by the UpdateInstance method. @@ -728,7 +728,7 @@

Method Details

}, "healthState": "A String", # Output only. Instance health_state. "id": "A String", # Output only. Unique ID of the resource. - "instanceOwners": [ # Optional. Input only. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. + "instanceOwners": [ # Optional. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. "A String", ], "labels": { # Optional. Labels to apply to this instance. These can be later modified by the UpdateInstance method. @@ -877,7 +877,7 @@

Method Details

}, "healthState": "A String", # Output only. Instance health_state. "id": "A String", # Output only. Unique ID of the resource. - "instanceOwners": [ # Optional. Input only. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. + "instanceOwners": [ # Optional. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance. "A String", ], "labels": { # Optional. Labels to apply to this instance. These can be later modified by the UpdateInstance method. diff --git a/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html b/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html index 4b16833a97..4e1ede99bf 100644 --- a/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html +++ b/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html @@ -540,6 +540,14 @@

Method Details

"archiveTime": "A String", # Output only. The time occurrences related to this discovery occurrence were archived. "continuousAnalysis": "A String", # Whether the resource is continuously analyzed. "cpe": "A String", # The CPE of the resource being scanned. + "files": [ # Files that make up the resource described by the occurrence. + { + "digest": { + "a_key": "A String", + }, + "name": "A String", + }, + ], "lastScanTime": "A String", # The last time this resource was scanned. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. diff --git a/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html b/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html index ac69b87ff7..bf806ee9e5 100644 --- a/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html +++ b/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html @@ -540,6 +540,14 @@

Method Details

"archiveTime": "A String", # Output only. The time occurrences related to this discovery occurrence were archived. "continuousAnalysis": "A String", # Whether the resource is continuously analyzed. "cpe": "A String", # The CPE of the resource being scanned. + "files": [ # Files that make up the resource described by the occurrence. + { + "digest": { + "a_key": "A String", + }, + "name": "A String", + }, + ], "lastScanTime": "A String", # The last time this resource was scanned. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. diff --git a/docs/dyn/oracledatabase_v1.projects.locations.cloudVmClusters.dbNodes.html b/docs/dyn/oracledatabase_v1.projects.locations.cloudVmClusters.dbNodes.html index 66439da836..c6b230634d 100644 --- a/docs/dyn/oracledatabase_v1.projects.locations.cloudVmClusters.dbNodes.html +++ b/docs/dyn/oracledatabase_v1.projects.locations.cloudVmClusters.dbNodes.html @@ -94,7 +94,7 @@

Method Details

Lists the database nodes of a VM Cluster.
 
 Args:
-  parent: string, Required. The parent value for database node in the following format: projects/{project}/locations/{location}/cloudVmClusters/{cloudVmCluster}. (required)
+  parent: string, Required. The parent value for database node in the following format: projects/{project}/locations/{location}/cloudVmClusters/{cloudVmCluster}. (required)
   pageSize: integer, Optional. The maximum number of items to return. If unspecified, at most 50 db nodes will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
   pageToken: string, Optional. A token identifying a page of results the node should return.
   x__xgafv: string, V1 error format.
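
For context, a minimal sketch of paging through these results with the generated Python client. The project, location, and cluster names are placeholders, and the `dbNodes` response key and `list_next()` helper follow the usual google-api-python-client paging conventions rather than anything shown in this hunk:

```python
# Sketch: list the database nodes of a VM Cluster, following nextPageToken
# paging via the generated list_next() helper. Assumes application-default
# credentials; the resource names below are hypothetical.
from googleapiclient.discovery import build

service = build("oracledatabase", "v1")
parent = "projects/my-project/locations/us-east4/cloudVmClusters/my-cluster"  # hypothetical

request = service.projects().locations().cloudVmClusters().dbNodes().list(
    parent=parent, pageSize=50)
while request is not None:
    response = request.execute()
    for node in response.get("dbNodes", []):  # response key assumed
        print(node.get("name"))
    request = service.projects().locations().cloudVmClusters().dbNodes().list_next(
        previous_request=request, previous_response=response)
```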
diff --git a/docs/dyn/oslogin_v1.users.html b/docs/dyn/oslogin_v1.users.html
index 46edfb5043..f6f8e64117 100644
--- a/docs/dyn/oslogin_v1.users.html
+++ b/docs/dyn/oslogin_v1.users.html
@@ -105,8 +105,8 @@ 

Method Details

Args:
  name: string, Required. The unique ID for the user in format `users/{user}`. (required)
-  projectId: string, The project ID of the Google Cloud Platform project.
-  systemId: string, A system ID for filtering the results of the request.
+  projectId: string, Required. The project ID of the Google Cloud Platform project.
+  systemId: string, Optional. A system ID for filtering the results of the request.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
diff --git a/docs/dyn/oslogin_v1.users.sshPublicKeys.html b/docs/dyn/oslogin_v1.users.sshPublicKeys.html
index cc576ce9d0..548c2f848e 100644
--- a/docs/dyn/oslogin_v1.users.sshPublicKeys.html
+++ b/docs/dyn/oslogin_v1.users.sshPublicKeys.html
@@ -183,7 +183,7 @@

Method Details

"name": "A String", # Output only. The canonical resource name. } - updateMask: string, Mask to control which fields get updated. Updates all if not present. + updateMask: string, Optional. Mask to control which fields get updated. Updates all if not present. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/oslogin_v1alpha.users.html b/docs/dyn/oslogin_v1alpha.users.html index 22fac029c1..8ea957b144 100644 --- a/docs/dyn/oslogin_v1alpha.users.html +++ b/docs/dyn/oslogin_v1alpha.users.html @@ -105,13 +105,13 @@

Method Details

Args:
  name: string, Required. The unique ID for the user in format `users/{user}`. (required)
-  operatingSystemType: string, The type of operating system associated with the account.
+  operatingSystemType: string, Optional. The type of operating system associated with the account.
    Allowed values
      OPERATING_SYSTEM_TYPE_UNSPECIFIED - The operating system type associated with the user account information is unspecified.
      LINUX - Linux user account information.
      WINDOWS - Windows user account information.
-  projectId: string, The project ID of the Google Cloud Platform project.
-  systemId: string, A system ID for filtering the results of the request.
+  projectId: string, Required. The project ID of the Google Cloud Platform project.
+  systemId: string, Optional. A system ID for filtering the results of the request.
  view: string, The view configures whether to retrieve security keys information.
    Allowed values
      LOGIN_PROFILE_VIEW_UNSPECIFIED - The default login profile view. The API defaults to the BASIC view.
diff --git a/docs/dyn/oslogin_v1alpha.users.projects.html b/docs/dyn/oslogin_v1alpha.users.projects.html
index 5aa7a1e85a..4b7bb34c74 100644
--- a/docs/dyn/oslogin_v1alpha.users.projects.html
+++ b/docs/dyn/oslogin_v1alpha.users.projects.html
@@ -105,7 +105,7 @@

Method Details

Args:
  name: string, Required. A reference to the POSIX account to update. POSIX accounts are identified by the project ID they are associated with. A reference to the POSIX account is in format `users/{user}/projects/{project}`. (required)
-  operatingSystemType: string, The type of operating system associated with the account.
+  operatingSystemType: string, Optional. The type of operating system associated with the account.
    Allowed values
      OPERATING_SYSTEM_TYPE_UNSPECIFIED - The operating system type associated with the user account information is unspecified.
      LINUX - Linux user account information.
diff --git a/docs/dyn/oslogin_v1alpha.users.projects.locations.html b/docs/dyn/oslogin_v1alpha.users.projects.locations.html
index 2cc058911c..c63f2e4ef9 100644
--- a/docs/dyn/oslogin_v1alpha.users.projects.locations.html
+++ b/docs/dyn/oslogin_v1alpha.users.projects.locations.html
@@ -91,12 +91,12 @@

Method Details

Signs an SSH public key for a user to authenticate to a virtual machine on Google Compute Engine.
 
 Args:
-  parent: string, The parent project and region for the signing request. (required)
+  parent: string, Required. The parent project and region for the signing request. (required)
   body: object, The request body.
     The object takes the form of:
 
 {
-  "sshPublicKey": "A String", # The SSH public key to sign.
+  "sshPublicKey": "A String", # Required. The SSH public key to sign.
 }
 
   x__xgafv: string, V1 error format.
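
A minimal usage sketch for the request shape above, assuming the method is exposed as `signSshPublicKey()` on the `users().projects().locations()` resource as the page this hunk comes from suggests; the parent path and key material are placeholders:

```python
# Sketch: sign an SSH public key for OS Login (v1alpha). Error handling is
# omitted; both the parent and the key below are hypothetical.
from googleapiclient.discovery import build

service = build("oslogin", "v1alpha")
parent = "users/alice@example.com/projects/my-project/locations/us-central1-a"  # hypothetical

body = {"sshPublicKey": "ssh-ed25519 AAAAC3Nz... alice@example.com"}  # placeholder key
response = service.users().projects().locations().signSshPublicKey(
    parent=parent, body=body).execute()
print(response)
```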
diff --git a/docs/dyn/oslogin_v1alpha.users.projects.zones.html b/docs/dyn/oslogin_v1alpha.users.projects.zones.html
index d0f8a57e14..02983a46e9 100644
--- a/docs/dyn/oslogin_v1alpha.users.projects.zones.html
+++ b/docs/dyn/oslogin_v1alpha.users.projects.zones.html
@@ -91,12 +91,12 @@ 

Method Details

Signs an SSH public key for a user to authenticate to a virtual machine on Google Compute Engine.
 
 Args:
-  parent: string, The parent project and region for the signing request. (required)
+  parent: string, Required. The parent project and region for the signing request. (required)
   body: object, The request body.
     The object takes the form of:
 
 {
-  "sshPublicKey": "A String", # The SSH public key to sign.
+  "sshPublicKey": "A String", # Required. The SSH public key to sign.
 }
 
   x__xgafv: string, V1 error format.
diff --git a/docs/dyn/oslogin_v1alpha.users.sshPublicKeys.html b/docs/dyn/oslogin_v1alpha.users.sshPublicKeys.html
index dab942e66e..29c4cadf1c 100644
--- a/docs/dyn/oslogin_v1alpha.users.sshPublicKeys.html
+++ b/docs/dyn/oslogin_v1alpha.users.sshPublicKeys.html
@@ -183,7 +183,7 @@ 

Method Details

"name": "A String", # Output only. The canonical resource name. } - updateMask: string, Mask to control which fields get updated. Updates all if not present. + updateMask: string, Optional. Mask to control which fields get updated. Updates all if not present. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/oslogin_v1beta.users.html b/docs/dyn/oslogin_v1beta.users.html index 288cb1c23e..20662c9194 100644 --- a/docs/dyn/oslogin_v1beta.users.html +++ b/docs/dyn/oslogin_v1beta.users.html @@ -105,8 +105,8 @@

Method Details

Args:
  name: string, Required. The unique ID for the user in format `users/{user}`. (required)
-  projectId: string, The project ID of the Google Cloud Platform project.
-  systemId: string, A system ID for filtering the results of the request.
+  projectId: string, Required. The project ID of the Google Cloud Platform project.
+  systemId: string, Optional. A system ID for filtering the results of the request.
  view: string, The view configures whether to retrieve security keys information.
    Allowed values
      LOGIN_PROFILE_VIEW_UNSPECIFIED - The default login profile view. The API defaults to the BASIC view.
diff --git a/docs/dyn/oslogin_v1beta.users.projects.locations.html b/docs/dyn/oslogin_v1beta.users.projects.locations.html
index d008c7571f..b74e7d5fd9 100644
--- a/docs/dyn/oslogin_v1beta.users.projects.locations.html
+++ b/docs/dyn/oslogin_v1beta.users.projects.locations.html
@@ -91,12 +91,12 @@

Method Details

Signs an SSH public key for a user to authenticate to an instance.
 
 Args:
-  parent: string, The parent project and region for the signing request. (required)
+  parent: string, Required. The parent project and region for the signing request. (required)
   body: object, The request body.
     The object takes the form of:
 
 {
-  "sshPublicKey": "A String", # The SSH public key to sign.
+  "sshPublicKey": "A String", # Required. The SSH public key to sign.
 }
 
   x__xgafv: string, V1 error format.
diff --git a/docs/dyn/oslogin_v1beta.users.projects.zones.html b/docs/dyn/oslogin_v1beta.users.projects.zones.html
index 591394858d..474927f9e8 100644
--- a/docs/dyn/oslogin_v1beta.users.projects.zones.html
+++ b/docs/dyn/oslogin_v1beta.users.projects.zones.html
@@ -91,12 +91,12 @@ 

Method Details

Signs an SSH public key for a user to authenticate to an instance.
 
 Args:
-  parent: string, The parent project and region for the signing request. (required)
+  parent: string, Required. The parent project and region for the signing request. (required)
   body: object, The request body.
     The object takes the form of:
 
 {
-  "sshPublicKey": "A String", # The SSH public key to sign.
+  "sshPublicKey": "A String", # Required. The SSH public key to sign.
 }
 
   x__xgafv: string, V1 error format.
diff --git a/docs/dyn/playintegrity_v1.v1.html b/docs/dyn/playintegrity_v1.v1.html
index de30c3d8cb..c730b9e646 100644
--- a/docs/dyn/playintegrity_v1.v1.html
+++ b/docs/dyn/playintegrity_v1.v1.html
@@ -80,6 +80,9 @@ 

Instance Methods

decodeIntegrityToken(packageName, body=None, x__xgafv=None)

Decodes the integrity token and returns the token payload.

+

+ decodePcIntegrityToken(packageName, body=None, x__xgafv=None)

+

Decodes the PC integrity token and returns the PC token payload.

Method Details

  close()
@@ -170,4 +173,41 @@

Method Details

}
+
+ decodePcIntegrityToken(packageName, body=None, x__xgafv=None) +
Decodes the PC integrity token and returns the PC token payload.
+
+Args:
+  packageName: string, Package name of the app the attached integrity token belongs to. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request to decode the PC integrity token.
+  "integrityToken": "A String", # Encoded integrity token.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response containing the decoded PC integrity payload.
+  "tokenPayloadExternal": { # Contains PC device attestation details. # Plain token payload generated from the decoded integrity token.
+    "deviceIntegrity": { # Contains the device attestation information. # Required. Details about the device integrity.
+      "deviceRecognitionVerdict": [ # Details about the integrity of the device the app is running on.
+        "A String",
+      ],
+    },
+    "requestDetails": { # Contains the integrity request information. # Required. Details about the integrity request.
+      "requestHash": "A String", # Request hash that was provided in the request.
+      "requestPackageName": "A String", # Required. Application package name this attestation was requested for. Note: This field makes no guarantees or promises on the caller integrity.
+      "requestTime": "A String", # Required. Timestamp, of the integrity application request.
+    },
+  },
+}
+
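
A minimal sketch of calling the new method through the generated client; the package name and token value are placeholders, and the token itself would come from the Play Integrity client on the requesting machine:

```python
# Sketch: decode a PC integrity token and read the device verdicts.
from googleapiclient.discovery import build

service = build("playintegrity", "v1")
body = {"integrityToken": "eyJ..."}  # placeholder; supplied by the calling app

response = service.v1().decodePcIntegrityToken(
    packageName="com.example.app", body=body).execute()
payload = response["tokenPayloadExternal"]
print(payload["deviceIntegrity"]["deviceRecognitionVerdict"])
print(payload["requestDetails"]["requestPackageName"])
```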
+
\ No newline at end of file
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html
index 84fc59a34e..15116d8473 100644
--- a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html
+++ b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html
@@ -601,6 +601,13 @@

Method Details

"matchingVariantFields": { # If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key "sku1" with field mask "products.color_info" indicates there is a match between "sku1" ColorInfo and the query. "a_key": "A String", }, + "modelScores": { # Google provided available scores. + "a_key": { # A message with a list of double values. + "values": [ # The list of double values. + 3.14, + ], + }, + }, "personalLabels": [ # Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before. "A String", ], diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html index e16f156bab..45adcadaf7 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html @@ -1152,6 +1152,13 @@

Method Details

"matchingVariantFields": { # If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key "sku1" with field mask "products.color_info" indicates there is a match between "sku1" ColorInfo and the query. "a_key": "A String", }, + "modelScores": { # Google provided available scores. + "a_key": { # A message with a list of double values. + "values": [ # The list of double values. + 3.14, + ], + }, + }, "personalLabels": [ # Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before. "A String", ], diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.html index e325718b6b..ca594fb305 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.html @@ -522,7 +522,8 @@

Method Details

], "feeds": [ # Criteria for the Merchant Center feeds to be ingested via the link. All offers will be ingested if the list is empty. Otherwise the offers will be ingested from selected feeds. { # Merchant Center Feed filter criterion. - "primaryFeedId": "A String", # Merchant Center primary feed ID. + "dataSourceId": "A String", # AFM data source ID. + "primaryFeedId": "A String", # Merchant Center primary feed ID. Deprecated: use data_source_id instead. "primaryFeedName": "A String", # Merchant Center primary feed name. The name is used for the display purposes only. }, ], @@ -577,7 +578,8 @@

Method Details

], "feeds": [ # Criteria for the Merchant Center feeds to be ingested via the link. All offers will be ingested if the list is empty. Otherwise the offers will be ingested from selected feeds. { # Merchant Center Feed filter criterion. - "primaryFeedId": "A String", # Merchant Center primary feed ID. + "dataSourceId": "A String", # AFM data source ID. + "primaryFeedId": "A String", # Merchant Center primary feed ID. Deprecated: use data_source_id instead. "primaryFeedName": "A String", # Merchant Center primary feed name. The name is used for the display purposes only. }, ], @@ -614,7 +616,8 @@

Method Details

], "feeds": [ # Criteria for the Merchant Center feeds to be ingested via the link. All offers will be ingested if the list is empty. Otherwise the offers will be ingested from selected feeds. { # Merchant Center Feed filter criterion. - "primaryFeedId": "A String", # Merchant Center primary feed ID. + "dataSourceId": "A String", # AFM data source ID. + "primaryFeedId": "A String", # Merchant Center primary feed ID. Deprecated: use data_source_id instead. "primaryFeedName": "A String", # Merchant Center primary feed name. The name is used for the display purposes only. }, ], diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.merchantCenterAccountLinks.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.merchantCenterAccountLinks.html index 964c51fd68..bde67c5941 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.merchantCenterAccountLinks.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.merchantCenterAccountLinks.html @@ -105,7 +105,8 @@

Method Details

"branchId": "A String", # Required. The branch ID (e.g. 0/1/2) within the catalog that products from merchant_center_account_id are streamed to. When updating this field, an empty value will use the currently configured default branch. However, changing the default branch later on won't change the linked branch here. A single branch ID can only have one linked Merchant Center account ID. "feedFilters": [ # Criteria for the Merchant Center feeds to be ingested via the link. All offers will be ingested if the list is empty. Otherwise the offers will be ingested from selected feeds. { # Merchant Center Feed filter criterion. - "primaryFeedId": "A String", # Merchant Center primary feed ID. + "dataSourceId": "A String", # AFM data source ID. + "primaryFeedId": "A String", # Merchant Center primary feed ID. Deprecated: use data_source_id instead. "primaryFeedName": "A String", # Merchant Center primary feed name. The name is used for the display purposes only. }, ], @@ -186,7 +187,8 @@

Method Details

"branchId": "A String", # Required. The branch ID (e.g. 0/1/2) within the catalog that products from merchant_center_account_id are streamed to. When updating this field, an empty value will use the currently configured default branch. However, changing the default branch later on won't change the linked branch here. A single branch ID can only have one linked Merchant Center account ID. "feedFilters": [ # Criteria for the Merchant Center feeds to be ingested via the link. All offers will be ingested if the list is empty. Otherwise the offers will be ingested from selected feeds. { # Merchant Center Feed filter criterion. - "primaryFeedId": "A String", # Merchant Center primary feed ID. + "dataSourceId": "A String", # AFM data source ID. + "primaryFeedId": "A String", # Merchant Center primary feed ID. Deprecated: use data_source_id instead. "primaryFeedName": "A String", # Merchant Center primary feed name. The name is used for the display purposes only. }, ], diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html index 47eaa3216f..674a9da404 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html @@ -695,6 +695,13 @@

Method Details

"matchingVariantFields": { # If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key "sku1" with field mask "products.color_info" indicates there is a match between "sku1" ColorInfo and the query. "a_key": "A String", }, + "modelScores": { # Google provided available scores. + "a_key": { # A message with a list of double values. + "values": [ # The list of double values. + 3.14, + ], + }, + }, "personalLabels": [ # Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before. "A String", ], diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html index 81607a8955..5ffa5abad8 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html @@ -1246,6 +1246,13 @@

Method Details

"matchingVariantFields": { # If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key "sku1" with field mask "products.color_info" indicates there is a match between "sku1" ColorInfo and the query. "a_key": "A String", }, + "modelScores": { # Google provided available scores. + "a_key": { # A message with a list of double values. + "values": [ # The list of double values. + 3.14, + ], + }, + }, "personalLabels": [ # Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before. "A String", ], diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html index ad66ed19eb..4465482006 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html @@ -601,6 +601,13 @@

Method Details

"matchingVariantFields": { # If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key "sku1" with field mask "products.color_info" indicates there is a match between "sku1" ColorInfo and the query. "a_key": "A String", }, + "modelScores": { # Google provided available scores. + "a_key": { # A message with a list of double values. + "values": [ # The list of double values. + 3.14, + ], + }, + }, "personalLabels": [ # Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before. "A String", ], diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html index fa8e26ca9b..06f4011da8 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html @@ -1152,6 +1152,13 @@

Method Details

"matchingVariantFields": { # If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key "sku1" with field mask "products.color_info" indicates there is a match between "sku1" ColorInfo and the query. "a_key": "A String", }, + "modelScores": { # Google provided available scores. + "a_key": { # A message with a list of double values. + "values": [ # The list of double values. + 3.14, + ], + }, + }, "personalLabels": [ # Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before. "A String", ], diff --git a/docs/dyn/safebrowsing_v5.hashList.html b/docs/dyn/safebrowsing_v5.hashList.html new file mode 100644 index 0000000000..04fa9ee887 --- /dev/null +++ b/docs/dyn/safebrowsing_v5.hashList.html @@ -0,0 +1,159 @@ + + + +

Safe Browsing API . hashList

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, sizeConstraints_maxDatabaseEntries=None, sizeConstraints_maxUpdateEntries=None, version=None, x__xgafv=None)

+

Get the latest contents of a hash list. A hash list may either be a threat list or a non-threat list such as the Global Cache. This is a standard Get method as defined by https://google.aip.dev/131 and the HTTP method is also GET.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, sizeConstraints_maxDatabaseEntries=None, sizeConstraints_maxUpdateEntries=None, version=None, x__xgafv=None) +
Get the latest contents of a hash list. A hash list may either be a threat list or a non-threat list such as the Global Cache. This is a standard Get method as defined by https://google.aip.dev/131 and the HTTP method is also GET.
+
+Args:
+  name: string, Required. The name of this particular hash list. It may be a threat list, or it may be the Global Cache. (required)
+  sizeConstraints_maxDatabaseEntries: integer, Sets the maximum number of entries that the client is willing to have in the local database for the list. (The server MAY cause the client to store less than this number of entries.) If omitted or zero, no database size limit is set.
+  sizeConstraints_maxUpdateEntries: integer, The maximum size in number of entries. The update will not contain more entries than this value, but it is possible that the update will contain fewer entries than this value. This MUST be at least 1024. If omitted or zero, no update size limit is set.
+  version: string, The version of the hash list that the client already has. If this is the first time the client is fetching the hash list, this field MUST be left empty. Otherwise, the client SHOULD supply the version previously received from the server. The client MUST NOT manipulate those bytes. **What's new in V5**: in V4 of the API, this was called `states`; it is now renamed to `version` for clarity.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of hashes identified by its name.
+  "additionsEightBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 64-bit numbers. # The 8-byte additions.
+    "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+    "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+    "firstValue": "A String", # The first entry in the encoded data (hashes), or, if only a single hash prefix was encoded, that entry's value. If the field is empty, the entry is zero.
+    "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 35 and 62, inclusive.
+  },
+  "additionsFourBytes": { # The Rice-Golomb encoded data. Used for either hashes or removal indices. It is guaranteed that every hash or index here has the same length, and this length is exactly 32 bits. Generally speaking, if we sort all the entries lexicographically, we will find that the higher order bits tend not to change as frequently as lower order bits. This means that if we also take the adjacent difference between entries, the higher order bits have a high probability of being zero. This exploits this high probability of zero by essentially choosing a certain number of bits; all bits more significant than this are likely to be zero so we use unary encoding. See the `rice_parameter` field. Historical note: the Rice-delta encoding was first used in V4 of this API. In V5, two significant improvements were made: firstly, the Rice-delta encoding is now available with hash prefixes longer than 4 bytes; secondly, the encoded data are now treated as big-endian so as to avoid a costly sorting step. # The 4-byte additions.
+    "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+    "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+    "firstValue": 42, # The first entry in the encoded data (hashes or indices), or, if only a single hash prefix or index was encoded, that entry's value. If the field is empty, the entry is zero.
+    "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 3 and 30, inclusive.
+  },
+  "additionsSixteenBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 128-bit numbers. # The 16-byte additions.
+    "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+    "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+    "firstValueHi": "A String", # The upper 64 bits of the first entry in the encoded data (hashes). If the field is empty, the upper 64 bits are all zero.
+    "firstValueLo": "A String", # The lower 64 bits of the first entry in the encoded data (hashes). If the field is empty, the lower 64 bits are all zero.
+    "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 99 and 126, inclusive.
+  },
+  "additionsThirtyTwoBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 256-bit numbers. # The 32-byte additions.
+    "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+    "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+    "firstValueFirstPart": "A String", # The first 64 bits of the first entry in the encoded data (hashes). If the field is empty, the first 64 bits are all zero.
+    "firstValueFourthPart": "A String", # The last 64 bits of the first entry in the encoded data (hashes). If the field is empty, the last 64 bits are all zero.
+    "firstValueSecondPart": "A String", # The 65th through 128th bits of the first entry in the encoded data (hashes). If the field is empty, the 65th through 128th bits are all zero.
+    "firstValueThirdPart": "A String", # The 129th through 192nd bits of the first entry in the encoded data (hashes). If the field is empty, the 129th through 192nd bits are all zero.
+    "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 227 and 254, inclusive.
+  },
+  "compressedRemovals": { # The Rice-Golomb encoded data. Used for either hashes or removal indices. It is guaranteed that every hash or index here has the same length, and this length is exactly 32 bits. Generally speaking, if we sort all the entries lexicographically, we will find that the higher order bits tend not to change as frequently as lower order bits. This means that if we also take the adjacent difference between entries, the higher order bits have a high probability of being zero. This exploits this high probability of zero by essentially choosing a certain number of bits; all bits more significant than this are likely to be zero so we use unary encoding. See the `rice_parameter` field. Historical note: the Rice-delta encoding was first used in V4 of this API. In V5, two significant improvements were made: firstly, the Rice-delta encoding is now available with hash prefixes longer than 4 bytes; secondly, the encoded data are now treated as big-endian so as to avoid a costly sorting step. # The Rice-delta encoded version of removal indices. Since each hash list definitely has less than 2^32 entries, the indices are treated as 32-bit integers and encoded.
+    "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+    "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+    "firstValue": 42, # The first entry in the encoded data (hashes or indices), or, if only a single hash prefix or index was encoded, that entry's value. If the field is empty, the entry is zero.
+    "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 3 and 30, inclusive.
+  },
+  "metadata": { # Metadata about a particular hash list. # Metadata about the hash list. This is not populated by the `GetHashList` method; it is populated by the `ListHashLists` method.
+    "description": "A String", # A human-readable description about this list. Written in English.
+    "hashLength": "A String", # The supported hash length for this hash list. Each hash list will support exactly one length. If a different hash length is introduced for the same set of threat types or safe types, it will be introduced as a separate list with a distinct name and respective hash length set.
+    "likelySafeTypes": [ # Unordered list. If not empty, this specifies that the hash list represents a list of likely safe hashes, and this enumerates the ways they are considered likely safe. This field is mutually exclusive with the threat_types field.
+      "A String",
+    ],
+    "threatTypes": [ # Unordered list. If not empty, this specifies that the hash list is a kind of threat list, and this enumerates the kind of threats associated with hashes or hash prefixes in this hash list. May be empty if the entry does not represent a threat, i.e. in the case that it represents a likely safe type.
+      "A String",
+    ],
+  },
+  "minimumWaitDuration": "A String", # Clients should wait at least this long to get the hash list again. If omitted or zero, clients SHOULD fetch immediately, because it indicates that the server has an additional update to send to the client but could not do so due to the client-specified constraints.
+  "name": "A String", # The name of the hash list. Note that the Global Cache is also just a hash list and can be referred to here.
+  "partialUpdate": True or False, # When true, this is a partial diff containing additions and removals based on what the client already has. When false, this is the complete hash list. When false, the client MUST delete any locally stored version for this hash list. This means that either the version possessed by the client is seriously out-of-date or the client data is believed to be corrupt. The `compressed_removals` field will be empty. When true, the client MUST apply an incremental update by applying removals and then additions.
+  "sha256Checksum": "A String", # The sorted list of all hashes, hashed again with SHA256. This is the checksum for the sorted list of all hashes present in the database after applying the provided update. In the case that no updates were provided, the server will omit this field to indicate that the client should use the existing checksum.
+  "version": "A String", # The version of the hash list. The client MUST NOT manipulate those bytes.
+}
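+
+  Example (editor's sketch, not part of the generated reference): the call
+  below assumes the singular `hashList()` resource exposes a `get` method
+  taking a `name` path parameter alongside the `version` parameter documented
+  above, that "mw" is an illustrative list name, and that the helpers
+  `load_version`, `apply_update`, and `save_version` are hypothetical.
+
+    import base64
+    import hashlib
+
+    from googleapiclient.discovery import build
+
+    service = build("safebrowsing", "v5", developerKey="YOUR_API_KEY")
+
+    kwargs = {"name": "mw"}  # illustrative list name
+    stored_version = load_version()  # hypothetical: opaque version bytes from the last response, or None
+    if stored_version:
+      kwargs["version"] = stored_version  # pass back unmodified; the client MUST NOT manipulate it
+    response = service.hashList().get(**kwargs).execute()
+
+    local_hashes = apply_update(response)  # hypothetical: applies removals, then additions
+
+    # Verify the post-update database against the checksum of the sorted
+    # hashes. Proto3 JSON encodes bytes fields as base64; the field is
+    # omitted when no update was provided.
+    if "sha256Checksum" in response:
+      expected = base64.b64decode(response["sha256Checksum"])
+      actual = hashlib.sha256(b"".join(sorted(local_hashes))).digest()
+      assert actual == expected, "corrupt database: discard and refetch from scratch"
+
+    save_version(response["version"])  # hypothetical persistence; honor minimumWaitDuration before the next fetch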
+
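+
+  The Rice-Golomb commentary above is dense, so the following is a generic,
+  self-contained Golomb-Rice delta decoder for illustration only. The bit
+  order (MSB-first) and the unary convention (quotient as 1-bits terminated
+  by a 0-bit) are assumptions of this sketch and are not claimed to match
+  the exact V5 wire format.
+
+    def rice_decode(data: bytes, k: int, first_value: int, count: int):
+      """Decode `count` Rice-coded deltas with parameter `k`, then prefix-sum them."""
+      bits = "".join(f"{byte:08b}" for byte in data)
+      pos = 0
+      values = [first_value]
+      for _ in range(count):
+        q = 0
+        while bits[pos] == "1":  # unary-coded quotient
+          q += 1
+          pos += 1
+        pos += 1  # consume the terminating 0 bit
+        r = int(bits[pos:pos + k], 2)  # k-bit binary remainder
+        pos += k
+        values.append(values[-1] + ((q << k) | r))
+      return values  # e.g. 32-bit values for `additionsFourBytes`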
+
+
\ No newline at end of file
diff --git a/docs/dyn/safebrowsing_v5.hashLists.html b/docs/dyn/safebrowsing_v5.hashLists.html
new file mode 100644
index 0000000000..2b808c8ece
--- /dev/null
+++ b/docs/dyn/safebrowsing_v5.hashLists.html
@@ -0,0 +1,256 @@
+
+
+
+

Safe Browsing API . hashLists

+

Instance Methods

+

+ batchGet(names=None, sizeConstraints_maxDatabaseEntries=None, sizeConstraints_maxUpdateEntries=None, version=None, x__xgafv=None)

+

Get multiple hash lists at once. It is very common for a client to need to get multiple hash lists. Using this method is preferred over using the regular Get method multiple times. This is a standard batch Get method as defined by https://google.aip.dev/231 and the HTTP method is also GET.

+

+ close()

+

Close httplib2 connections.

+

+ list(pageSize=None, pageToken=None, x__xgafv=None)

+

List hash lists. In the V5 API, Google will never remove a hash list that has ever been returned by this method. This enables clients to skip using this method and simply hard-code all hash lists they need. This is a standard List method as defined by https://google.aip.dev/132 and the HTTP method is GET.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ batchGet(names=None, sizeConstraints_maxDatabaseEntries=None, sizeConstraints_maxUpdateEntries=None, version=None, x__xgafv=None) +
Get multiple hash lists at once. It is very common for a client to need to get multiple hash lists. Using this method is preferred over using the regular Get method multiple times. This is a standard batch Get method as defined by https://google.aip.dev/231 and the HTTP method is also GET.
+
+Args:
+  names: string, Required. The names of the particular hash lists. The list MAY be a threat list, or it may be the Global Cache. The names MUST NOT contain duplicates; if they do, the client will get an error. (repeated)
+  sizeConstraints_maxDatabaseEntries: integer, Sets the maximum number of entries that the client is willing to have in the local database for the list. (The server MAY cause the client to store fewer than this number of entries.) If omitted or zero, no database size limit is set.
+  sizeConstraints_maxUpdateEntries: integer, The maximum size in number of entries. The update will not contain more entries than this value, but it is possible that the update will contain fewer entries than this value. This MUST be at least 1024. If omitted or zero, no update size limit is set.
+  version: string, The versions of the hash list that the client already has. If this is the first time the client is fetching the hash lists, the field should be left empty. Otherwise, the client should supply the versions previously received from the server. The client MUST NOT manipulate those bytes. The client need not send the versions in the same order as the corresponding list names. The client may send fewer or more versions in a request than there are names. However, the client MUST NOT send multiple versions that correspond to the same name; if it does, the client will get an error. Historical note: in V4 of the API, this was called `states`; it is now renamed to `version` for clarity. (repeated)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response containing multiple hash lists.
+  "hashLists": [ # The hash lists in the same order given in the request.
+    { # A list of hashes identified by its name.
+      "additionsEightBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 64-bit numbers. # The 8-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValue": "A String", # The first entry in the encoded data (hashes), or, if only a single hash prefix was encoded, that entry's value. If the field is empty, the entry is zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 35 and 62, inclusive.
+      },
+      "additionsFourBytes": { # The Rice-Golomb encoded data. Used for either hashes or removal indices. It is guaranteed that every hash or index here has the same length, and this length is exactly 32 bits. Generally speaking, if we sort all the entries lexicographically, we will find that the higher order bits tend not to change as frequently as lower order bits. This means that if we also take the adjacent difference between entries, the higher order bits have a high probability of being zero. This exploits this high probability of zero by essentially choosing a certain number of bits; all bits more significant than this are likely to be zero so we use unary encoding. See the `rice_parameter` field. Historical note: the Rice-delta encoding was first used in V4 of this API. In V5, two significant improvements were made: firstly, the Rice-delta encoding is now available with hash prefixes longer than 4 bytes; secondly, the encoded data are now treated as big-endian so as to avoid a costly sorting step. # The 4-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValue": 42, # The first entry in the encoded data (hashes or indices), or, if only a single hash prefix or index was encoded, that entry's value. If the field is empty, the entry is zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 3 and 30, inclusive.
+      },
+      "additionsSixteenBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 128-bit numbers. # The 16-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValueHi": "A String", # The upper 64 bits of the first entry in the encoded data (hashes). If the field is empty, the upper 64 bits are all zero.
+        "firstValueLo": "A String", # The lower 64 bits of the first entry in the encoded data (hashes). If the field is empty, the lower 64 bits are all zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 99 and 126, inclusive.
+      },
+      "additionsThirtyTwoBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 256-bit numbers. # The 32-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValueFirstPart": "A String", # The first 64 bits of the first entry in the encoded data (hashes). If the field is empty, the first 64 bits are all zero.
+        "firstValueFourthPart": "A String", # The last 64 bits of the first entry in the encoded data (hashes). If the field is empty, the last 64 bits are all zero.
+        "firstValueSecondPart": "A String", # The 65th through 128th bits of the first entry in the encoded data (hashes). If the field is empty, the 65th through 128th bits are all zero.
+        "firstValueThirdPart": "A String", # The 129th through 192nd bits of the first entry in the encoded data (hashes). If the field is empty, the 129th through 192nd bits are all zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 227 and 254, inclusive.
+      },
+      "compressedRemovals": { # The Rice-Golomb encoded data. Used for either hashes or removal indices. It is guaranteed that every hash or index here has the same length, and this length is exactly 32 bits. Generally speaking, if we sort all the entries lexicographically, we will find that the higher order bits tend not to change as frequently as lower order bits. This means that if we also take the adjacent difference between entries, the higher order bits have a high probability of being zero. This exploits this high probability of zero by essentially choosing a certain number of bits; all bits more significant than this are likely to be zero so we use unary encoding. See the `rice_parameter` field. Historical note: the Rice-delta encoding was first used in V4 of this API. In V5, two significant improvements were made: firstly, the Rice-delta encoding is now available with hash prefixes longer than 4 bytes; secondly, the encoded data are now treated as big-endian so as to avoid a costly sorting step. # The Rice-delta encoded version of removal indices. Since each hash list definitely has less than 2^32 entries, the indices are treated as 32-bit integers and encoded.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValue": 42, # The first entry in the encoded data (hashes or indices), or, if only a single hash prefix or index was encoded, that entry's value. If the field is empty, the entry is zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 3 and 30, inclusive.
+      },
+      "metadata": { # Metadata about a particular hash list. # Metadata about the hash list. This is not populated by the `GetHashList` method; it is populated by the `ListHashLists` method.
+        "description": "A String", # A human-readable description about this list. Written in English.
+        "hashLength": "A String", # The supported hash length for this hash list. Each hash list will support exactly one length. If a different hash length is introduced for the same set of threat types or safe types, it will be introduced as a separate list with a distinct name and respective hash length set.
+        "likelySafeTypes": [ # Unordered list. If not empty, this specifies that the hash list represents a list of likely safe hashes, and this enumerates the ways they are considered likely safe. This field is mutually exclusive with the threat_types field.
+          "A String",
+        ],
+        "threatTypes": [ # Unordered list. If not empty, this specifies that the hash list is a kind of threat list, and this enumerates the kind of threats associated with hashes or hash prefixes in this hash list. May be empty if the entry does not represent a threat, i.e. in the case that it represents a likely safe type.
+          "A String",
+        ],
+      },
+      "minimumWaitDuration": "A String", # Clients should wait at least this long to get the hash list again. If omitted or zero, clients SHOULD fetch immediately, because it indicates that the server has an additional update to send to the client but could not do so due to the client-specified constraints.
+      "name": "A String", # The name of the hash list. Note that the Global Cache is also just a hash list and can be referred to here.
+      "partialUpdate": True or False, # When true, this is a partial diff containing additions and removals based on what the client already has. When false, this is the complete hash list. When false, the client MUST delete any locally stored version for this hash list. This means that either the version possessed by the client is seriously out-of-date or the client data is believed to be corrupt. The `compressed_removals` field will be empty. When true, the client MUST apply an incremental update by applying removals and then additions.
+      "sha256Checksum": "A String", # The sorted list of all hashes, hashed again with SHA256. This is the checksum for the sorted list of all hashes present in the database after applying the provided update. In the case that no updates were provided, the server will omit this field to indicate that the client should use the existing checksum.
+      "version": "A String", # The version of the hash list. The client MUST NOT manipulate those bytes.
+    },
+  ],
+}
+
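+
+  Example (editor's sketch, not part of the generated reference): a batch
+  fetch reusing the `service` object from the earlier `get` sketch. The
+  list names and the `store_update` helper are hypothetical; repeated query
+  parameters are passed as Python lists.
+
+    names = ["gc", "mw"]  # hypothetical: Global Cache plus one threat list
+    response = service.hashLists().batchGet(
+        names=names,
+        version=[gc_version, mw_version],  # opaque versions from prior responses
+        sizeConstraints_maxUpdateEntries=4096,  # when set, MUST be at least 1024
+    ).execute()
+    for name, hash_list in zip(names, response["hashLists"]):
+      # hashLists come back in the same order as the requested names.
+      store_update(name, hash_list)  # hypothetical local persistence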
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ list(pageSize=None, pageToken=None, x__xgafv=None) +
List hash lists. In the V5 API, Google will never remove a hash list that has ever been returned by this method. This enables clients to skip using this method and simply hard-code all hash lists they need. This is a standard List method as defined by https://google.aip.dev/132 and the HTTP method is GET.
+
+Args:
+  pageSize: integer, The maximum number of hash lists to return. The service may return fewer than this value. If unspecified, the server will choose a page size, which may be larger than the number of hash lists so that pagination is not necessary.
+  pageToken: string, A page token, received from a previous `ListHashLists` call. Provide this to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response containing metadata about hash lists.
+  "hashLists": [ # The hash lists in an arbitrary order. Only metadata about the hash lists will be included, not the contents.
+    { # A list of hashes identified by its name.
+      "additionsEightBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 64-bit numbers. # The 8-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValue": "A String", # The first entry in the encoded data (hashes), or, if only a single hash prefix was encoded, that entry's value. If the field is empty, the entry is zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 35 and 62, inclusive.
+      },
+      "additionsFourBytes": { # The Rice-Golomb encoded data. Used for either hashes or removal indices. It is guaranteed that every hash or index here has the same length, and this length is exactly 32 bits. Generally speaking, if we sort all the entries lexicographically, we will find that the higher order bits tend not to change as frequently as lower order bits. This means that if we also take the adjacent difference between entries, the higher order bits have a high probability of being zero. This exploits this high probability of zero by essentially choosing a certain number of bits; all bits more significant than this are likely to be zero so we use unary encoding. See the `rice_parameter` field. Historical note: the Rice-delta encoding was first used in V4 of this API. In V5, two significant improvements were made: firstly, the Rice-delta encoding is now available with hash prefixes longer than 4 bytes; secondly, the encoded data are now treated as big-endian so as to avoid a costly sorting step. # The 4-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValue": 42, # The first entry in the encoded data (hashes or indices), or, if only a single hash prefix or index was encoded, that entry's value. If the field is empty, the entry is zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 3 and 30, inclusive.
+      },
+      "additionsSixteenBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 128-bit numbers. # The 16-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValueHi": "A String", # The upper 64 bits of the first entry in the encoded data (hashes). If the field is empty, the upper 64 bits are all zero.
+        "firstValueLo": "A String", # The lower 64 bits of the first entry in the encoded data (hashes). If the field is empty, the lower 64 bits are all zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 99 and 126, inclusive.
+      },
+      "additionsThirtyTwoBytes": { # Same as `RiceDeltaEncoded32Bit` except this encodes 256-bit numbers. # The 32-byte additions.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValueFirstPart": "A String", # The first 64 bits of the first entry in the encoded data (hashes). If the field is empty, the first 64 bits are all zero.
+        "firstValueFourthPart": "A String", # The last 64 bits of the first entry in the encoded data (hashes). If the field is empty, the last 64 bits are all zero.
+        "firstValueSecondPart": "A String", # The 65th through 128th bits of the first entry in the encoded data (hashes). If the field is empty, the 65th through 128th bits are all zero.
+        "firstValueThirdPart": "A String", # The 129th through 192nd bits of the first entry in the encoded data (hashes). If the field is empty, the 129th through 192nd bits are all zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 227 and 254, inclusive.
+      },
+      "compressedRemovals": { # The Rice-Golomb encoded data. Used for either hashes or removal indices. It is guaranteed that every hash or index here has the same length, and this length is exactly 32 bits. Generally speaking, if we sort all the entries lexicographically, we will find that the higher order bits tend not to change as frequently as lower order bits. This means that if we also take the adjacent difference between entries, the higher order bits have a high probability of being zero. This exploits this high probability of zero by essentially choosing a certain number of bits; all bits more significant than this are likely to be zero so we use unary encoding. See the `rice_parameter` field. Historical note: the Rice-delta encoding was first used in V4 of this API. In V5, two significant improvements were made: firstly, the Rice-delta encoding is now available with hash prefixes longer than 4 bytes; secondly, the encoded data are now treated as big-endian so as to avoid a costly sorting step. # The Rice-delta encoded version of removal indices. Since each hash list definitely has less than 2^32 entries, the indices are treated as 32-bit integers and encoded.
+        "encodedData": "A String", # The encoded deltas that are encoded using the Golomb-Rice coder.
+        "entriesCount": 42, # The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.
+        "firstValue": 42, # The first entry in the encoded data (hashes or indices), or, if only a single hash prefix or index was encoded, that entry's value. If the field is empty, the entry is zero.
+        "riceParameter": 42, # The Golomb-Rice parameter. This parameter is guaranteed to be between 3 and 30, inclusive.
+      },
+      "metadata": { # Metadata about a particular hash list. # Metadata about the hash list. This is not populated by the `GetHashList` method; it is populated by the `ListHashLists` method.
+        "description": "A String", # A human-readable description about this list. Written in English.
+        "hashLength": "A String", # The supported hash length for this hash list. Each hash list will support exactly one length. If a different hash length is introduced for the same set of threat types or safe types, it will be introduced as a separate list with a distinct name and respective hash length set.
+        "likelySafeTypes": [ # Unordered list. If not empty, this specifies that the hash list represents a list of likely safe hashes, and this enumerates the ways they are considered likely safe. This field is mutually exclusive with the threat_types field.
+          "A String",
+        ],
+        "threatTypes": [ # Unordered list. If not empty, this specifies that the hash list is a kind of threat list, and this enumerates the kind of threats associated with hashes or hash prefixes in this hash list. May be empty if the entry does not represent a threat, i.e. in the case that it represents a likely safe type.
+          "A String",
+        ],
+      },
+      "minimumWaitDuration": "A String", # Clients should wait at least this long to get the hash list again. If omitted or zero, clients SHOULD fetch immediately, because it indicates that the server has an additional update to send to the client but could not do so due to the client-specified constraints.
+      "name": "A String", # The name of the hash list. Note that the Global Cache is also just a hash list and can be referred to here.
+      "partialUpdate": True or False, # When true, this is a partial diff containing additions and removals based on what the client already has. When false, this is the complete hash list. When false, the client MUST delete any locally stored version for this hash list. This means that either the version possessed by the client is seriously out-of-date or the client data is believed to be corrupt. The `compressed_removals` field will be empty. When true, the client MUST apply an incremental update by applying removals and then additions.
+      "sha256Checksum": "A String", # The sorted list of all hashes, hashed again with SHA256. This is the checksum for the sorted list of all hashes present in the database after applying the provided update. In the case that no updates were provided, the server will omit this field to indicate that the client should use the existing checksum.
+      "version": "A String", # The version of the hash list. The client MUST NOT manipulate those bytes.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
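+
+  Example (editor's sketch, not part of the generated reference): the
+  standard pagination idiom with `list` and `list_next`, reusing the
+  `service` object from the earlier sketches. Only metadata is populated
+  on each returned hash list.
+
+    request = service.hashLists().list(pageSize=100)
+    while request is not None:
+      response = request.execute()
+      for hash_list in response.get("hashLists", []):
+        meta = hash_list.get("metadata", {})
+        print(hash_list["name"], meta.get("description"))
+      request = service.hashLists().list_next(request, response)  # None when no more pages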
+
+
\ No newline at end of file
diff --git a/docs/dyn/safebrowsing_v5.html b/docs/dyn/safebrowsing_v5.html
index 584178f035..147ca1c8df 100644
--- a/docs/dyn/safebrowsing_v5.html
+++ b/docs/dyn/safebrowsing_v5.html
@@ -74,6 +74,16 @@

Safe Browsing API

Instance Methods

+

+ hashList() +

+

Returns the hashList Resource.

+ +

+ hashLists() +

+

Returns the hashLists Resource.

+

hashes()

diff --git a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html
index 7291f1cd87..a66af1e4b8 100644
--- a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html
+++ b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html
@@ -853,6 +853,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1134,6 +1139,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1154,6 +1164,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1170,6 +1185,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
diff --git a/docs/dyn/securitycenter_v1.folders.sources.findings.html b/docs/dyn/securitycenter_v1.folders.sources.findings.html
index 1f2a2516c8..d445c2ae8b 100644
--- a/docs/dyn/securitycenter_v1.folders.sources.findings.html
+++ b/docs/dyn/securitycenter_v1.folders.sources.findings.html
@@ -422,6 +422,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -703,6 +708,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -723,6 +733,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -739,6 +754,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1168,6 +1188,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1449,6 +1474,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1469,6 +1499,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1485,6 +1520,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1828,6 +1868,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -2109,6 +2154,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -2129,6 +2179,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -2145,6 +2200,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -2501,6 +2561,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -2782,6 +2847,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -2802,6 +2872,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -2818,6 +2893,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -3175,6 +3255,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -3456,6 +3541,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -3476,6 +3566,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -3492,6 +3587,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
diff --git a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
index 64eaa44060..50969a9168 100644
--- a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
+++ b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
@@ -853,6 +853,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1134,6 +1139,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1154,6 +1164,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -1170,6 +1185,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
diff --git a/docs/dyn/securitycenter_v1.organizations.sources.findings.html b/docs/dyn/securitycenter_v1.organizations.sources.findings.html
index 1c4bb4e803..534915cc93 100644
--- a/docs/dyn/securitycenter_v1.organizations.sources.findings.html
+++ b/docs/dyn/securitycenter_v1.organizations.sources.findings.html
@@ -355,6 +355,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -636,6 +641,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -656,6 +666,11 @@

Method Details

      "relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh
    },
    "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file.
+    "operations": [ # Operation(s) performed on a file.
+      { # Operation(s) performed on a file.
+        "type": "A String", # The type of the operation
+      },
+    ],
    "partiallyHashed": True or False, # True when the hash covers only a prefix of the file.
    "path": "A String", # Absolute path of the file as a JSON encoded string.
    "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file.
@@ -672,6 +687,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1015,6 +1035,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1296,6 +1321,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1316,6 +1346,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1332,6 +1367,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1746,6 +1786,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2027,6 +2072,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2047,6 +2097,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2063,6 +2118,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2492,6 +2552,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2773,6 +2838,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2793,6 +2863,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2809,6 +2884,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3152,6 +3232,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3433,6 +3518,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3453,6 +3543,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3469,6 +3564,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3825,6 +3925,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -4106,6 +4211,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -4126,6 +4236,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -4142,6 +4257,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -4499,6 +4619,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -4780,6 +4905,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -4800,6 +4930,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -4816,6 +4951,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. diff --git a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html index c6f54273c7..ea189b317c 100644 --- a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html +++ b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html @@ -853,6 +853,11 @@

diff --git a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html
index c6f54273c7..ea189b317c 100644
--- a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html
+++ b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html
@@ -853,6 +853,11 @@ <h3>Method Details</h3>
"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1134,6 +1139,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1154,6 +1164,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1170,6 +1185,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. diff --git a/docs/dyn/securitycenter_v1.projects.sources.findings.html b/docs/dyn/securitycenter_v1.projects.sources.findings.html index 1b6d6eb7b5..c561f199fb 100644 --- a/docs/dyn/securitycenter_v1.projects.sources.findings.html +++ b/docs/dyn/securitycenter_v1.projects.sources.findings.html @@ -422,6 +422,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -703,6 +708,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -723,6 +733,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -739,6 +754,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1168,6 +1188,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1449,6 +1474,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1469,6 +1499,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1485,6 +1520,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -1828,6 +1868,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2109,6 +2154,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2129,6 +2179,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2145,6 +2200,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2501,6 +2561,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2782,6 +2847,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2802,6 +2872,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -2818,6 +2893,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3175,6 +3255,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3456,6 +3541,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3476,6 +3566,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. @@ -3492,6 +3587,11 @@

Method Details

"relativePath": "A String", # Relative path of the file in the partition as a JSON encoded string. Example: /home/user1/executable_file.sh }, "hashedSize": "A String", # The length in bytes of the file prefix that was hashed. If hashed_size == size, any hashes reported represent the entire file. + "operations": [ # Operation(s) performed on a file. + { # Operation(s) performed on a file. + "type": "A String", # The type of the operation + }, + ], "partiallyHashed": True or False, # True when the hash covers only a prefix of the file. "path": "A String", # Absolute path of the file as a JSON encoded string. "sha256": "A String", # SHA256 hash of the first hashed_size bytes of the file encoded as a hex string. If hashed_size == size, sha256 represents the SHA256 hash of the entire file. diff --git a/docs/dyn/servicemanagement_v1.services.configs.html b/docs/dyn/servicemanagement_v1.services.configs.html index bdfe0e5b4d..757c92cbed 100644 --- a/docs/dyn/servicemanagement_v1.services.configs.html +++ b/docs/dyn/servicemanagement_v1.services.configs.html @@ -622,6 +622,25 @@

Method Details

"autoPopulatedFields": [ # List of top-level fields of the request message, that should be automatically populated by the client libraries based on their (google.api.field_info).format. Currently supported format: UUID4. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.CreateExample auto_populated_fields: - request_id "A String", ], + "batching": { # `BatchingConfigProto` defines the batching configuration for an API method. # Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10 + "batchDescriptor": { # `BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing. # The request and response fields used in batching. + "batchedField": "A String", # The repeated field in the request message to be aggregated by batching. + "discriminatorFields": [ # A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests. + "A String", + ], + "subresponseField": "A String", # Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together. + }, + "thresholds": { # `BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. At least one threshold must be positive nonzero. # The thresholds which trigger a batched request to be sent. + "delayThreshold": "A String", # The duration after which a batch should be sent, starting from the addition of the first message to that batch. + "elementCountLimit": 42, # The maximum number of elements collected in a batch that could be accepted by server. + "elementCountThreshold": 42, # The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent. + "flowControlByteLimit": 42, # The maximum size of data allowed by flow control. + "flowControlElementLimit": 42, # The maximum number of elements allowed by flow control. + "flowControlLimitExceededBehavior": "A String", # The behavior to take when the flow control limit is exceeded. + "requestByteLimit": 42, # The maximum size of the request that could be accepted by server. + "requestByteThreshold": "A String", # The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message. + }, + }, "longRunning": { # Describes settings to use when generating API methods that use the long-running operation pattern. All default values below are from those used in the client library generators (e.g. [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). # Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. 
Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes "initialPollDelay": "A String", # Initial delay after which the first poll request will be made. Default value: 5 seconds. "maxPollDelay": "A String", # Maximum time between two subsequent poll requests. Default value: 45 seconds. @@ -1306,6 +1325,25 @@
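The YAML example embedded in the `batching` comment above is flattened; expanded into the JSON field spellings of this schema, a `methodSettings` entry with batching might look like the following Python dict. This is a sketch only: the `batchedField` and discriminator values are hypothetical placeholders, and the `delayThreshold` Duration-string spelling is an assumption.

# Sketch only: threshold values follow the YAML example in the comment above;
# "examples" and "parent" are hypothetical field names for illustration.
method_settings_entry = {
    "selector": "google.example.v1.ExampleService.BatchCreateExample",
    "batching": {
        "batchDescriptor": {
            "batchedField": "examples",
            "discriminatorFields": ["parent"],
        },
        "thresholds": {
            "elementCountThreshold": 1000,
            "requestByteThreshold": "100000000",
            "delayThreshold": "0.010s",  # 10 ms, expressed as a Duration string (assumed spelling)
        },
    },
}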

@@ -1306,6 +1325,25 @@ <h3>Method Details</h3>
"autoPopulatedFields": [ # List of top-level fields of the request message, that should be automatically populated by the client libraries based on their (google.api.field_info).format. Currently supported format: UUID4. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.CreateExample auto_populated_fields: - request_id "A String", ], + "batching": { # `BatchingConfigProto` defines the batching configuration for an API method. # Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10 + "batchDescriptor": { # `BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing. # The request and response fields used in batching. + "batchedField": "A String", # The repeated field in the request message to be aggregated by batching. + "discriminatorFields": [ # A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests. + "A String", + ], + "subresponseField": "A String", # Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together. + }, + "thresholds": { # `BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. At least one threshold must be positive nonzero. # The thresholds which trigger a batched request to be sent. + "delayThreshold": "A String", # The duration after which a batch should be sent, starting from the addition of the first message to that batch. + "elementCountLimit": 42, # The maximum number of elements collected in a batch that could be accepted by server. + "elementCountThreshold": 42, # The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent. + "flowControlByteLimit": 42, # The maximum size of data allowed by flow control. + "flowControlElementLimit": 42, # The maximum number of elements allowed by flow control. + "flowControlLimitExceededBehavior": "A String", # The behavior to take when the flow control limit is exceeded. + "requestByteLimit": 42, # The maximum size of the request that could be accepted by server. + "requestByteThreshold": "A String", # The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message. + }, + }, "longRunning": { # Describes settings to use when generating API methods that use the long-running operation pattern. All default values below are from those used in the client library generators (e.g. [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). # Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. 
Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes "initialPollDelay": "A String", # Initial delay after which the first poll request will be made. Default value: 5 seconds. "maxPollDelay": "A String", # Maximum time between two subsequent poll requests. Default value: 45 seconds. @@ -2002,6 +2040,25 @@
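The long-running defaults quoted in this schema (initial delay 5 s, capped at 45 s, with a 1.5 multiplier as in the YAML example) imply an exponential poll schedule. A small sketch of the delays a client library would wait between polls under those values; the total timeout here is an illustrative assumption, not a documented default:

def poll_delays(initial=5.0, multiplier=1.5, max_delay=45.0, total_timeout=300.0):
    # Yields successive poll delays in seconds until the total timeout
    # would be exceeded; initial/multiplier/max mirror the values quoted above.
    elapsed, delay = 0.0, initial
    while elapsed + delay <= total_timeout:
        yield delay
        elapsed += delay
        delay = min(delay * multiplier, max_delay)

# e.g. list(poll_delays()) starts [5.0, 7.5, 11.25, 16.875, 25.3125, 37.96875, 45.0, ...]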

@@ -2002,6 +2040,25 @@ <h3>Method Details</h3>
"autoPopulatedFields": [ # List of top-level fields of the request message, that should be automatically populated by the client libraries based on their (google.api.field_info).format. Currently supported format: UUID4. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.CreateExample auto_populated_fields: - request_id "A String", ], + "batching": { # `BatchingConfigProto` defines the batching configuration for an API method. # Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10 + "batchDescriptor": { # `BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing. # The request and response fields used in batching. + "batchedField": "A String", # The repeated field in the request message to be aggregated by batching. + "discriminatorFields": [ # A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests. + "A String", + ], + "subresponseField": "A String", # Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together. + }, + "thresholds": { # `BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. At least one threshold must be positive nonzero. # The thresholds which trigger a batched request to be sent. + "delayThreshold": "A String", # The duration after which a batch should be sent, starting from the addition of the first message to that batch. + "elementCountLimit": 42, # The maximum number of elements collected in a batch that could be accepted by server. + "elementCountThreshold": 42, # The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent. + "flowControlByteLimit": 42, # The maximum size of data allowed by flow control. + "flowControlElementLimit": 42, # The maximum number of elements allowed by flow control. + "flowControlLimitExceededBehavior": "A String", # The behavior to take when the flow control limit is exceeded. + "requestByteLimit": 42, # The maximum size of the request that could be accepted by server. + "requestByteThreshold": "A String", # The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message. + }, + }, "longRunning": { # Describes settings to use when generating API methods that use the long-running operation pattern. All default values below are from those used in the client library generators (e.g. [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). # Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. 
Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes "initialPollDelay": "A String", # Initial delay after which the first poll request will be made. Default value: 5 seconds. "maxPollDelay": "A String", # Maximum time between two subsequent poll requests. Default value: 45 seconds. @@ -2698,6 +2755,25 @@

Method Details

"autoPopulatedFields": [ # List of top-level fields of the request message, that should be automatically populated by the client libraries based on their (google.api.field_info).format. Currently supported format: UUID4. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.CreateExample auto_populated_fields: - request_id "A String", ], + "batching": { # `BatchingConfigProto` defines the batching configuration for an API method. # Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10 + "batchDescriptor": { # `BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing. # The request and response fields used in batching. + "batchedField": "A String", # The repeated field in the request message to be aggregated by batching. + "discriminatorFields": [ # A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests. + "A String", + ], + "subresponseField": "A String", # Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together. + }, + "thresholds": { # `BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. At least one threshold must be positive nonzero. # The thresholds which trigger a batched request to be sent. + "delayThreshold": "A String", # The duration after which a batch should be sent, starting from the addition of the first message to that batch. + "elementCountLimit": 42, # The maximum number of elements collected in a batch that could be accepted by server. + "elementCountThreshold": 42, # The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent. + "flowControlByteLimit": 42, # The maximum size of data allowed by flow control. + "flowControlElementLimit": 42, # The maximum number of elements allowed by flow control. + "flowControlLimitExceededBehavior": "A String", # The behavior to take when the flow control limit is exceeded. + "requestByteLimit": 42, # The maximum size of the request that could be accepted by server. + "requestByteThreshold": "A String", # The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message. + }, + }, "longRunning": { # Describes settings to use when generating API methods that use the long-running operation pattern. All default values below are from those used in the client library generators (e.g. [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). # Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. 
Example of a YAML configuration:

    publishing:
      method_settings:
        - selector: google.cloud.speech.v2.Speech.BatchRecognize
          long_running:
            initial_poll_delay: 60s # 1 minute
            poll_delay_multiplier: 1.5
            max_poll_delay: 360s # 6 minutes
            total_poll_timeout: 54000s # 90 minutes

  "initialPollDelay": "A String", # Initial delay after which the first poll request will be made. Default value: 5 seconds.
  "maxPollDelay": "A String", # Maximum time between two subsequent poll requests. Default value: 45 seconds.
diff --git a/docs/dyn/servicemanagement_v1.services.html b/docs/dyn/servicemanagement_v1.services.html
index 50d23057e1..7bc55160bc 100644
--- a/docs/dyn/servicemanagement_v1.services.html
+++ b/docs/dyn/servicemanagement_v1.services.html
@@ -817,6 +817,25 @@

Method Details

"autoPopulatedFields": [ # List of top-level fields of the request message, that should be automatically populated by the client libraries based on their (google.api.field_info).format. Currently supported format: UUID4. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.CreateExample auto_populated_fields: - request_id "A String", ], + "batching": { # `BatchingConfigProto` defines the batching configuration for an API method. # Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10 + "batchDescriptor": { # `BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing. # The request and response fields used in batching. + "batchedField": "A String", # The repeated field in the request message to be aggregated by batching. + "discriminatorFields": [ # A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests. + "A String", + ], + "subresponseField": "A String", # Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together. + }, + "thresholds": { # `BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. At least one threshold must be positive nonzero. # The thresholds which trigger a batched request to be sent. + "delayThreshold": "A String", # The duration after which a batch should be sent, starting from the addition of the first message to that batch. + "elementCountLimit": 42, # The maximum number of elements collected in a batch that could be accepted by server. + "elementCountThreshold": 42, # The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent. + "flowControlByteLimit": 42, # The maximum size of data allowed by flow control. + "flowControlElementLimit": 42, # The maximum number of elements allowed by flow control. + "flowControlLimitExceededBehavior": "A String", # The behavior to take when the flow control limit is exceeded. + "requestByteLimit": 42, # The maximum size of the request that could be accepted by server. + "requestByteThreshold": "A String", # The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message. + }, + }, "longRunning": { # Describes settings to use when generating API methods that use the long-running operation pattern. All default values below are from those used in the client library generators (e.g. [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). # Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. 
Example of a YAML configuration:

    publishing:
      method_settings:
        - selector: google.cloud.speech.v2.Speech.BatchRecognize
          long_running:
            initial_poll_delay: 60s # 1 minute
            poll_delay_multiplier: 1.5
            max_poll_delay: 360s # 6 minutes
            total_poll_timeout: 54000s # 90 minutes

  "initialPollDelay": "A String", # Initial delay after which the first poll request will be made. Default value: 5 seconds.
  "maxPollDelay": "A String", # Maximum time between two subsequent poll requests. Default value: 45 seconds.
diff --git a/docs/dyn/servicenetworking_v1.services.connections.html b/docs/dyn/servicenetworking_v1.services.connections.html
index 9a9622daab..bdcd88deb3 100644
--- a/docs/dyn/servicenetworking_v1.services.connections.html
+++ b/docs/dyn/servicenetworking_v1.services.connections.html
@@ -189,8 +189,8 @@

Method Details

List the private connections that are configured in a service consumer's VPC network.
 
 Args:
-  parent: string, The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`. If you specify `services/-` as the parameter value, all configured peering services are listed. (required)
-  network: string, The name of service consumer's VPC network that's connected with service producer network through a private connection. The network name must be in the following format: `projects/{project}/global/networks/{network}`. {project} is a project number, such as in `12345` that includes the VPC service consumer's VPC network. {network} is the name of the service consumer's VPC network.
+  parent: string, Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`. If you specify `services/-` as the parameter value, all configured peering services are listed. (required)
+  network: string, Required. The name of the service consumer's VPC network that's connected with the service producer network through a private connection. The network name must be in the following format: `projects/{project}/global/networks/{network}`. {project} is a project number, such as `12345`, for the project that contains the service consumer's VPC network. {network} is the name of the service consumer's VPC network.
   x__xgafv: string, V1 error format.
     Allowed values
      1 - v1 error format
      2 - v2 error format
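For orientation, a call to this method through the Python client could look like the sketch below; the project number and network name are placeholders, and application-default credentials are assumed:

    from googleapiclient.discovery import build

    # Service Networking client; credentials come from the environment.
    service = build("servicenetworking", "v1")

    # List private connections; pass parent="services/-" to list across all
    # configured peering services instead.
    response = service.services().connections().list(
        parent="services/servicenetworking.googleapis.com",
        network="projects/12345/global/networks/default",
    ).execute()

    for connection in response.get("connections", []):
        print(connection)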
diff --git a/docs/dyn/sheets_v4.spreadsheets.html b/docs/dyn/sheets_v4.spreadsheets.html
index 8fb35a7ca1..f683349789 100644
--- a/docs/dyn/sheets_v4.spreadsheets.html
+++ b/docs/dyn/sheets_v4.spreadsheets.html
@@ -120,7 +120,8 @@ 

Method Details

{ # A single kind of update to apply to a spreadsheet.
  "addBanding": { # Adds a new banded range to the spreadsheet. # Adds a new banded range
    "bandedRange": { # A banded (alternating colors) range in a sheet. # The banded range to add. The bandedRangeId field is optional; if one is not set, an id will be randomly generated. (It is an error to specify the ID of a range that already exists.)
-     "bandedRangeId": 42, # The ID of the banded range.
+     "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference.
+     "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id.
      "columnProperties": { # Properties referring to a single dimension (either row or column). If both BandedRange.row_properties and BandedRange.column_properties are set, the fill colors are applied to cells according to the following rules: * header_color and footer_color take priority over band colors. * first_band_color takes priority over second_band_color. * row_properties takes priority over column_properties. For example, the first row color takes priority over the first column color, but the first column color takes priority over the second row color. Similarly, the row header takes priority over the column header in the top left cell, but the column header takes priority over the first row color if the row header is not set. # Properties for column bands. These properties are applied on a column-by-column basis throughout all the columns in the range. At least one of row_properties or column_properties must be specified.
        "firstBandColor": { # Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { resultBuilder.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result autorelease]; return result; } // ... Example (JavaScript): // ... var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); } resultBuilder.push(hexString); return resultBuilder.join(''); }; // ... # The first color that is alternating. (Required) Deprecated: Use first_band_color_style.
          "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0).
@@ -4660,7 +4661,8 @@
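Concretely, the `bandedRange` object above travels inside an `addBanding` request to `spreadsheets.batchUpdate`. A minimal sketch that adds a banded range and reads the server-assigned ID back from the reply (the spreadsheet ID and grid range are placeholders):

    from googleapiclient.discovery import build

    service = build("sheets", "v4")

    body = {
        "requests": [{
            "addBanding": {
                "bandedRange": {
                    # bandedRangeId is omitted so the server generates one.
                    "range": {"sheetId": 0, "startRowIndex": 0, "endRowIndex": 10,
                              "startColumnIndex": 0, "endColumnIndex": 4},
                    "rowProperties": {
                        "headerColor": {"red": 0.85, "green": 0.85, "blue": 0.85},
                        "firstBandColor": {"red": 1.0, "green": 1.0, "blue": 1.0},
                        "secondBandColor": {"red": 0.95, "green": 0.95, "blue": 0.95},
                    },
                },
            },
        }],
    }

    reply = service.spreadsheets().batchUpdate(
        spreadsheetId="YOUR_SPREADSHEET_ID", body=body).execute()
    added = reply["replies"][0]["addBanding"]["bandedRange"]
    print(added.get("bandedRangeId"))  # if unset, fall back to bandedRangeReference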

Method Details

}, "updateBanding": { # Updates properties of the supplied banded range. # Updates a banded range "bandedRange": { # A banded (alternating colors) range in a sheet. # The banded range to update with the new properties. - "bandedRangeId": 42, # The ID of the banded range. + "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference. + "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id. "columnProperties": { # Properties referring a single dimension (either row or column). If both BandedRange.row_properties and BandedRange.column_properties are set, the fill colors are applied to cells according to the following rules: * header_color and footer_color take priority over band colors. * first_band_color takes priority over second_band_color. * row_properties takes priority over column_properties. For example, the first row color takes priority over the first column color, but the first column color takes priority over the second row color. Similarly, the row header takes priority over the column header in the top left cell, but the column header takes priority over the first row color if the row header is not set. # Properties for column bands. These properties are applied on a column- by-column basis throughout all the columns in the range. At least one of row_properties or column_properties must be specified. "firstBandColor": { # Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... 
static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result autorelease]; return result; } // ... Example (JavaScript): // ... var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); } resultBuilder.push(hexString); return resultBuilder.join(''); }; // ... # The first color that is alternating. (Required) Deprecated: Use first_band_color_style. "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0). @@ -8321,7 +8323,8 @@

Method Details

{ # A single response from an update.
  "addBanding": { # The result of adding a banded range. # A reply from adding a banded range.
    "bandedRange": { # A banded (alternating colors) range in a sheet. # The banded range that was added.
-     "bandedRangeId": 42, # The ID of the banded range.
+     "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference.
+     "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id.
      "columnProperties": { # Properties referring to a single dimension (either row or column). …
        "firstBandColor": { # Represents a color in the RGBA color space. …
          "alpha": 3.14, # The fraction of this color that should be applied to the pixel. …
@@ -12013,7 +12016,8 @@

Method Details

{ # A sheet in a spreadsheet.
  "bandedRanges": [ # The banded (alternating colors) ranges on this sheet.
    { # A banded (alternating colors) range in a sheet.
-     "bandedRangeId": 42, # The ID of the banded range.
+     "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference.
+     "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id.
      "columnProperties": { # Properties referring to a single dimension (either row or column). …
        "firstBandColor": { # Represents a color in the RGBA color space. …
          "alpha": 3.14, # The fraction of this color that should be applied to the pixel. …
@@ -15750,7 +15754,8 @@
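To see which of these fields a given sheet actually carries, the banded ranges can be fetched with a field mask on `spreadsheets.get`; a small sketch (the spreadsheet ID is a placeholder):

    from googleapiclient.discovery import build

    service = build("sheets", "v4")

    # Fetch only the banded ranges of each sheet.
    spreadsheet = service.spreadsheets().get(
        spreadsheetId="YOUR_SPREADSHEET_ID",
        fields="sheets(bandedRanges(bandedRangeId,bandedRangeReference,range))",
    ).execute()

    for sheet in spreadsheet.get("sheets", []):
        for banded in sheet.get("bandedRanges", []):
            # Prefer the numeric ID; fall back to the output-only reference.
            print(banded.get("bandedRangeId") or banded.get("bandedRangeReference"))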

Method Details

{ # A sheet in a spreadsheet.
  "bandedRanges": [ # The banded (alternating colors) ranges on this sheet.
    { # A banded (alternating colors) range in a sheet.
-     "bandedRangeId": 42, # The ID of the banded range.
+     "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference.
+     "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id.
      "columnProperties": { # Properties referring to a single dimension (either row or column). …
        "firstBandColor": { # Represents a color in the RGBA color space. …
          "alpha": 3.14, # The fraction of this color that should be applied to the pixel. …
@@ -19480,7 +19485,8 @@

Method Details

{ # A sheet in a spreadsheet.
  "bandedRanges": [ # The banded (alternating colors) ranges on this sheet.
    { # A banded (alternating colors) range in a sheet.
-     "bandedRangeId": 42, # The ID of the banded range.
+     "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference.
+     "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id.
      "columnProperties": { # Properties referring to a single dimension (either row or column). …
        "firstBandColor": { # Represents a color in the RGBA color space. …
          "alpha": 3.14, # The fraction of this color that should be applied to the pixel. …
@@ -23220,7 +23226,8 @@

Method Details

{ # A sheet in a spreadsheet.
  "bandedRanges": [ # The banded (alternating colors) ranges on this sheet.
    { # A banded (alternating colors) range in a sheet.
-     "bandedRangeId": 42, # The ID of the banded range.
+     "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference.
+     "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id.
      "columnProperties": { # Properties referring to a single dimension (either row or column). …
        "firstBandColor": { # Represents a color in the RGBA color space. …
          "alpha": 3.14, # The fraction of this color that should be applied to the pixel. …
@@ -26996,7 +27003,8 @@

Method Details

{ # A sheet in a spreadsheet.
  "bandedRanges": [ # The banded (alternating colors) ranges on this sheet.
    { # A banded (alternating colors) range in a sheet.
-     "bandedRangeId": 42, # The ID of the banded range.
+     "bandedRangeId": 42, # The ID of the banded range. If unset, refer to banded_range_reference.
+     "bandedRangeReference": "A String", # Output only. The reference of the banded range, used to identify the ID that is not supported by the banded_range_id.
      "columnProperties": { # Properties referring to a single dimension (either row or column). …
        "firstBandColor": { # Represents a color in the RGBA color space. …
          "alpha": 3.14, # The fraction of this color that should be applied to the pixel. …
diff --git a/docs/dyn/spanner_v1.projects.instances.databases.html b/docs/dyn/spanner_v1.projects.instances.databases.html
index edaa2a7823..bcbba55d96 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.html
@@ -96,7 +96,7 @@

Instance Methods

addSplitPoints(database, body=None, x__xgafv=None)

-Adds split points to specified tables, indexes of a database.

+Adds split points to specified tables and indexes of a database.

changequorum(name, body=None, x__xgafv=None)

`ChangeQuorum` is strictly restricted to databases that use dual-region instance configurations. Initiates a background operation to change the quorum of a database from dual-region mode to single-region mode or vice versa. The returned long-running operation has a name of the format `projects//instances//databases//operations/` and can be used to track execution of the `ChangeQuorum`. The metadata field type is ChangeQuorumMetadata. Authorization requires `spanner.databases.changequorum` permission on the resource database.
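Because `ChangeQuorum` returns a long-running operation, callers start it and then poll the operation it names. A hedged sketch of that flow (the request body is left empty here because its fields are documented elsewhere; project, instance, and database names are placeholders):

    import time
    from googleapiclient.discovery import build

    spanner = build("spanner", "v1")
    databases = spanner.projects().instances().databases()

    # Start the quorum change; fill the body in from the ChangeQuorumRequest reference.
    op = databases.changequorum(
        name="projects/my-project/instances/my-instance/databases/my-db",
        body={},
    ).execute()

    # Poll projects.instances.databases.operations until the operation completes.
    while not op.get("done"):
        time.sleep(5)
        op = databases.operations().get(name=op["name"]).execute()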

@@ -145,20 +145,20 @@

Instance Methods

Method Details

addSplitPoints(database, body=None, x__xgafv=None)
-Adds split points to specified tables, indexes of a database.
+Adds split points to specified tables and indexes of a database.
 
 Args:
-  database: string, Required. The database on whose tables/indexes split points are to be added. Values are of the form `projects//instances//databases/`. (required)
+  database: string, Required. The database on whose tables or indexes the split points are to be added. Values are of the form `projects//instances//databases/`. (required)
   body: object, The request body.
     The object takes the form of:
 
 { # The request for AddSplitPoints.
-  "initiator": "A String", # Optional. A user-supplied tag associated with the split points. For example, "initial_data_load", "special_event_1". Defaults to "CloudAddSplitPointsAPI" if not specified. The length of the tag must not exceed 50 characters,else will be trimmed. Only valid UTF8 characters are allowed.
+  "initiator": "A String", # Optional. A user-supplied tag associated with the split points. For example, "initial_data_load", "special_event_1". Defaults to "CloudAddSplitPointsAPI" if not specified. The length of the tag must not exceed 50 characters, or else it is trimmed. Only valid UTF8 characters are allowed.
   "splitPoints": [ # Required. The split points to add.
-    { # The split points of a table/index.
+    { # The split points of a table or an index.
       "expireTime": "A String", # Optional. The expiration timestamp of the split points. A timestamp in the past means immediate expiration. The maximum value can be 30 days in the future. Defaults to 10 days in the future if not specified.
       "index": "A String", # The index to split. If specified, the `table` field must refer to the index's base table.
-      "keys": [ # Required. The list of split keys, i.e., the split boundaries.
+      "keys": [ # Required. The list of split keys. In essence, the split boundaries.
         { # A split key.
           "keyParts": [ # Required. The column values making up the split key.
             "",
diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
index 93cdca0ee1..ffc8c23891 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
@@ -446,16 +446,16 @@ 

Method Details

      ],
    },
  },
- "options": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for.
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "options": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. 
They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. 
Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. 
As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp becomes too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. 
- The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. 
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -481,7 +481,7 @@
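The timestamp-bound options above map directly onto the request body of sessions.beginTransaction in the discovery-based Python client that this page documents. The following is a minimal sketch, not a definitive recipe: the project, instance, database, and session names are hypothetical placeholders, and it assumes application-default credentials are available to build().

    from googleapiclient.discovery import build

    spanner = build("spanner", "v1")  # assumes application-default credentials

    # Hypothetical session resource name; create one with sessions.create() first.
    session = ("projects/my-project/instances/my-instance"
               "/databases/my-db/sessions/my-session")

    # Begin a read-only transaction with a strong timestamp bound and ask
    # Spanner to report the read timestamp it selected (field names as in
    # the TransactionOptions.ReadOnly schema above).
    txn = spanner.projects().instances().databases().sessions().beginTransaction(
        session=session,
        body={
            "options": {
                "readOnly": {
                    "strong": True,
                    "returnReadTimestamp": True,
                }
            }
        },
    ).execute()

    print(txn["id"], txn.get("readTimestamp"))

A bounded-staleness bound (`maxStaleness`) would not work here, because, as the schema notes, it is restricted to single-use transactions; `exactStaleness` or `strong` are the multi-use choices.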

Method Details

{ # A transaction. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, @@ -590,16 +590,16 @@
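The precommitToken bookkeeping described above is purely client-side: keep whichever token carries the highest seqNum across the BeginTransaction, ResultSet, and PartialResultSet responses, then attach it to the CommitRequest. A minimal sketch of that fold, assuming the plain dicts returned by the discovery client (the `txn` dict comes from the earlier sketch):

    def keep_latest_precommit_token(current, incoming):
        # Retain the precommit token with the highest seqNum, as the schema
        # above instructs for read-write transactions on multiplexed sessions.
        if incoming is None:
            return current
        if current is None or incoming["seqNum"] > current["seqNum"]:
            return incoming
        return current

    latest = None
    latest = keep_latest_precommit_token(latest, txn.get("precommitToken"))
    # ...fold in tokens from ResultSet/PartialResultSet responses the same way,
    # then pass `latest` as the `precommitToken` field of the CommitRequest.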

Method Details

"transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. The value of transaction_tag should be the same for all requests belonging to the same transaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. }, "returnCommitStats": True or False, # If `true`, then statistics related to the transaction is included in the CommitResponse. Default value is `false`. - "singleUseTransaction": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. 
Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. 
The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it's possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUseTransaction": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. 
Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. 
The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: `READ_` followed by the change stream's name. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries.
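Putting those constraints together, a change stream query is an ExecuteStreamingSql call against the generated TVF under a single-use strong read-only bound. A sketch, assuming a hypothetical change stream named `SongChanges` (so its generated TVF would be `READ_SongChanges`) and illustrative timestamps; how the discovery client materializes the PartialResultSet stream should be checked for your client version:

    from googleapiclient.discovery import build

    sessions = build("spanner", "v1").projects().instances().databases().sessions()
    session = "projects/p/instances/i/databases/d/sessions/s"  # hypothetical

    partial_result_sets = sessions.executeStreamingSql(
        session=session,
        body={
            # Only a single-use, strong read-only bound is accepted here.
            "transaction": {"singleUse": {"readOnly": {"strong": True}}},
            # TVF arguments: start/end timestamps, partition token, heartbeat ms.
            "sql": "SELECT ChangeRecord FROM READ_SongChanges(@start, @end, NULL, 10000)",
            "params": {"start": "2024-01-01T00:00:00Z",
                       "end": "2024-01-01T01:00:00Z"},
            "paramTypes": {"start": {"code": "TIMESTAMP"},
                           "end": {"code": "TIMESTAMP"}},
        },
    ).execute()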
In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it's possible that the mutations are executed more than once.
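Returning to the Partitioned DML flow above: the transaction is begun explicitly with partitionedDml options, a single DML statement is executed against it, and there is no Commit. A sketch with an intentionally idempotent statement (session, table, and column names are illustrative); a statement like `column = column + 1` would be unsafe here because a partition can be applied more than once:

    from googleapiclient.discovery import build

    sessions = build("spanner", "v1").projects().instances().databases().sessions()
    session = "projects/p/instances/i/databases/d/sessions/s"  # hypothetical

    # Requires spanner.databases.beginPartitionedDmlTransaction on the session.
    txn = sessions.beginTransaction(
        session=session, body={"options": {"partitionedDml": {}}}
    ).execute()["id"]

    sessions.executeSql(
        session=session,
        body={
            "transaction": {"id": txn},
            # Idempotent and fully partitionable: safe to re-apply per partition.
            "sql": "DELETE FROM Events WHERE EventDate < DATE '2020-01-01'",
            "seqno": "1",
        },
    ).execute()
    # No Commit/Rollback: each partition's internal transaction commits itself.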
If this is undesirable, use BeginTransaction and Commit instead. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. 
If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -733,16 +733,16 @@
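For reference, each timestamp bound enumerated above corresponds to exactly one field of `readOnly` (set one per transaction); durations are JSON-encoded strings such as "15s" and timestamps are RFC3339. Illustrative option objects, usable under `singleUse` as sketched earlier:

    # Strong (the default): sees all transactions committed before the read.
    strong = {"readOnly": {"strong": True}}

    # Exact staleness: repeatable; at a fixed timestamp or at NOW - 15s.
    at_timestamp = {"readOnly": {"readTimestamp": "2014-10-02T15:01:23.045123456Z"}}
    exactly_stale = {"readOnly": {"exactStaleness": "15s"}}

    # Bounded staleness: single-use only; Spanner negotiates the freshest
    # timestamp a nearby replica can serve without blocking.
    bounded_stale = {"readOnly": {"maxStaleness": "60s"}}
    at_least = {"readOnly": {"minReadTimestamp": "2014-10-02T15:01:23.045123456Z"}}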

Method Details

}, ], "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Required. The transaction to use. Must be a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. 
Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. 
Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. 
If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. 
Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. 
If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound.
Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: `READ_` followed by the change stream's name. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs.
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream.
When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. 
If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -752,16 +752,16 @@
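Illustrative use of the `excludeTxnFromChangeStreams` flag documented above: it sits at the top level of TransactionOptions alongside the mode, and only read-write and partitioned DML transactions accept it. Whether a given change stream actually omits the writes depends on its `allow_txn_exclusion` DDL option; the session, table, and column names below are hypothetical.

    from googleapiclient.discovery import build

    sessions = build("spanner", "v1").projects().instances().databases().sessions()
    session = "projects/p/instances/i/databases/d/sessions/s"  # hypothetical

    # Backfill-style write kept out of change streams whose DDL sets
    # allow_txn_exclusion = true; on a read-only transaction this flag
    # would be rejected with INVALID_ARGUMENT.
    txn = sessions.beginTransaction(
        session=session,
        body={"options": {"readWrite": {}, "excludeTxnFromChangeStreams": True}},
    ).execute()["id"]

    sessions.executeSql(
        session=session,
        body={"transaction": {"id": txn},
              "sql": "UPDATE Singers SET LastSynced = CURRENT_TIMESTAMP() WHERE TRUE",
              "seqno": "1"},
    ).execute()
    sessions.commit(session=session, body={"transactionId": txn}).execute()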

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction.
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past.
Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_<change stream name>. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as those in an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql.
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions.
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes.
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_<change stream name>. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as those in an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement.
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write.
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -799,7 +799,7 @@
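As a quick reference, the timestamp-bound fields above correspond to `readOnly` payloads like the following sketch. The values are placeholders, and exactly one bound may be set per transaction.

```python
# Exactly one timestamp bound may be set per readOnly transaction.
strong = {"strong": True, "returnReadTimestamp": True}  # default semantics
bounded = {"maxStaleness": "10s"}  # single-use transactions only
fresher_than = {"minReadTimestamp": "2014-10-02T15:01:23.045123456Z"}  # single-use only
exact_relative = {"exactStaleness": "60s"}
exact_absolute = {"readTimestamp": "2014-10-02T15:01:23.045123456Z"}

# Used as the transaction selector of a read or query, for example:
selector = {"singleUse": {"readOnly": bounded}}
```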

Method Details

}, "transaction": { # A transaction. # If the read or SQL query began a transaction as a side-effect, the information about the new transaction is yielded here. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, @@ -947,16 +947,16 @@

Method Details

"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one succeeds. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction can be aborted. Replays of previously handled requests yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. 
Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort.
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads.
However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_<change stream name>. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as those in an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable.
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2.
Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. 
If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp.
They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_<change stream name>. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound.
All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as those in an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction.
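The retry guidance above (bound total retry time rather than attempt count, and reuse the same session) might look like the following sketch. It assumes the discovery client surfaces `ABORTED` as an `HttpError` with HTTP status 409, which is the standard google.rpc mapping; the backoff policy is deliberately simplistic, and `work` is a hypothetical callable that re-runs the whole transaction.

```python
import time

from googleapiclient.errors import HttpError

def run_with_retry(work, time_budget_s=30.0):
    """Retry `work` on ABORTED (HTTP 409), bounding total time, not attempts."""
    deadline = time.monotonic() + time_budget_s
    delay = 0.05
    while True:
        try:
            return work()  # should re-run the whole transaction in the same session
        except HttpError as err:
            if err.resp.status != 409 or time.monotonic() >= deadline:
                raise  # not ABORTED, or out of retry budget
            time.sleep(delay)  # simple backoff; production code should add jitter
            delay = min(delay * 2, 2.0)
```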
+ "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. 
Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -966,16 +966,16 @@
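To make the read-only timestamp bounds above concrete, here is a minimal sketch using the discovery-based Python client this page documents. The project, instance, database, table, and session names are all placeholders, and the session is assumed to have been created earlier with sessions().create().

```python
# Minimal sketch: a single-use read-only query with an exact-staleness
# bound. All resource names below are hypothetical.
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')
session = ('projects/my-project/instances/my-instance/'
           'databases/my-db/sessions/my-session')  # hypothetical

response = service.projects().instances().databases().sessions().executeSql(
    session=session,
    body={
        'sql': 'SELECT SingerId, FirstName FROM Singers',  # hypothetical table
        'transaction': {
            'singleUse': {
                'readOnly': {
                    # Read at a timestamp exactly 15 seconds old. Swap in
                    # 'maxStaleness' for bounded staleness (single-use only)
                    # or 'strong': True for the default strong read.
                    'exactStaleness': '15s',
                    'returnReadTimestamp': True,
                },
            },
        },
    },
).execute()

# With returnReadTimestamp set, the chosen timestamp should be reported
# back in ResultSetMetadata.transaction.
print(response.get('metadata', {}).get('transaction', {}).get('readTimestamp'))
```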

Method Details
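The change stream TVF described in the hunk above is queried through executeStreamingSql with a single-use, strong, read-only bound. A hedged sketch follows; the stream name `my_stream`, the session name, and the time range are all assumptions, and note that this REST client delivers the streamed PartialResultSet messages only once the call completes.

```python
# Hedged sketch: query a change stream TVF (READ_<change_stream_name>)
# via ExecuteStreamingSql. Names and timestamps are hypothetical.
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')
session = 'projects/p/instances/i/databases/d/sessions/s'  # hypothetical

partial_result_sets = (
    service.projects().instances().databases().sessions().executeStreamingSql(
        session=session,
        body={
            'sql': (
                'SELECT ChangeRecord FROM READ_my_stream('
                'start_timestamp => @start, end_timestamp => @end, '
                'partition_token => NULL, heartbeat_milliseconds => 10000)'
            ),
            'params': {
                'start': '2024-01-01T00:00:00Z',
                'end': '2024-01-01T01:00:00Z',
            },
            'paramTypes': {
                'start': {'code': 'TIMESTAMP'},
                'end': {'code': 'TIMESTAMP'},
            },
            # Change stream queries require a single-use read-only
            # transaction with a strong timestamp bound.
            'transaction': {'singleUse': {'readOnly': {'strong': True}}},
        },
    ).execute()
)
```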

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read and write data in a single database. They may, however, read and write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. 
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_<change_stream_name>. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. 
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1007,7 +1007,7 @@
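A minimal sketch of the `excludeTxnFromChangeStreams` option above in a read-write transaction; the session, table, and column names are hypothetical. Only change streams created with the DDL option `allow_txn_exclusion = true` would omit these writes.

```python
# Hedged sketch: a read-write transaction excluded from opted-in change
# streams. Resource and table names are hypothetical.
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')
sessions = service.projects().instances().databases().sessions()
session = 'projects/p/instances/i/databases/d/sessions/s'  # hypothetical

txn = sessions.beginTransaction(
    session=session,
    body={'options': {'readWrite': {}, 'excludeTxnFromChangeStreams': True}},
).execute()

sessions.executeSql(
    session=session,
    body={
        'sql': "UPDATE Singers SET FirstName = 'Ann' WHERE SingerId = 1",
        'transaction': {'id': txn['id']},
        'seqno': '1',  # DML requires a monotonically increasing sequence number
    },
).execute()

sessions.commit(session=session, body={'transactionId': txn['id']}).execute()
```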

Method Details
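The Partitioned DML flow discussed in the hunk above reduces to two calls: begin a `partitionedDml` transaction, then run a single idempotent, fully partitionable DML statement in it. A hedged sketch with hypothetical names follows; there is no Commit or Rollback in this mode.

```python
# Hedged sketch: Partitioned DML. The session and table are hypothetical,
# and the statement is idempotent, as the caveats above recommend.
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')
sessions = service.projects().instances().databases().sessions()
session = 'projects/p/instances/i/databases/d/sessions/s'  # hypothetical

txn = sessions.beginTransaction(
    session=session,
    body={'options': {'partitionedDml': {}}},
).execute()

# The statement is applied per partition, at least once; partitions commit
# automatically, so there is no commit() call here.
sessions.executeSql(
    session=session,
    body={
        'sql': "DELETE FROM Events WHERE CreatedAt < TIMESTAMP '2020-01-01'",
        'transaction': {'id': txn['id']},
        'seqno': '1',
    },
).execute()
```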

}, "transaction": { # A transaction. # If the read or SQL query began a transaction as a side-effect, the information about the new transaction is yielded here. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, @@ -1144,16 +1144,16 @@

Method Details

"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one succeeds. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction can be aborted. Replays of previously handled requests yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. 
Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. 
However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. 
Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. 
If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp.
They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound.
All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction.
+ "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. 
Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1163,16 +1163,16 @@
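For illustration, the timestamp bounds above map directly onto the request body of `executeSql`. The following is a minimal sketch against this discovery-based Python client: the resource names are placeholders, `15s` is an arbitrary staleness bound, and error handling is omitted.

from googleapiclient.discovery import build

# Sketch: run a query in a single-use read-only transaction with a
# bounded-staleness timestamp bound (per the field descriptions above,
# maxStaleness is only valid for single-use transactions).
spanner = build("spanner", "v1")
session = ("projects/my-project/instances/my-instance"
           "/databases/my-db/sessions/my-session")
body = {
    "sql": "SELECT AlbumId, AlbumTitle FROM Albums",
    # Cloud Spanner picks the newest timestamp within the bound that the
    # closest available replica can serve without blocking.
    "transaction": {"singleUse": {"readOnly": {"maxStaleness": "15s"}}},
}
result_set = (
    spanner.projects().instances().databases().sessions()
    .executeSql(session=session, body=body)
    .execute()
)
print(result_set.get("rows", []))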

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction.
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past.
Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql.
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions.
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes.
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement.
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write.
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1206,7 +1206,7 @@
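The change stream constraints described above (ExecuteStreamingSql only, single-use read-only transaction, strong timestamp bound) translate into a request body like the sketch below. The stream name `my_stream`, its generated TVF `READ_my_stream`, and the TVF argument list are illustrative, following the linked change-streams documentation; in practice the Cloud Spanner client library is the usual way to consume the streamed PartialResultSet messages.

from googleapiclient.discovery import build

spanner = build("spanner", "v1")
session = ("projects/my-project/instances/my-instance"
           "/databases/my-db/sessions/my-session")
body = {
    # Query the TVF that Spanner generates for a change stream named
    # my_stream (placeholder name and argument list).
    "sql": ("SELECT ChangeRecord FROM READ_my_stream("
            "start_timestamp => @start, end_timestamp => NULL, "
            "partition_token => NULL, heartbeat_milliseconds => 10000)"),
    "params": {"start": "2014-10-02T15:01:23.045123456Z"},
    "paramTypes": {"start": {"code": "TIMESTAMP"}},
    # Required for change stream queries: a single-use read-only
    # transaction with a strong timestamp bound.
    "transaction": {"singleUse": {"readOnly": {"strong": True}}},
}
response = (
    spanner.projects().instances().databases().sessions()
    .executeStreamingSql(session=session, body=body)
    .execute()
)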

Method Details

}, "transaction": { # A transaction. # If the read or SQL query began a transaction as a side-effect, the information about the new transaction is yielded here. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, @@ -1386,16 +1386,16 @@

Method Details

}, "sql": "A String", # Required. The query request to generate partitions for. The request fails if the query isn't root partitionable. For a query to be root partitionable, it needs to satisfy a few conditions. For example, if the query execution plan contains a distributed union operator, then it must be the first operator in the plan. For more information about other conditions, see [Read data in parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel). The query request must not contain DML commands, such as `INSERT`, `UPDATE`, or `DELETE`. Use `ExecuteStreamingSql` with a `PartitionedDml` transaction for large, partition-friendly DML operations. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read-only snapshot transactions are supported, read and write and single-use transactions are not. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. 
Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. 
Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. 
See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. 
- Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. 
Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. 
The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying Change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. 
In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. 
* If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions; otherwise, the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. 
A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1405,16 +1405,16 @@
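To make the timestamp bounds documented above concrete, here is a hedged sketch of the `readOnly` payloads for each bound; all values are placeholders, and exactly one bound field should be set per transaction:

# One timestamp bound per read-only transaction (illustrative values).
strong = {"strong": True}
exact_staleness = {"exactStaleness": "10s"}
bounded_staleness = {"maxStaleness": "60s"}          # single-use transactions only
read_timestamp = {"readTimestamp": "2014-10-02T15:01:23.045123456Z"}
min_read_timestamp = {"minReadTimestamp": "2014-10-02T15:01:23.045123456Z"}  # single-use only

# returnReadTimestamp can be combined with any bound:
read_only_options = dict(strong, returnReadTimestamp=True)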

Method Details
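The transaction notes in this section advise bounding the total time spent retrying `ABORTED` transactions rather than capping the attempt count, and retrying in the same session so its growing lock priority carries over. A minimal sketch under those assumptions; `run_transaction_once` and `TransactionAborted` are hypothetical stand-ins supplied by the caller, not library APIs:

import time

class TransactionAborted(Exception):
    """Hypothetical wrapper raised when a commit returns ABORTED."""

def commit_with_time_budget(run_transaction_once, budget_seconds=60.0):
    # Limit total retry time, not the number of attempts; the caller
    # should reuse the same session so lock priority accumulates.
    deadline = time.monotonic() + budget_seconds
    while True:
        try:
            return run_transaction_once()
        except TransactionAborted:
            if time.monotonic() >= deadline:
                raise
            time.sleep(0.1)  # simple fixed backoff; tune for the workload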

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. 
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. 
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1442,7 +1442,7 @@
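For orientation, the `readOnly` fields above can be exercised through the generated Python client. The following is a minimal sketch, not part of the generated reference: the project, instance, database, and session names are placeholders, and it assumes a session has already been created via `sessions.create`.

```python
from googleapiclient.discovery import build

# Placeholder resource name; create a real session first via sessions.create.
SESSION = "projects/my-project/instances/my-instance/databases/my-db/sessions/my-session"

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()

# Strong (the default): sees every transaction committed before the read starts.
strong = {"readOnly": {"strong": True, "returnReadTimestamp": True}}
# Exact staleness: read exactly 15 seconds in the past; no negotiation phase.
exact = {"readOnly": {"exactStaleness": "15s"}}
# Bounded staleness: Cloud Spanner picks the freshest timestamp within the
# bound; valid only for single-use transactions.
bounded = {"readOnly": {"maxStaleness": "10s", "returnReadTimestamp": True}}

resp = sessions.executeSql(
    session=SESSION,
    body={"transaction": {"singleUse": bounded}, "sql": "SELECT 1"},
).execute()
# With returnReadTimestamp set, the chosen timestamp is reported back in
# ResultSetMetadata.transaction.
print(resp["metadata"]["transaction"]["readTimestamp"])
```

Note that `exactStaleness` and `maxStaleness` are protobuf Duration values, encoded in the JSON body as strings such as `"15s"`.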

Method Details

], "transaction": { # A transaction. # Transaction created by this request. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, @@ -1495,16 +1495,16 @@

Method Details

}, "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read only snapshot transactions are supported, read/write and single use transactions are not. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. 
If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. 
Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. 
For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. 
Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp becomes too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. 
These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. 
Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. 
"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1514,16 +1514,16 @@

Method Details

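To make the change-stream constraints described above concrete, a sketch follows. It assumes a change stream named `my_stream` (so the generated TVF is `READ_my_stream`) and a placeholder session; the TVF arguments shown follow the public change-stream documentation, and in our understanding the discovery-based HTTP client returns the PartialResultSet stream as a single JSON list once the call completes.

```python
from googleapiclient.discovery import build

SESSION = "projects/my-project/instances/my-instance/databases/my-db/sessions/my-session"  # placeholder
sessions = build("spanner", "v1").projects().instances().databases().sessions()

body = {
    # Change stream queries require a single-use, strong, read-only transaction.
    "transaction": {"singleUse": {"readOnly": {"strong": True}}},
    # For a change stream named my_stream, the generated TVF is READ_my_stream.
    "sql": (
        "SELECT ChangeRecord FROM READ_my_stream("
        "start_timestamp => @start, end_timestamp => @end, "
        "partition_token => NULL, heartbeat_milliseconds => 10000)"
    ),
    "params": {"start": "2024-05-01T00:00:00Z", "end": "2024-05-01T01:00:00Z"},
    "paramTypes": {"start": {"code": "TIMESTAMP"}, "end": {"code": "TIMESTAMP"}},
}
partials = sessions.executeStreamingSql(session=SESSION, body=body).execute()
```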
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
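The change stream rule mentioned above (single-use, strong, read-only) can be illustrated with the Python client; the TVF name READ_MyStream and the ten-minute window are assumptions for this sketch, not values from this reference.

import datetime

from google.cloud import spanner
from google.cloud.spanner_v1 import param_types

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

# database.snapshot() with no arguments yields a single-use, strong
# read-only transaction, the only mode change stream TVF queries accept.
start = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(minutes=10)
with database.snapshot() as snapshot:
    results = snapshot.execute_sql(
        "SELECT ChangeRecord FROM READ_MyStream("
        "start_timestamp => @start, end_timestamp => NULL, "
        "partition_token => NULL, heartbeat_milliseconds => 10000)",
        params={"start": start},
        param_types={"start": param_types.TIMESTAMP},
    )
    for row in results:
        print(row)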
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. 
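The retry guidance above is what the Python client's run_in_transaction helper implements: the unit of work is re-run on `ABORTED` within the same session, so each attempt keeps the session's accumulated lock priority. A sketch with placeholder table and column names:

from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

def do_work(transaction):
    # Reads and writes here share one read-write transaction; if the commit
    # returns ABORTED, the whole function is re-run in the same session.
    row = transaction.execute_sql(
        "SELECT AccountId, Balance FROM Accounts WHERE AccountId = 1"
    ).one()
    transaction.update("Accounts", ["AccountId", "Balance"], [[row[0], row[1] + 1]])

database.run_in_transaction(do_work)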
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
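As a hedged illustration of this execution strategy (see the caveats that continue below), the Python client exposes it as execute_partitioned_dml; the table and predicate are placeholders, and the DELETE is idempotent as recommended:

from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

# Runs the DELETE per partition in separate internal transactions that
# commit automatically; returns a lower bound on the rows affected.
row_count = database.execute_partitioned_dml(
    "DELETE FROM Sessions WHERE LastUsed < "
    "TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 30 DAY)"
)
print(row_count)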
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. 
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1551,7 +1551,7 @@
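For reference, illustrative JSON shapes for the read-only options above, as they would appear as a request body's transaction options (all values are placeholders):

# Strong read; also ask Cloud Spanner to report the chosen read timestamp.
strong = {"readOnly": {"strong": True, "returnReadTimestamp": True}}
# Exact staleness of 15 seconds (a Duration in its JSON string form).
exactly_stale = {"readOnly": {"exactStaleness": "15s"}}
# Bounded staleness; valid only in single-use transactions.
bounded_stale = {"readOnly": {"maxStaleness": "10s"}}
# Pinned, repeatable read at an explicit timestamp.
pinned = {"readOnly": {"readTimestamp": "2014-10-02T15:01:23.045123456Z"}}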

Method Details

], "transaction": { # A transaction. # Transaction created by this request. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, @@ -1630,16 +1630,16 @@

Method Details

"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. 
- Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". 
By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. 
For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. 
See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. 
See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. 
When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. 
If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1649,16 +1649,16 @@
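To make the single-use read-only options above concrete, here is a minimal sketch using the discovery-based Python client that this reference documents. The project, instance, database, and session names are placeholders and the trivial query is invented; only `executeSql` and the `transaction.singleUse.readOnly` shape are taken from the reference above.

    from googleapiclient import discovery

    # Build the Spanner discovery client (credentials resolved from the environment).
    service = discovery.build("spanner", "v1")
    sessions = service.projects().instances().databases().sessions()

    # Placeholder resource name.
    session_name = "projects/my-project/instances/my-instance/databases/my-db/sessions/my-session"

    # Single-use, strong read-only transaction: sees all previously committed
    # writes and returns the server-chosen read timestamp.
    response = sessions.executeSql(
        session=session_name,
        body={
            "sql": "SELECT 1",
            "transaction": {
                "singleUse": {
                    "readOnly": {
                        "strong": True,
                        "returnReadTimestamp": True,
                    }
                }
            },
        },
    ).execute()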

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. 
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp becomes too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should use ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. 
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1690,7 +1690,7 @@
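As a rough illustration of the three timestamp bounds described above, the `readOnly` message might be populated as follows. The duration and timestamp values are arbitrary examples, and note that, per the field comments, `maxStaleness` and `minReadTimestamp` are only valid in single-use transactions.

    # Strong (the default): read at a timestamp where all previously committed
    # transactions are visible.
    strong = {"readOnly": {"strong": True}}

    # Exact staleness: repeatable reads at a client-chosen point in time; no
    # negotiation phase is needed.
    exact = {"readOnly": {"exactStaleness": "15s"}}
    pinned = {"readOnly": {"readTimestamp": "2014-10-02T15:01:23.045123456Z"}}

    # Bounded staleness: Spanner negotiates the freshest timestamp within the
    # bound; single-use read-only transactions only.
    bounded = {"readOnly": {"maxStaleness": "10s"}}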

Method Details

}, "transaction": { # A transaction. # If the read or SQL query began a transaction as a side-effect, the information about the new transaction is yielded here. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, @@ -1857,16 +1857,16 @@

Method Details

"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. 
- Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". 
By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. 
For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. 
See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. 
See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp becomes too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. 
When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. 
If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1876,16 +1876,16 @@
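As a concrete illustration of the Partitioned DML flow documented above, here is a minimal sketch against the generated Python client. It assumes Application Default Credentials and an already-created session; the project, instance, database, session, table, and parameter names are all placeholders, and error handling is omitted.

```python
from googleapiclient.discovery import build

# A minimal sketch of the Partitioned DML flow described above; all
# resource names and the SQL statement are placeholders.
spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = ("projects/my-project/instances/my-instance"
           "/databases/my-db/sessions/my-session")

# 1. Begin a Partitioned DML transaction. `excludeTxnFromChangeStreams`
#    is optional; per the docs it is only valid on read-write or
#    partitioned DML transactions.
txn = sessions.beginTransaction(
    session=session,
    body={"options": {"partitionedDml": {},
                      "excludeTxnFromChangeStreams": True}},
).execute()

# 2. Execute exactly one DML statement in it. The statement should be
#    idempotent: it may run more than once against some partitions.
result = sessions.executeSql(
    session=session,
    body={
        "transaction": {"id": txn["id"]},
        "sql": "DELETE FROM Events WHERE EventDate < @cutoff",
        "params": {"cutoff": "2024-01-01"},
        "paramTypes": {"cutoff": {"code": "DATE"}},
        "seqno": "1",  # required for DML statements
    },
).execute()
print(result.get("stats", {}).get("rowCountLowerBound"))
```

Because the per-partition transactions commit independently, a retry after a failure should assume some rows were already deleted; the statement above is naturally idempotent, so re-running it is safe.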

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. - "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. 
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp becomes too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions. The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error. "isolationLevel": "A String", # Isolation level for the transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, - "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. 
Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. + "readOnly": { # Message type to initiate a read-only transaction. # Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. "exactStaleness": "A String", # Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`. "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions. "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. - "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. + "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: `"2014-10-02T15:01:23.045123456Z"`. "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction. "strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, @@ -1919,7 +1919,7 @@
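The single-use read-only options above combine in one ExecuteSql call. The sketch below (Python client, placeholder resource names) requests a bounded-staleness snapshot read, which per the docs is only valid for single-use transactions, and surfaces the timestamp Cloud Spanner chose.

```python
from googleapiclient.discovery import build

# Illustrative single-use snapshot read; all names are placeholders.
spanner = build("spanner", "v1")
resp = spanner.projects().instances().databases().sessions().executeSql(
    session=("projects/my-project/instances/my-instance"
             "/databases/my-db/sessions/my-session"),
    body={
        "transaction": {
            "singleUse": {
                "readOnly": {
                    "maxStaleness": "15s",        # read at most 15s stale
                    "returnReadTimestamp": True,  # report the chosen timestamp
                }
            }
        },
        "sql": "SELECT SingerId, FirstName FROM Singers",
    },
).execute()

# With returnReadTimestamp set, the chosen timestamp is reported in
# ResultSetMetadata.transaction.
print(resp["metadata"]["transaction"]["readTimestamp"])
```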

Method Details

}, "transaction": { # A transaction. # If the read or SQL query began a transaction as a side-effect, the information about the new transaction is yielded here. "id": "A String", # `id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests. - "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the Transaction message in the BeginTransaction response and also as a part of the ResultSet and PartialResultSet responses. # A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. "precommitToken": "A String", # Opaque precommit token. "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, diff --git a/docs/dyn/spanner_v1.projects.instances.html b/docs/dyn/spanner_v1.projects.instances.html index e640acd532..dcc37ded19 100644 --- a/docs/dyn/spanner_v1.projects.instances.html +++ b/docs/dyn/spanner_v1.projects.instances.html @@ -504,6 +504,17 @@

Method Details

{ # The request for MoveInstance. "targetConfig": "A String", # Required. The target instance configuration where to move the instance. Values are of the form `projects//instanceConfigs/`. + "targetDatabaseMoveConfigs": [ # Optional. The configuration for each database in the target instance configuration. + { # The configuration for each database in the target instance configuration. + "databaseId": "A String", # Required. The unique identifier of the database resource in the Instance. For example, if the database URI is projects/foo/instances/bar/databases/baz, the id to supply here is baz. + "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Optional. Encryption configuration to be used for the database in the target configuration. Should be specified for every database which currently uses CMEK encryption. If a database currently uses GOOGLE_MANAGED encryption and a target encryption config is not specified, it defaults to GOOGLE_MANAGED. If a database currently uses Google-managed encryption and a target encryption config is specified, the request is rejected. If a database currently uses CMEK encryption, a target encryption config must be specified. You cannot move a CMEK database to a Google-managed encryption database by MoveInstance. + "kmsKeyName": "A String", # Optional. This field is maintained for backwards compatibility. For new callers, we recommend using `kms_key_names` to specify the KMS key. `kms_key_name` should only be used if the location of the KMS key matches the database instance's configuration (location) exactly. For example, the KMS location is in us-central1 or nam3 and the database instance is also in us-central1 or nam3. The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. + "A String", + ], + }, + }, + ], } x__xgafv: string, V1 error format. diff --git a/docs/dyn/sqladmin_v1.Backups.html b/docs/dyn/sqladmin_v1.Backups.html index 05273f79ed..d0b5ee8d88 100644 --- a/docs/dyn/sqladmin_v1.Backups.html +++ b/docs/dyn/sqladmin_v1.Backups.html @@ -127,6 +127,7 @@ 
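A MoveInstance request body with the per-database encryption overrides shown above might be assembled as sketched below. Every project, instance, database, instance configuration, and KMS key name is a placeholder, and the key set must cover all regions of the target configuration.

```python
from googleapiclient.discovery import build

# Hypothetical MoveInstance request; all resource names are placeholders.
spanner = build("spanner", "v1")
body = {
    "targetConfig": "projects/my-project/instanceConfigs/nam3",
    "targetDatabaseMoveConfigs": [
        {
            # For projects/my-project/instances/my-instance/databases/baz,
            # the databaseId is just "baz".
            "databaseId": "baz",
            "encryptionConfig": {
                # Regional keys covering every region of the target config.
                "kmsKeyNames": [
                    "projects/my-project/locations/us-east4/keyRings/kr/cryptoKeys/k1",
                    "projects/my-project/locations/us-east1/keyRings/kr/cryptoKeys/k2",
                ],
            },
        },
    ],
}
op = spanner.projects().instances().move(
    name="projects/my-project/instances/my-instance",
    body=body,
).execute()  # returns a long-running Operation; poll until done
```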

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -792,6 +793,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -1169,6 +1171,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -1560,6 +1563,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. diff --git a/docs/dyn/sqladmin_v1.instances.html b/docs/dyn/sqladmin_v1.instances.html index 48468015bd..9860bf6878 100644 --- a/docs/dyn/sqladmin_v1.instances.html +++ b/docs/dyn/sqladmin_v1.instances.html @@ -1766,6 +1766,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -2304,6 +2305,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -2793,6 +2795,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -3189,6 +3192,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -4476,6 +4480,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -5733,6 +5738,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. diff --git a/docs/dyn/sqladmin_v1beta4.backups.html b/docs/dyn/sqladmin_v1beta4.backups.html index 3cae06df7c..a427628251 100644 --- a/docs/dyn/sqladmin_v1beta4.backups.html +++ b/docs/dyn/sqladmin_v1beta4.backups.html @@ -132,6 +132,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -797,6 +798,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -1174,6 +1176,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -1565,6 +1568,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. diff --git a/docs/dyn/sqladmin_v1beta4.instances.html b/docs/dyn/sqladmin_v1beta4.instances.html index 2ca1f10e62..8324589215 100644 --- a/docs/dyn/sqladmin_v1beta4.instances.html +++ b/docs/dyn/sqladmin_v1beta4.instances.html @@ -1766,6 +1766,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -2304,6 +2305,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -2793,6 +2795,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -3189,6 +3192,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -4476,6 +4480,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. @@ -5733,6 +5738,7 @@

Method Details

"A String", ], "backendType": "A String", # The backend type. `SECOND_GEN`: Cloud SQL database instance. `EXTERNAL`: A database server that is not managed by Google. This property is read-only; use the `tier` property in the `settings` object to determine the database type. + "clearNetwork": True or False, # Clears private network settings when the instance is restored. "connectionName": "A String", # Connection name of the Cloud SQL instance used in connection strings. "createTime": "A String", # Output only. The time when the instance was created in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`. "currentDiskSize": "A String", # The current disk usage of the instance in bytes. This property has been deprecated. Use the "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details. diff --git a/docs/dyn/storage_v1.buckets.html b/docs/dyn/storage_v1.buckets.html index 37f5c9fec0..b66e61397f 100644 --- a/docs/dyn/storage_v1.buckets.html +++ b/docs/dyn/storage_v1.buckets.html @@ -268,6 +268,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -561,6 +562,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -789,6 +791,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -1014,6 +1017,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -1248,6 +1252,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -1461,6 +1466,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -1690,6 +1696,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -1956,6 +1963,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -2304,6 +2312,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. @@ -2533,6 +2542,7 @@

Method Details

}, "id": "A String", # The ID of the bucket. For buckets, the id and name properties are the same. "ipFilter": { # The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'. + "allowCrossOrgVpcs": True or False, # Whether to allow cross-org VPCs in the bucket's IP filter configuration. "mode": "A String", # The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. "publicNetworkSource": { # The public network source of the bucket's IP filter. "allowedIpCidrRanges": [ # The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. diff --git a/docs/dyn/tasks_v1.tasks.html b/docs/dyn/tasks_v1.tasks.html index bab57b6bcb..f1d312afe5 100644 --- a/docs/dyn/tasks_v1.tasks.html +++ b/docs/dyn/tasks_v1.tasks.html @@ -372,7 +372,7 @@

Method Details

tasklist: string, Task list identifier. (required) task: string, Task identifier. (required) destinationTasklist: string, Optional. Destination task list identifier. If set, the task is moved from tasklist to the destinationTasklist list. Otherwise the task is moved within its current list. Recurrent tasks cannot currently be moved between lists. - parent: string, Optional. New parent task identifier. If the task is moved to the top level, this parameter is omitted. The task set as parent must exist in the task list and can not be hidden. Exceptions: 1. Assigned tasks can not be set as parent task (have subtasks) or be moved under a parent task (become subtasks). 2. Tasks that are both completed and hidden cannot be nested, so the parent field must be empty. + parent: string, Optional. New parent task identifier. If the task is moved to the top level, this parameter is omitted. The task set as parent must exist in the task list and can not be hidden. Exceptions: 1. Assigned and repeating tasks cannot be set as parent tasks (have subtasks), or be moved under a parent task (become subtasks). 2. Tasks that are both completed and hidden cannot be nested, so the parent field must be empty. previous: string, Optional. New previous sibling task identifier. If the task is moved to the first position among its siblings, this parameter is omitted. The task set as previous must exist in the task list and can not be hidden. Exceptions: 1. Tasks that are both completed and hidden can only be moved to position 0, so the previous field must be empty. x__xgafv: string, V1 error format. Allowed values diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json index f79fb48f5c..5bc06cb2f5 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json @@ -19841,7 +19841,7 @@ } } }, -"revision": "20250502", +"revision": "20250519", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -20483,6 +20483,10 @@ "apiKeySecretVersion": { "description": "Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}", "type": "string" +}, +"apiKeyString": { +"description": "The API key string. Either this or `api_key_secret_version` must be set.", +"type": "string" } }, "type": "object" @@ -20705,6 +20709,146 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1AuthConfig": { +"description": "Auth configuration to run the extension.", +"id": "GoogleCloudAiplatformV1AuthConfig", +"properties": { +"apiKeyConfig": { +"$ref": "GoogleCloudAiplatformV1AuthConfigApiKeyConfig", +"description": "Config for API key auth." +}, +"authType": { +"description": "Type of auth scheme.", +"enum": [ +"AUTH_TYPE_UNSPECIFIED", +"NO_AUTH", +"API_KEY_AUTH", +"HTTP_BASIC_AUTH", +"GOOGLE_SERVICE_ACCOUNT_AUTH", +"OAUTH", +"OIDC_AUTH" +], +"enumDescriptions": [ +"", +"No Auth.", +"API Key Auth.", +"HTTP Basic Auth.", +"Google Service Account Auth.", +"OAuth auth.", +"OpenID Connect (OIDC) Auth." +], +"type": "string" +}, +"googleServiceAccountConfig": { +"$ref": "GoogleCloudAiplatformV1AuthConfigGoogleServiceAccountConfig", +"description": "Config for Google Service Account auth." +}, +"httpBasicAuthConfig": { +"$ref": "GoogleCloudAiplatformV1AuthConfigHttpBasicAuthConfig", +"description": "Config for HTTP Basic auth." 
+}, +"oauthConfig": { +"$ref": "GoogleCloudAiplatformV1AuthConfigOauthConfig", +"description": "Config for user OAuth." +}, +"oidcConfig": { +"$ref": "GoogleCloudAiplatformV1AuthConfigOidcConfig", +"description": "Config for user OIDC auth." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1AuthConfigApiKeyConfig": { +"description": "Config for authentication with API key.", +"id": "GoogleCloudAiplatformV1AuthConfigApiKeyConfig", +"properties": { +"apiKeySecret": { +"description": "Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secret}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", +"type": "string" +}, +"apiKeyString": { +"description": "Optional. The API key to be used in the request directly.", +"type": "string" +}, +"httpElementLocation": { +"description": "Optional. The location of the API key.", +"enum": [ +"HTTP_IN_UNSPECIFIED", +"HTTP_IN_QUERY", +"HTTP_IN_HEADER", +"HTTP_IN_PATH", +"HTTP_IN_BODY", +"HTTP_IN_COOKIE" +], +"enumDescriptions": [ +"", +"Element is in the HTTP request query.", +"Element is in the HTTP request header.", +"Element is in the HTTP request path.", +"Element is in the HTTP request body.", +"Element is in the HTTP request cookie." +], +"type": "string" +}, +"name": { +"description": "Optional. The parameter name of the API key. E.g. If the API request is \"https://example.com/act?api_key=\", \"api_key\" would be the parameter name.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1AuthConfigGoogleServiceAccountConfig": { +"description": "Config for Google Service Account Authentication.", +"id": "GoogleCloudAiplatformV1AuthConfigGoogleServiceAccountConfig", +"properties": { +"serviceAccount": { +"description": "Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1AuthConfigHttpBasicAuthConfig": { +"description": "Config for HTTP Basic Authentication.", +"id": "GoogleCloudAiplatformV1AuthConfigHttpBasicAuthConfig", +"properties": { +"credentialSecret": { +"description": "Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secret}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1AuthConfigOauthConfig": { +"description": "Config for user OAuth.", +"id": "GoogleCloudAiplatformV1AuthConfigOauthConfig", +"properties": { +"accessToken": { +"description": "Access token for extension endpoint.
Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", +"type": "string" +}, +"serviceAccount": { +"description": "The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1AuthConfigOidcConfig": { +"description": "Config for user OIDC auth.", +"id": "GoogleCloudAiplatformV1AuthConfigOidcConfig", +"properties": { +"idToken": { +"description": "OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", +"type": "string" +}, +"serviceAccount": { +"description": "The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents).", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1AutomaticResources": { "description": "A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines.", "id": "GoogleCloudAiplatformV1AutomaticResources", @@ -25867,6 +26011,74 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1ExternalApi": { +"description": "Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but needs to follow the pre-defined API spec.", +"id": "GoogleCloudAiplatformV1ExternalApi", +"properties": { +"apiAuth": { +"$ref": "GoogleCloudAiplatformV1ApiAuth", +"deprecated": true, +"description": "The authentication config to access the API. Deprecated. Please use auth_config instead." +}, +"apiSpec": { +"description": "The API spec that the external API implements.", +"enum": [ +"API_SPEC_UNSPECIFIED", +"SIMPLE_SEARCH", +"ELASTIC_SEARCH" +], +"enumDescriptions": [ +"Unspecified API spec. This value should not be used.", +"Simple search API spec.", +"Elastic search API spec." +], +"type": "string" +}, +"authConfig": { +"$ref": "GoogleCloudAiplatformV1AuthConfig", +"description": "The authentication config to access the API." +}, +"elasticSearchParams": { +"$ref": "GoogleCloudAiplatformV1ExternalApiElasticSearchParams", +"description": "Parameters for the elastic search API." +}, +"endpoint": { +"description": "The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search", +"type": "string" +}, +"simpleSearchParams": { +"$ref": "GoogleCloudAiplatformV1ExternalApiSimpleSearchParams", +"description": "Parameters for the simple search API."
+} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1ExternalApiElasticSearchParams": { +"description": "The search parameters to use for the ELASTIC_SEARCH spec.", +"id": "GoogleCloudAiplatformV1ExternalApiElasticSearchParams", +"properties": { +"index": { +"description": "The ElasticSearch index to use.", +"type": "string" +}, +"numHits": { +"description": "Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param.", +"format": "int32", +"type": "integer" +}, +"searchTemplate": { +"description": "The ElasticSearch search template to use.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1ExternalApiSimpleSearchParams": { +"description": "The search parameters to use for SIMPLE_SEARCH spec.", +"id": "GoogleCloudAiplatformV1ExternalApiSimpleSearchParams", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1Fact": { "description": "The fact used in grounding.", "id": "GoogleCloudAiplatformV1Fact", @@ -28692,6 +28904,10 @@ "$ref": "GoogleCloudAiplatformV1RagFileTransformationConfig", "description": "Specifies the transformation config for RagFiles." }, +"rebuildAnnIndex": { +"description": "Rebuilds the ANN index to optimize for recall on the imported data. Only applicable for RagCorpora running on RagManagedDb with `retrieval_strategy` set to `ANN`. The rebuild will be performed using the existing ANN config set on the RagCorpus. To change the ANN config, please use the UpdateRagCorpus API. Default is false, i.e., index is not rebuilt.", +"type": "boolean" +}, "sharePointSources": { "$ref": "GoogleCloudAiplatformV1SharePointSources", "description": "SharePoint sources." @@ -33847,6 +34063,11 @@ false "description": "Optional. Text part (can be code).", "type": "string" }, +"thought": { +"description": "Output only. Indicates if the part is thought from the model.", +"readOnly": true, +"type": "boolean" +}, "videoMetadata": { "$ref": "GoogleCloudAiplatformV1VideoMetadata", "description": "Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data." @@ -36143,7 +36364,7 @@ false "type": "object" }, "GoogleCloudAiplatformV1RagFileParsingConfigLlmParser": { -"description": "Specifies the advanced parsing for RagFiles.", +"description": "Specifies the LLM parsing for RagFiles.", "id": "GoogleCloudAiplatformV1RagFileParsingConfigLlmParser", "properties": { "customParsingPrompt": { @@ -36307,6 +36528,38 @@ false "GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDb": { "description": "The config for the default RAG-managed Vector DB.", "id": "GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDb", +"properties": { +"ann": { +"$ref": "GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDbANN", +"description": "Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency." +}, +"knn": { +"$ref": "GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDbKNN", +"description": "Performs a KNN search on RagCorpus. Default choice if not specified." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDbANN": { +"description": "Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. 
As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API.", +"id": "GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDbANN", +"properties": { +"leafCount": { +"description": "Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500.", +"format": "int32", +"type": "integer" +}, +"treeDepth": { +"description": "The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have O(10K) files in the RagCorpus; set this to 3 if you have more than that. Default value is 2.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDbKNN": { +"description": "Config for KNN search.", +"id": "GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDbKNN", "properties": {}, "type": "object" }, @@ -36970,6 +37223,10 @@ false "description": "Optional. Deprecated. This option is no longer supported.", "type": "boolean" }, +"externalApi": { +"$ref": "GoogleCloudAiplatformV1ExternalApi", +"description": "Use data source powered by external API for grounding." +}, "vertexAiSearch": { "$ref": "GoogleCloudAiplatformV1VertexAISearch", "description": "Set to use data source powered by Vertex AI Search." @@ -46000,6 +46257,13 @@ false "description": "Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder", "id": "GoogleCloudAiplatformV1VertexAISearch", "properties": { +"dataStoreSpecs": { +"description": "Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used.", +"items": { +"$ref": "GoogleCloudAiplatformV1VertexAISearchDataStoreSpec" +}, +"type": "array" +}, "datastore": { "description": "Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", "type": "string" @@ -46020,6 +46284,21 @@ false }, "type": "object" }, +"GoogleCloudAiplatformV1VertexAISearchDataStoreSpec": { +"description": "Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec", +"id": "GoogleCloudAiplatformV1VertexAISearchDataStoreSpec", +"properties": { +"dataStore": { +"description": "Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", +"type": "string" +}, +"filter": { +"description": "Optional. Filter specification to filter documents in the data store specified by data_store field.
For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1VertexAiSearchConfig": { "description": "Config for the Vertex AI Search.", "id": "GoogleCloudAiplatformV1VertexAiSearchConfig", diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json index 200a342ed4..fd57187ff9 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json @@ -23425,7 +23425,7 @@ } } }, -"revision": "20250502", +"revision": "20250519", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -24078,6 +24078,10 @@ "apiKeySecretVersion": { "description": "Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}", "type": "string" +}, +"apiKeyString": { +"description": "The API key string. Either this or `api_key_secret_version` must be set.", +"type": "string" +} }, "type": "object" @@ -30623,6 +30627,74 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ExternalApi": { +"description": "Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but needs to follow the pre-defined API spec.", +"id": "GoogleCloudAiplatformV1beta1ExternalApi", +"properties": { +"apiAuth": { +"$ref": "GoogleCloudAiplatformV1beta1ApiAuth", +"deprecated": true, +"description": "The authentication config to access the API. Deprecated. Please use auth_config instead." +}, +"apiSpec": { +"description": "The API spec that the external API implements.", +"enum": [ +"API_SPEC_UNSPECIFIED", +"SIMPLE_SEARCH", +"ELASTIC_SEARCH" +], +"enumDescriptions": [ +"Unspecified API spec. This value should not be used.", +"Simple search API spec.", +"Elastic search API spec." +], +"type": "string" +}, +"authConfig": { +"$ref": "GoogleCloudAiplatformV1beta1AuthConfig", +"description": "The authentication config to access the API." +}, +"elasticSearchParams": { +"$ref": "GoogleCloudAiplatformV1beta1ExternalApiElasticSearchParams", +"description": "Parameters for the elastic search API." +}, +"endpoint": { +"description": "The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search", +"type": "string" +}, +"simpleSearchParams": { +"$ref": "GoogleCloudAiplatformV1beta1ExternalApiSimpleSearchParams", +"description": "Parameters for the simple search API." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExternalApiElasticSearchParams": { +"description": "The search parameters to use for the ELASTIC_SEARCH spec.", +"id": "GoogleCloudAiplatformV1beta1ExternalApiElasticSearchParams", +"properties": { +"index": { +"description": "The ElasticSearch index to use.", +"type": "string" +}, +"numHits": { +"description": "Optional. Number of hits (chunks) to request.
When specified, it is passed to Elasticsearch as the `num_hits` param.", +"format": "int32", +"type": "integer" +}, +"searchTemplate": { +"description": "The ElasticSearch search template to use.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExternalApiSimpleSearchParams": { +"description": "The search parameters to use for SIMPLE_SEARCH spec.", +"id": "GoogleCloudAiplatformV1beta1ExternalApiSimpleSearchParams", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1Fact": { "description": "The fact used in grounding.", "id": "GoogleCloudAiplatformV1beta1Fact", @@ -31245,7 +31317,7 @@ "id": "GoogleCloudAiplatformV1beta1FeatureSelectionConfigFeatureConfig", "properties": { "driftThreshold": { -"description": "Optional. Drift threshold. If calculated difference with baseline data larger than threshold, it will be considered as the feature has drift. If not present, the threshold will be default to 0.3.", +"description": "Optional. Drift threshold. If the calculated difference from the baseline data is larger than the threshold, the feature is considered to have drift. If not present, the threshold defaults to 0.3. Must be in range [0, 1).", "format": "double", "type": "number" }, @@ -34123,6 +34195,10 @@ "$ref": "GoogleCloudAiplatformV1beta1RagFileTransformationConfig", "description": "Specifies the transformation config for RagFiles." }, +"rebuildAnnIndex": { +"description": "Rebuilds the ANN index to optimize for recall on the imported data. Only applicable for RagCorpora running on RagManagedDb with `retrieval_strategy` set to `ANN`. The rebuild will be performed using the existing ANN config set on the RagCorpus. To change the ANN config, please use the UpdateRagCorpus API. Default is false, i.e., index is not rebuilt.", +"type": "boolean" +}, "sharePointSources": { "$ref": "GoogleCloudAiplatformV1beta1SharePointSources", "description": "SharePoint sources." @@ -37463,6 +37539,10 @@ false "description": "The display name of the ModelMonitor. The name can be up to 128 characters long and can consist of any UTF-8.", "type": "string" }, +"encryptionSpec": { +"$ref": "GoogleCloudAiplatformV1beta1EncryptionSpec", +"description": "Customer-managed encryption key spec for a ModelMonitor. If set, this ModelMonitor and all sub-resources of this ModelMonitor will be secured by this key." +}, "explanationSpec": { "$ref": "GoogleCloudAiplatformV1beta1ExplanationSpec", "description": "Optional model explanation spec. It is used for feature attribution monitoring." @@ -42683,6 +42763,10 @@ false "description": "Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.", "type": "string" }, +"encryptionSpec": { +"$ref": "GoogleCloudAiplatformV1beta1EncryptionSpec", +"description": "Optional. Immutable. The CMEK key name used to encrypt at-rest data related to this Corpus. Only applicable to RagManagedDb option for Vector DB. This field can only be set at corpus creation time, and cannot be updated or deleted." +}, "name": { "description": "Output only.
The resource name of the RagCorpus.", "readOnly": true, @@ -43001,7 +43085,7 @@ false "type": "object" }, "GoogleCloudAiplatformV1beta1RagFileParsingConfigLlmParser": { -"description": "Specifies the advanced parsing for RagFiles.", +"description": "Specifies the LLM parsing for RagFiles.", "id": "GoogleCloudAiplatformV1beta1RagFileParsingConfigLlmParser", "properties": { "customParsingPrompt": { @@ -43244,6 +43328,38 @@ false "GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDb": { "description": "The config for the default RAG-managed Vector DB.", "id": "GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDb", +"properties": { +"ann": { +"$ref": "GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDbANN", +"description": "Performs an ANN search on RagCorpus. Use this if you have a lot of files (> 10K) in your RagCorpus and want to reduce the search latency." +}, +"knn": { +"$ref": "GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDbKNN", +"description": "Performs a KNN search on RagCorpus. Default choice if not specified." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDbANN": { +"description": "Config for ANN search. RagManagedDb uses a tree-based structure to partition data and facilitate faster searches. As a tradeoff, it requires longer indexing time and manual triggering of index rebuild via the ImportRagFiles and UpdateRagCorpus API.", +"id": "GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDbANN", +"properties": { +"leafCount": { +"description": "Number of leaf nodes in the tree-based structure. Each leaf node contains groups of closely related vectors along with their corresponding centroid. Recommended value is 10 * sqrt(num of RagFiles in your RagCorpus). Default value is 500.", +"format": "int32", +"type": "integer" +}, +"treeDepth": { +"description": "The depth of the tree-based structure. Only depth values of 2 and 3 are supported. Recommended value is 2 if you have if you have O(10K) files in the RagCorpus and set this to 3 if more than that. Default value is 2.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDbKNN": { +"description": "Config for KNN search.", +"id": "GoogleCloudAiplatformV1beta1RagVectorDbConfigRagManagedDbKNN", "properties": {}, "type": "object" }, @@ -44073,6 +44189,10 @@ false "description": "Optional. Deprecated. This option is no longer supported.", "type": "boolean" }, +"externalApi": { +"$ref": "GoogleCloudAiplatformV1beta1ExternalApi", +"description": "Use data source powered by external API for grounding." +}, "vertexAiSearch": { "$ref": "GoogleCloudAiplatformV1beta1VertexAISearch", "description": "Set to use data source powered by Vertex AI Search." @@ -53883,6 +54003,13 @@ false "description": "Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder", "id": "GoogleCloudAiplatformV1beta1VertexAISearch", "properties": { +"dataStoreSpecs": { +"description": "Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1VertexAISearchDataStoreSpec" +}, +"type": "array" +}, "datastore": { "description": "Optional. Fully-qualified Vertex AI Search data store resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", "type": "string" @@ -53903,6 +54030,21 @@ false }, "type": "object" }, +"GoogleCloudAiplatformV1beta1VertexAISearchDataStoreSpec": { +"description": "Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec", +"id": "GoogleCloudAiplatformV1beta1VertexAISearchDataStoreSpec", +"properties": { +"dataStore": { +"description": "Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", +"type": "string" +}, +"filter": { +"description": "Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1VertexAiSearchConfig": { "description": "Config for the Vertex AI Search.", "id": "GoogleCloudAiplatformV1beta1VertexAiSearchConfig", diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json index aa626bcb81..6920546631 100644 --- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json +++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json @@ -4146,7 +4146,8 @@ ] }, "get": { -"description": "Checks whether a user's subscription purchase is valid and returns its expiry time.", +"deprecated": true, +"description": "Deprecated: Use purchases.subscriptionsv2.get instead. Checks whether a user's subscription purchase is valid and returns its expiry time.", "flatPath": "androidpublisher/v3/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}", "httpMethod": "GET", "id": "androidpublisher.purchases.subscriptions.get", @@ -4184,7 +4185,8 @@ ] }, "refund": { -"description": "Refunds a user's subscription purchase, but the subscription remains valid until its expiration time and it will continue to recur.", +"deprecated": true, +"description": "Deprecated: Use orders.refund instead. Refunds a user's subscription purchase, but the subscription remains valid until its expiration time and it will continue to recur.", "flatPath": "androidpublisher/v3/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:refund", "httpMethod": "POST", "id": "androidpublisher.purchases.subscriptions.refund", @@ -4219,7 +4221,8 @@ ] }, "revoke": { -"description": "Refunds and immediately revokes a user's subscription purchase. Access to the subscription will be terminated immediately and it will stop recurring.", +"deprecated": true, +"description": "Deprecated: Use purchases.subscriptionsv2.revoke instead. Refunds and immediately revokes a user's subscription purchase. 
Access to the subscription will be terminated immediately and it will stop recurring.", "flatPath": "androidpublisher/v3/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:revoke", "httpMethod": "POST", "id": "androidpublisher.purchases.subscriptions.revoke", @@ -4793,7 +4796,7 @@ } } }, -"revision": "20250519", +"revision": "20250520", "rootUrl": "https://androidpublisher.googleapis.com/", "schemas": { "Abi": { @@ -7775,7 +7778,7 @@ false "type": "object" }, "Order": { -"description": "Details of an order.", +"description": "The Order resource encapsulates comprehensive information about a transaction made on Google Play. It includes a variety of attributes that provide details about the order itself, the products purchased, and the history of events related to the order. The Orders APIs provide real-time access to your order data within the Google Play ecosystem. You can retrieve detailed information and metadata for both one-time and recurring orders, including transaction details like charges, taxes, and refunds, as well as metadata such as pricing phases for subscriptions. The Orders APIs let you automate tasks related to order management, reducing the need for manual checks via the Play Developer Console. The following are some of the use cases for this API: + Real-time order data retrieval - Get order details and metadata immediately after a purchase using an order ID. + Order update synchronization - Periodically sync order updates to maintain an up-to-date record of order information. Note: + The Orders API calls count towards your Play Developer API quota, which defaults to 200K daily, and may be insufficient to sync extensive order histories. + A maximum of 1000 orders can be retrieved per call. Using larger page sizes is recommended to minimize quota usage. Check your quota in the Cloud Console and request more if required.", "id": "Order", "properties": { "buyerAddress": { @@ -9450,7 +9453,8 @@ false "type": "string" }, "latestOrderId": { -"description": "The order id of the latest order associated with the purchase of the subscription. For autoRenewing subscription, this is the order id of signup order if it is not renewed yet, or the last recurring order id (success, pending, or declined order). For prepaid subscription, this is the order id associated with the queried purchase token.", +"deprecated": true, +"description": "Deprecated: Use line_items.latest_successful_order_id instead. The order id of the latest order associated with the purchase of the subscription. For autoRenewing subscription, this is the order id of signup order if it is not renewed yet, or the last recurring order id (success, pending, or declined order). For prepaid subscription, this is the order id associated with the queried purchase token.", "type": "string" }, "lineItems": { diff --git a/googleapiclient/discovery_cache/documents/apigee.v1.json b/googleapiclient/discovery_cache/documents/apigee.v1.json index 5fe0d4705f..8b86cfe2c9 100644 --- a/googleapiclient/discovery_cache/documents/apigee.v1.json +++ b/googleapiclient/discovery_cache/documents/apigee.v1.json @@ -2251,7 +2251,7 @@ ] }, "updateAppGroupAppKey": { -"description": "Adds an API product to an AppGroupAppKey, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes to the AppGroupAppKey. This API replaces the existing attributes with those specified in the request. 
Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.",
+"description": "Adds an API product to an AppGroupAppKey, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes and scopes to the AppGroupAppKey. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.",
"flatPath": "v1/organizations/{organizationsId}/appgroups/{appgroupsId}/apps/{appsId}/keys/{keysId}",
"httpMethod": "POST",
"id": "apigee.organizations.appgroups.apps.keys.updateAppGroupAppKey",
@@ -3402,7 +3402,7 @@
]
},
"updateDeveloperAppKey": {
-"description": "Adds an API product to a developer app key, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes to a developer app key. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.",
+"description": "Adds an API product to a developer app key, enabling the app that holds the key to access the API resources bundled in the API product. In addition, you can add attributes and scopes associated with the API product to the developer app key. The status of the key can be updated via the \"action\" query parameter. None of the other fields can be updated via this API. This API replaces the existing attributes with those specified in the request. Include or exclude any existing attributes that you want to retain or delete, respectively. You can use the same key to access all API products associated with the app.",
"flatPath": "v1/organizations/{organizationsId}/developers/{developersId}/apps/{appsId}/keys/{keysId}",
"httpMethod": "POST",
"id": "apigee.organizations.developers.apps.keys.updateDeveloperAppKey",
@@ -11063,7 +11063,7 @@
}
}
},
-"revision": "20250506",
+"revision": "20250519",
"rootUrl": "https://apigee.googleapis.com/",
"schemas": {
"EdgeConfigstoreBundleBadBundle": {
@@ -19547,7 +19547,7 @@
},
"appGroupAppKey": {
"$ref": "GoogleCloudApigeeV1AppGroupAppKey",
-"description": "The new AppGroupKey to be amended. Note that the status can be updated only via action."
+"description": "Note that only the scopes and attributes of the AppGroupAppKey can be amended."
}
},
"type": "object"
diff --git a/googleapiclient/discovery_cache/documents/apihub.v1.json b/googleapiclient/discovery_cache/documents/apihub.v1.json
index 9feea2bf60..72e098a47a 100644
--- a/googleapiclient/discovery_cache/documents/apihub.v1.json
+++ b/googleapiclient/discovery_cache/documents/apihub.v1.json
@@ -2780,7 +2780,7 @@
}
}
},
-"revision": "20250324",
+"revision": "20250514",
"rootUrl": "https://apihub.googleapis.com/",
"schemas": {
"Empty": {
@@ -3946,7 +3946,7 @@
"type": "string"
},
"resourceUri": {
-"description": "Required. A URI to the runtime resource. This URI can be used to manage the resource. For example, if the runtime resource is of type APIGEE_PROXY, then this field will contain the URI to the management UI of the proxy.",
+"description": "Required. A URI that uniquely identifies the deployment within a particular gateway. 
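Both Apigee key-update methods above replace, rather than merge, the attributes on the key. Here is a hedged sketch of the developer-app-key variant with google-api-python-client; the key resource name, product name, and attribute values are invented for illustration, and the body fields are assumed to follow the DeveloperAppKey resource.

```python
from googleapiclient.discovery import build

apigee = build("apigee", "v1")

# Hypothetical key resource name and payload values.
KEY_NAME = "organizations/my-org/developers/dev@example.com/apps/my-app/keys/my-key"
body = {
    "apiProducts": ["my-product"],  # product(s) the key may access
    "attributes": [{"name": "tier", "value": "gold"}],  # replaces ALL existing attributes
}

updated = (
    apigee.organizations().developers().apps().keys()
    .updateDeveloperAppKey(name=KEY_NAME, body=body)
    .execute()
)
```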
For example, if the runtime resource is of type APIGEE_PROXY, then this field will be a combination of org, proxy name and environment.", "type": "string" }, "slo": { diff --git a/googleapiclient/discovery_cache/documents/apim.v1alpha.json b/googleapiclient/discovery_cache/documents/apim.v1alpha.json index 88ba6e3aa2..08ed374c3f 100644 --- a/googleapiclient/discovery_cache/documents/apim.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/apim.v1alpha.json @@ -143,6 +143,12 @@ "name" ], "parameters": { +"extraLocationTypes": { +"description": "Optional. A list of extra location types that should be used as conditions for controlling the visibility of the locations.", +"location": "query", +"repeated": true, +"type": "string" +}, "filter": { "description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).", "location": "query", @@ -690,7 +696,7 @@ "operations": { "methods": { "cancel": { -"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.", "flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "apim.projects.locations.operations.cancel", @@ -815,7 +821,7 @@ } } }, -"revision": "20240717", +"revision": "20250521", "rootUrl": "https://apim.googleapis.com/", "schemas": { "ApiObservation": { diff --git a/googleapiclient/discovery_cache/documents/areainsights.v1.json b/googleapiclient/discovery_cache/documents/areainsights.v1.json index 4bb0983531..ba96a13360 100644 --- a/googleapiclient/discovery_cache/documents/areainsights.v1.json +++ b/googleapiclient/discovery_cache/documents/areainsights.v1.json @@ -128,7 +128,7 @@ } } }, -"revision": "20250325", +"revision": "20250521", "rootUrl": "https://areainsights.googleapis.com/", "schemas": { "Circle": { @@ -353,7 +353,7 @@ "id": "Region", "properties": { "place": { -"description": "The unique identifier of a specific geographic region.", +"description": "The [place ID](https://developers.google.com/maps/documentation/places/web-service/place-id) of the geographic region. Not all region types are supported; see documentation for details. **Format:** Must be in the format `places/PLACE_ID`, where `PLACE_ID` is the unique identifier of a place. 
For example: `places/ChIJPV4oX_65j4ARVW8IJ6IJUYs`.",
"type": "string"
}
},
diff --git a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json
index 1052fa98d7..f92140fc4f 100644
--- a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json
+++ b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json
@@ -1127,7 +1127,7 @@
]
},
"submit": {
-"description": "Submit a provisiong configuration for a given project.",
+"description": "Submit a provisioning configuration for a given project.",
"flatPath": "v2/projects/{projectsId}/locations/{locationsId}/provisioningConfigs:submit",
"httpMethod": "POST",
"id": "baremetalsolution.projects.locations.provisioningConfigs.submit",
@@ -1728,7 +1728,7 @@
}
}
},
-"revision": "20250414",
+"revision": "20250519",
"rootUrl": "https://baremetalsolution.googleapis.com/",
"schemas": {
"AllowedClient": {
@@ -2072,7 +2072,7 @@
},
"id": {
"deprecated": true,
-"description": "A transient unique identifier to idenfity an instance within an ProvisioningConfig request.",
+"description": "A transient unique identifier to identify an instance within a ProvisioningConfig request.",
"type": "string"
},
"instanceType": {
diff --git a/googleapiclient/discovery_cache/documents/batch.v1.json b/googleapiclient/discovery_cache/documents/batch.v1.json
index 42c6061207..b11d2528ba 100644
--- a/googleapiclient/discovery_cache/documents/batch.v1.json
+++ b/googleapiclient/discovery_cache/documents/batch.v1.json
@@ -595,7 +595,7 @@
}
}
},
-"revision": "20250409",
+"revision": "20250519",
"rootUrl": "https://batch.googleapis.com/",
"schemas": {
"Accelerator": {
@@ -1300,6 +1300,12 @@
"SPOT",
"PREEMPTIBLE"
],
+"enumDeprecated": [
+false,
+false,
+false,
+true
+],
"enumDescriptions": [
"Unspecified.",
"Standard VM.",
@@ -1362,6 +1368,12 @@
"SPOT",
"PREEMPTIBLE"
],
+"enumDeprecated": [
+false,
+false,
+false,
+true
+],
"enumDescriptions": [
"Unspecified.",
"Standard VM.",
diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
index 5442f76c96..2db6280c13 100644
--- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
+++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
@@ -690,7 +690,7 @@
]
},
"getIamPolicy": {
-"description": "Gets the access control policy for a resource. May return: * A`NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.",
+"description": "Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. 
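The Batch enum change above marks `PREEMPTIBLE` as deprecated while leaving `SPOT` as the supported low-cost provisioning model. A minimal job fragment, assuming the usual `allocationPolicy.instances[].policy` shape of a Batch v1 Job body; the machine type is an illustrative choice.

```python
# Fragment of a Batch v1 Job body selecting the non-deprecated model.
allocation_policy = {
    "instances": [
        {
            "policy": {
                "machineType": "e2-standard-4",
                "provisioningModel": "SPOT",  # prefer SPOT; PREEMPTIBLE is deprecated
            }
        }
    ]
}
```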
Supported resources are: - Reservations - ReservationAssignments To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/reservations/{reservationsId}:getIamPolicy",
"httpMethod": "GET",
"id": "bigqueryreservation.projects.locations.reservations.getIamPolicy",
@@ -915,6 +915,38 @@
"https://www.googleapis.com/auth/cloud-platform"
]
},
+"getIamPolicy": {
+"description": "Gets the access control policy for a resource. May return: * A `NOT_FOUND` error if the resource doesn't exist or you don't have the permission to view it. * An empty policy if the resource exists but doesn't have a set policy. Supported resources are: - Reservations - ReservationAssignments To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.getIamPolicy` to get policies on reservations.",
+"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/reservations/{reservationsId}/assignments/{assignmentsId}:getIamPolicy",
+"httpMethod": "GET",
+"id": "bigqueryreservation.projects.locations.reservations.assignments.getIamPolicy",
+"parameterOrder": [
+"resource"
+],
+"parameters": {
+"options.requestedPolicyVersion": {
+"description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).",
+"format": "int32",
+"location": "query",
+"type": "integer"
+},
+"resource": {
+"description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.",
+"location": "path",
+"pattern": "^projects/[^/]+/locations/[^/]+/reservations/[^/]+/assignments/[^/]+$",
+"required": true,
+"type": "string"
+}
+},
+"path": "v1/{+resource}:getIamPolicy",
+"response": {
+"$ref": "Policy"
+},
+"scopes": [
+"https://www.googleapis.com/auth/bigquery",
+"https://www.googleapis.com/auth/cloud-platform"
+]
+},
"list": {
"description": "Lists assignments. Only explicitly created assignments will be returned. Example: * Organization `organizationA` contains two projects, `project1` and `project2`. * Reservation `res1` exists and was created previously. * CreateAssignment was used previously to define the following associations between entities and reservations: `` and `` In this example, ListAssignments will just return the above two assignments for reservation `res1`, and no expansion/merge will happen. The wildcard "-" can be used for reservations in the request. In that case all assignments belongs to the specified project and location will be listed. 
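With `getIamPolicy` now exposed on reservation assignments (the method added just above), reading a policy looks the same as for reservations, one level deeper. A sketch with a placeholder assignment name; in the Python client the dotted query parameter `options.requestedPolicyVersion` becomes an underscore-separated keyword.

```python
from googleapiclient.discovery import build

reservations = build("bigqueryreservation", "v1")

ASSIGNMENT = (  # placeholder resource name
    "projects/my-project/locations/US/reservations/prod/assignments/1234"
)
policy = (
    reservations.projects().locations().reservations().assignments()
    .getIamPolicy(resource=ASSIGNMENT, options_requestedPolicyVersion=3)
    .execute()
)
for binding in policy.get("bindings", []):
    print(binding["role"], binding.get("members", []))
```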
**Note** \"-\" cannot be used for projects nor locations.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/reservations/{reservationsId}/assignments", @@ -1015,6 +1047,64 @@ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] +}, +"setIamPolicy": { +"description": "Sets an access control policy for a resource. Replaces any existing policy. Supported resources are: - Reservations To call this method, you must have the following Google IAM permissions: - `bigqueryreservation.reservations.setIamPolicy` to set policies on reservations.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/reservations/{reservationsId}/assignments/{assignmentsId}:setIamPolicy", +"httpMethod": "POST", +"id": "bigqueryreservation.projects.locations.reservations.assignments.setIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/reservations/[^/]+/assignments/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:setIamPolicy", +"request": { +"$ref": "SetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/bigquery", +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"testIamPermissions": { +"description": "Gets your permissions on a resource. Returns an empty set of permissions if the resource doesn't exist. Supported resources are: - Reservations No Google IAM permissions are required to call this method.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/reservations/{reservationsId}/assignments/{assignmentsId}:testIamPermissions", +"httpMethod": "POST", +"id": "bigqueryreservation.projects.locations.reservations.assignments.testIamPermissions", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/reservations/[^/]+/assignments/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:testIamPermissions", +"request": { +"$ref": "TestIamPermissionsRequest" +}, +"response": { +"$ref": "TestIamPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/bigquery", +"https://www.googleapis.com/auth/cloud-platform" +] } } } @@ -1025,7 +1115,7 @@ } } }, -"revision": "20250503", +"revision": "20250518", "rootUrl": "https://bigqueryreservation.googleapis.com/", "schemas": { "Assignment": { @@ -1389,7 +1479,22 @@ false "FailoverReservationRequest": { "description": "The request for ReservationService.FailoverReservation.", "id": "FailoverReservationRequest", -"properties": {}, +"properties": { +"failoverMode": { +"description": "Optional. failover mode for the failover operation.", +"enum": [ +"FAILOVER_MODE_UNSPECIFIED", +"SOFT", +"HARD" +], +"enumDescriptions": [ +"Invalid value.", +"When customers initiate a soft failover, BigQuery will wait until all committed writes are replicated to the secondary.", +"When customers initiate a hard failover, BigQuery will not wait until all committed writes are replicated to the secondary. 
There can be data loss for hard failover." +], +"type": "string" +} +}, "type": "object" }, "ListAssignmentsResponse": { @@ -1530,6 +1635,12 @@ false "format": "google-datetime", "readOnly": true, "type": "string" +}, +"softFailoverStartTime": { +"description": "Output only. The time at which a soft failover for the reservation and its associated datasets was initiated. After this field is set, all subsequent changes to the reservation will be rejected unless a hard failover overrides this operation. This field will be cleared once the failover is complete.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json index b9abefae34..1efce04388 100644 --- a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json +++ b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json @@ -131,6 +131,66 @@ "resources": { "operations": { "methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.", +"flatPath": "v2/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "bigtableadmin.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^operations/.*$", +"required": true, +"type": "string" +} +}, +"path": "v2/{+name}:cancel", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/bigtable.admin", +"https://www.googleapis.com/auth/bigtable.admin.cluster", +"https://www.googleapis.com/auth/bigtable.admin.instance", +"https://www.googleapis.com/auth/cloud-bigtable.admin", +"https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v2/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "bigtableadmin.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^operations/.*$", +"required": true, +"type": "string" +} +}, +"path": "v2/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/bigtable.admin", +"https://www.googleapis.com/auth/bigtable.admin.cluster", +"https://www.googleapis.com/auth/bigtable.admin.instance", +"https://www.googleapis.com/auth/cloud-bigtable.admin", +"https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", +"https://www.googleapis.com/auth/cloud-platform" +] +}, "get": { "description": "Gets the latest state of a long-running operation. 
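The newly exposed `operations.cancel` and `operations.delete` follow the standard long-running-operations contract: cancel is best-effort (a cancelled operation ends with code `1`, `CANCELLED`), and delete only discards the record without stopping the work. A sketch with a placeholder operation name:

```python
from googleapiclient.discovery import build

admin = build("bigtableadmin", "v2")

# Placeholder operation name matching the documented `operations/.*` pattern.
OPERATION = "operations/projects/my-project/instances/my-instance/operations/42"

admin.operations().cancel(name=OPERATION).execute()    # best-effort cancellation
op = admin.operations().get(name=OPERATION).execute()  # poll to see the outcome
if op.get("done"):
    admin.operations().delete(name=OPERATION).execute()  # drop the record
```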
Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v2/operations/{operationsId}", @@ -2719,6 +2779,37 @@ }, "locations": { "methods": { +"get": { +"description": "Gets information about a location.", +"flatPath": "v2/projects/{projectsId}/locations/{locationsId}", +"httpMethod": "GET", +"id": "bigtableadmin.projects.locations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Resource name for the location.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2/{+name}", +"response": { +"$ref": "Location" +}, +"scopes": [ +"https://www.googleapis.com/auth/bigtable.admin", +"https://www.googleapis.com/auth/bigtable.admin.cluster", +"https://www.googleapis.com/auth/bigtable.admin.instance", +"https://www.googleapis.com/auth/cloud-bigtable.admin", +"https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloud-platform.read-only" +] +}, "list": { "description": "Lists information about the supported locations for this service.", "flatPath": "v2/projects/{projectsId}/locations", @@ -2777,7 +2868,7 @@ } } }, -"revision": "20250430", +"revision": "20250512", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "AppProfile": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v2.json b/googleapiclient/discovery_cache/documents/blogger.v2.json index f60f10181b..6ea14d5373 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v2.json +++ b/googleapiclient/discovery_cache/documents/blogger.v2.json @@ -401,13 +401,14 @@ } } }, -"revision": "20240708", +"revision": "20250524", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { "id": "Blog", "properties": { "customMetaData": { +"deprecated": true, "description": "The JSON custom meta-data for the Blog.", "type": "string" }, @@ -899,6 +900,7 @@ "type": "string" }, "customMetaData": { +"deprecated": true, "description": "The JSON meta-data for the Post.", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/blogger.v3.json b/googleapiclient/discovery_cache/documents/blogger.v3.json index 67ec667587..fd09633bb2 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v3.json +++ b/googleapiclient/discovery_cache/documents/blogger.v3.json @@ -1710,13 +1710,14 @@ } } }, -"revision": "20240708", +"revision": "20250524", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { "id": "Blog", "properties": { "customMetaData": { +"deprecated": true, "description": "The JSON custom meta-data for the Blog.", "type": "string" }, @@ -2250,6 +2251,7 @@ "type": "string" }, "customMetaData": { +"deprecated": true, "description": "The JSON meta-data for the Post.", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json index 4796d5be72..375ad4e6bd 100644 --- a/googleapiclient/discovery_cache/documents/chat.v1.json +++ b/googleapiclient/discovery_cache/documents/chat.v1.json @@ -1526,7 +1526,7 @@ } } }, -"revision": "20250508", +"revision": "20250516", "rootUrl": "https://chat.googleapis.com/", "schemas": { "AccessSettings": { @@ -3468,9 +3468,6 @@ "description": "For multiselect menus, a text description or label that's displayed below the item's `text` field.", "type": "string" }, -"materialIcon": { -"$ref": 
"GoogleAppsCardV1MaterialIcon" -}, "selected": { "description": "Whether the item is selected by default. If the selection input only accepts one value (such as for radio buttons or a dropdown menu), only set this field for one item.", "type": "boolean" diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json index 4963829cb6..97521f5a05 100644 --- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json @@ -228,6 +228,32 @@ "https://www.googleapis.com/auth/cloudkms" ] }, +"getKajPolicyConfig": { +"description": "Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.", +"flatPath": "v1/folders/{foldersId}/kajPolicyConfig", +"httpMethod": "GET", +"id": "cloudkms.folders.getKajPolicyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the KeyAccessJustificationsPolicyConfig to get.", +"location": "path", +"pattern": "^folders/[^/]+/kajPolicyConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, "updateAutokeyConfig": { "description": "Updates the AutokeyConfig for a folder. The caller must have both `cloudkms.autokeyConfigs.update` permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` permission on the provided key project. A KeyHandle creation in the folder's descendant projects will use this configuration to determine where to create the resulting CryptoKey.", "flatPath": "v1/folders/{foldersId}/autokeyConfig", @@ -262,11 +288,137 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloudkms" ] +}, +"updateKajPolicyConfig": { +"description": "Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.", +"flatPath": "v1/folders/{foldersId}/kajPolicyConfig", +"httpMethod": "PATCH", +"id": "cloudkms.folders.updateKajPolicyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of \"{organizations|folders|projects}/*/kajPolicyConfig\".", +"location": "path", +"pattern": "^folders/[^/]+/kajPolicyConfig$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"response": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +} +} +}, +"organizations": { +"methods": { +"getKajPolicyConfig": { +"description": "Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.", +"flatPath": "v1/organizations/{organizationsId}/kajPolicyConfig", +"httpMethod": "GET", +"id": "cloudkms.organizations.getKajPolicyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The name of the KeyAccessJustificationsPolicyConfig to get.", +"location": "path", +"pattern": "^organizations/[^/]+/kajPolicyConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, +"updateKajPolicyConfig": { +"description": "Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.", +"flatPath": "v1/organizations/{organizationsId}/kajPolicyConfig", +"httpMethod": "PATCH", +"id": "cloudkms.organizations.updateKajPolicyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of \"{organizations|folders|projects}/*/kajPolicyConfig\".", +"location": "path", +"pattern": "^organizations/[^/]+/kajPolicyConfig$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"response": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] } } }, "projects": { "methods": { +"getKajPolicyConfig": { +"description": "Gets the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.", +"flatPath": "v1/projects/{projectsId}/kajPolicyConfig", +"httpMethod": "GET", +"id": "cloudkms.projects.getKajPolicyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the KeyAccessJustificationsPolicyConfig to get.", +"location": "path", +"pattern": "^projects/[^/]+/kajPolicyConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, "showEffectiveAutokeyConfig": { "description": "Returns the effective Cloud KMS Autokey configuration for a given project.", "flatPath": "v1/projects/{projectsId}:showEffectiveAutokeyConfig", @@ -292,6 +444,93 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloudkms" ] +}, +"showEffectiveKeyAccessJustificationsEnrollmentConfig": { +"description": "Returns the KeyAccessJustificationsEnrollmentConfig of the resource closest to the given project in hierarchy.", +"flatPath": "v1/projects/{projectsId}:showEffectiveKeyAccessJustificationsEnrollmentConfig", +"httpMethod": "GET", +"id": "cloudkms.projects.showEffectiveKeyAccessJustificationsEnrollmentConfig", +"parameterOrder": [ +"project" +], +"parameters": { +"project": { +"description": "Required. 
The number or id of the project to get the effective KeyAccessJustificationsEnrollmentConfig for.", +"location": "path", +"pattern": "^projects/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+project}:showEffectiveKeyAccessJustificationsEnrollmentConfig", +"response": { +"$ref": "ShowEffectiveKeyAccessJustificationsEnrollmentConfigResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, +"showEffectiveKeyAccessJustificationsPolicyConfig": { +"description": "Returns the KeyAccessJustificationsPolicyConfig of the resource closest to the given project in hierarchy.", +"flatPath": "v1/projects/{projectsId}:showEffectiveKeyAccessJustificationsPolicyConfig", +"httpMethod": "GET", +"id": "cloudkms.projects.showEffectiveKeyAccessJustificationsPolicyConfig", +"parameterOrder": [ +"project" +], +"parameters": { +"project": { +"description": "Required. The number or id of the project to get the effective KeyAccessJustificationsPolicyConfig. In the format of \"projects/{|}\"", +"location": "path", +"pattern": "^projects/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+project}:showEffectiveKeyAccessJustificationsPolicyConfig", +"response": { +"$ref": "ShowEffectiveKeyAccessJustificationsPolicyConfigResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, +"updateKajPolicyConfig": { +"description": "Updates the KeyAccessJustificationsPolicyConfig for a given organization/folder/projects.", +"flatPath": "v1/projects/{projectsId}/kajPolicyConfig", +"httpMethod": "PATCH", +"id": "cloudkms.projects.updateKajPolicyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. The resource name for this KeyAccessJustificationsPolicyConfig in the format of \"{organizations|folders|projects}/*/kajPolicyConfig\".", +"location": "path", +"pattern": "^projects/[^/]+/kajPolicyConfig$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"response": { +"$ref": "KeyAccessJustificationsPolicyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] } }, "resources": { @@ -2163,7 +2402,7 @@ } } }, -"revision": "20250501", +"revision": "20250516", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { @@ -2347,6 +2586,10 @@ "description": "Cloud KMS Autokey configuration for a folder.", "id": "AutokeyConfig", "properties": { +"etag": { +"description": "Optional. A checksum computed by the server based on the value of other fields. This may be sent on update requests to ensure that the client has an up-to-date value before proceeding. The request will be rejected with an ABORTED error on a mismatched etag.", +"type": "string" +}, "keyProject": { "description": "Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision a new CryptoKey when a KeyHandle is created. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. 
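The `kajPolicyConfig` methods added above exist at organization, folder, and project scope as a singleton get/patch pair. A hedged read-modify-write sketch at folder scope: the folder ID and access reason are invented, and `allowedAccessReasons` is assumed to be the relevant field of `KeyAccessJustificationsPolicy`, whose properties are not reproduced in this diff.

```python
from googleapiclient.discovery import build

kms = build("cloudkms", "v1")

NAME = "folders/123456789/kajPolicyConfig"  # placeholder folder ID

config = kms.folders().getKajPolicyConfig(name=NAME).execute()

# Set the default policy stamped onto new CryptoKeys created under the folder.
config["defaultKeyAccessJustificationPolicy"] = {
    "allowedAccessReasons": ["CUSTOMER_INITIATED_ACCESS"],  # assumed field/value
}
kms.folders().updateKajPolicyConfig(
    name=NAME,
    updateMask="defaultKeyAccessJustificationPolicy",
    body=config,
).execute()
```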
Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions). A request with an empty key project field will clear the configuration.", "type": "string" @@ -2777,7 +3020,7 @@ "This version is still being generated. It may not be used, enabled, disabled, or destroyed yet. Cloud KMS will automatically mark this version ENABLED as soon as the version is ready.", "This version may be used for cryptographic operations.", "This version may not be used, but the key material is still available, and the version can be placed back into the ENABLED state.", -"This key material of this version is destroyed and no longer stored. This version may only become ENABLED again if this version is reimport_eligible and the original key material is reimported with a call to KeyManagementService.ImportCryptoKeyVersion.", +"The key material of this version is destroyed and no longer stored. This version may only become ENABLED again if this version is reimport_eligible and the original key material is reimported with a call to KeyManagementService.ImportCryptoKeyVersion.", "This version is scheduled for destruction, and will be destroyed soon. Call RestoreCryptoKeyVersion to put it back into the DISABLED state.", "This version is still being imported. It may not be used, enabled, disabled, or destroyed yet. Cloud KMS will automatically mark this version ENABLED as soon as the version is ready.", "This version was not imported successfully. It may not be used, enabled, disabled, or destroyed. The submitted key material has been discarded. Additional details can be found in CryptoKeyVersion.import_failure_reason.", @@ -3427,6 +3670,21 @@ }, "type": "object" }, +"KeyAccessJustificationsEnrollmentConfig": { +"description": "The configuration of a protection level for a project's Key Access Justifications enrollment.", +"id": "KeyAccessJustificationsEnrollmentConfig", +"properties": { +"auditLogging": { +"description": "Whether the project has KAJ logging enabled.", +"type": "boolean" +}, +"policyEnforcement": { +"description": "Whether the project is enrolled in KAJ policy enforcement.", +"type": "boolean" +} +}, +"type": "object" +}, "KeyAccessJustificationsPolicy": { "description": "A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey.", "id": "KeyAccessJustificationsPolicy", @@ -3469,6 +3727,21 @@ }, "type": "object" }, +"KeyAccessJustificationsPolicyConfig": { +"description": "A singleton configuration for Key Access Justifications policies.", +"id": "KeyAccessJustificationsPolicyConfig", +"properties": { +"defaultKeyAccessJustificationPolicy": { +"$ref": "KeyAccessJustificationsPolicy", +"description": "Optional. The default key access justification policy used when a CryptoKey is created in this folder. This is only used when a Key Access Justifications policy is not provided in the CreateCryptoKeyRequest. This overrides any default policies in its ancestry." +}, +"name": { +"description": "Identifier. 
The resource name for this KeyAccessJustificationsPolicyConfig in the format of \"{organizations|folders|projects}/*/kajPolicyConfig\".", +"type": "string" +} +}, +"type": "object" +}, "KeyHandle": { "description": "Resource-oriented representation of a request to Cloud KMS Autokey and the resulting provisioning of a CryptoKey.", "id": "KeyHandle", @@ -4329,6 +4602,36 @@ }, "type": "object" }, +"ShowEffectiveKeyAccessJustificationsEnrollmentConfigResponse": { +"description": "Response message for KeyAccessJustificationsConfig.ShowEffectiveKeyAccessJustificationsEnrollmentConfig", +"id": "ShowEffectiveKeyAccessJustificationsEnrollmentConfigResponse", +"properties": { +"externalConfig": { +"$ref": "KeyAccessJustificationsEnrollmentConfig", +"description": "The effective KeyAccessJustificationsEnrollmentConfig for external keys." +}, +"hardwareConfig": { +"$ref": "KeyAccessJustificationsEnrollmentConfig", +"description": "The effective KeyAccessJustificationsEnrollmentConfig for hardware keys." +}, +"softwareConfig": { +"$ref": "KeyAccessJustificationsEnrollmentConfig", +"description": "The effective KeyAccessJustificationsEnrollmentConfig for software keys." +} +}, +"type": "object" +}, +"ShowEffectiveKeyAccessJustificationsPolicyConfigResponse": { +"description": "Response message for KeyAccessJustificationsConfig.ShowEffectiveKeyAccessJustificationsPolicyConfig.", +"id": "ShowEffectiveKeyAccessJustificationsPolicyConfigResponse", +"properties": { +"effectiveKajPolicy": { +"$ref": "KeyAccessJustificationsPolicyConfig", +"description": "The effective KeyAccessJustificationsPolicyConfig." +} +}, +"type": "object" +}, "Status": { "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json index d88cc5120d..e266f1d880 100644 --- a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json @@ -303,7 +303,7 @@ ], "parameters": { "name": { -"description": "The resource name for the case.", +"description": "Identifier. The resource name for the case.", "location": "path", "pattern": "^[^/]+/[^/]+/cases/[^/]+$", "required": true, @@ -552,7 +552,7 @@ } } }, -"revision": "20241231", +"revision": "20250521", "rootUrl": "https://cloudsupport.googleapis.com/", "schemas": { "Actor": { @@ -565,7 +565,7 @@ }, "email": { "deprecated": true, -"description": "The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead.", +"description": "The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. 
Use `username` instead.", "type": "string" }, "googleSupport": { @@ -606,7 +606,7 @@ "type": "string" }, "name": { -"description": "Output only. The resource name of the attachment.", +"description": "Output only. Identifier. The resource name of the attachment.", "readOnly": true, "type": "string" }, @@ -688,7 +688,7 @@ "type": "string" }, "name": { -"description": "The resource name for the case.", +"description": "Identifier. The resource name for the case.", "type": "string" }, "priority": { diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json index 07f37e6170..1642d4f484 100644 --- a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json +++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json @@ -333,7 +333,7 @@ ], "parameters": { "name": { -"description": "The resource name for the case.", +"description": "Identifier. The resource name for the case.", "location": "path", "pattern": "^[^/]+/[^/]+/cases/[^/]+$", "required": true, @@ -619,7 +619,7 @@ } } }, -"revision": "20241231", +"revision": "20250521", "rootUrl": "https://cloudsupport.googleapis.com/", "schemas": { "Actor": { @@ -632,7 +632,7 @@ }, "email": { "deprecated": true, -"description": "The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use **username** field instead.", +"description": "The email address of the actor. If not provided, it is inferred from the credentials supplied during case creation. When a name is provided, an email must also be provided. If the user is a Google Support agent, this is obfuscated. This field is deprecated. Use `username` instead.", "type": "string" }, "googleSupport": { @@ -673,7 +673,7 @@ "type": "string" }, "name": { -"description": "Output only. The resource name of the attachment.", +"description": "Output only. Identifier. The resource name of the attachment.", "readOnly": true, "type": "string" }, @@ -755,7 +755,7 @@ "type": "string" }, "name": { -"description": "The resource name for the case.", +"description": "Identifier. The resource name for the case.", "type": "string" }, "priority": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1.json b/googleapiclient/discovery_cache/documents/composer.v1.json index 8e5b1b8f16..1c56b2a95f 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1.json @@ -646,6 +646,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"restartWebServer": { +"description": "Restart Airflow web server.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:restartWebServer", +"httpMethod": "POST", +"id": "composer.projects.locations.environments.restartWebServer", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The resource name of the environment to restart the web server for, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:restartWebServer", +"request": { +"$ref": "RestartWebServerRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "saveSnapshot": { "description": "Creates a snapshots of a Cloud Composer environment. As a result of this operation, snapshot of environment's state is stored in a location specified in the SaveSnapshotRequest.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:saveSnapshot", @@ -1188,7 +1216,7 @@ } } }, -"revision": "20250303", +"revision": "20250513", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AirflowMetadataRetentionPolicyConfig": { @@ -2331,6 +2359,12 @@ }, "type": "object" }, +"RestartWebServerRequest": { +"description": "Restart Airflow web server.", +"id": "RestartWebServerRequest", +"properties": {}, +"type": "object" +}, "SaveSnapshotRequest": { "description": "Request to create a snapshot of a Cloud Composer environment.", "id": "SaveSnapshotRequest", diff --git a/googleapiclient/discovery_cache/documents/composer.v1beta1.json b/googleapiclient/discovery_cache/documents/composer.v1beta1.json index 62c825a8bc..30d91482b7 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1beta1.json @@ -656,7 +656,7 @@ ], "parameters": { "name": { -"description": "The resource name of the environment to restart the web server for, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", +"description": "Required. 
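`restartWebServer`, promoted to the v1 surface above, takes an empty request body (`RestartWebServerRequest` has no fields) and returns a long-running operation. A minimal sketch with a placeholder environment name:

```python
from googleapiclient.discovery import build

composer = build("composer", "v1")

ENV = "projects/my-project/locations/us-central1/environments/my-env"  # placeholder

op = (
    composer.projects().locations().environments()
    .restartWebServer(name=ENV, body={})  # RestartWebServerRequest is empty
    .execute()
)
print(op["name"])  # operation to poll until done
```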
The resource name of the environment to restart the web server for, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", "required": true, @@ -1216,7 +1216,7 @@ } } }, -"revision": "20250303", +"revision": "20250513", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AirflowMetadataRetentionPolicyConfig": { diff --git a/googleapiclient/discovery_cache/documents/compute.alpha.json b/googleapiclient/discovery_cache/documents/compute.alpha.json index b2510814b7..cc2d6e80b9 100644 --- a/googleapiclient/discovery_cache/documents/compute.alpha.json +++ b/googleapiclient/discovery_cache/documents/compute.alpha.json @@ -748,6 +748,51 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] +}, +"capacity": { +"description": "Advice on making real-time decisions (such as choosing zone or machine types) during deployment to maximize your chances of obtaining capacity.", +"flatPath": "projects/{project}/regions/{region}/advice/capacity", +"httpMethod": "POST", +"id": "compute.advice.capacity", +"parameterOrder": [ +"project", +"region", +"size" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"size": { +"description": "Size of requested capacity.", +"format": "int32", +"location": "query", +"required": true, +"type": "integer" +} +}, +"path": "projects/{project}/regions/{region}/advice/capacity", +"request": { +"$ref": "CapacityAdviceRequest" +}, +"response": { +"$ref": "CapacityAdviceResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] } } }, @@ -25404,26 +25449,31 @@ } } }, -"regionBackendServices": { +"regionBackendBuckets": { "methods": { "delete": { -"description": "Deletes the specified regional BackendService resource.", -"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", +"description": "Deletes the specified regional BackendBucket resource.", +"flatPath": "projects/{project}/regions/{region}/backendBuckets/{backendBucket}", "httpMethod": "DELETE", -"id": "compute.regionBackendServices.delete", +"id": "compute.regionBackendBuckets.delete", "parameterOrder": [ "project", "region", -"backendService" +"backendBucket" ], "parameters": { -"backendService": { -"description": "Name of the BackendService resource to delete.", +"backendBucket": { +"description": "Name of the BackendBucket resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, +"forceDelete": { +"description": "Force delete the backend bucket even if it is still in use by other resources. It's intended to be used internally only for requests from wipeout.", +"location": "query", +"type": "boolean" +}, "project": { "description": "Project ID for this request.", "location": "path", @@ -25439,12 +25489,12 @@ "type": "string" }, "requestId": { -"description": "An optional request ID to identify requests. 
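The new alpha `advice.capacity` method above takes the project and region as path parameters and the requested capacity as a required `size` query parameter. A hedged sketch: this is an alpha surface, and the `CapacityAdviceRequest` body schema is not reproduced in this diff, so an empty body stands in purely as a placeholder.

```python
from googleapiclient.discovery import build

compute = build("compute", "alpha")

advice = compute.advice().capacity(
    project="my-project",   # placeholder project ID
    region="us-central1",   # placeholder region
    size=8,                 # requested capacity
    body={},                # CapacityAdviceRequest fields omitted here
).execute()
```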
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +"description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", "location": "query", "type": "string" } }, -"path": "projects/{project}/regions/{region}/backendServices/{backendService}", +"path": "projects/{project}/regions/{region}/backendBuckets/{backendBucket}", "response": { "$ref": "Operation" }, @@ -25454,18 +25504,18 @@ ] }, "get": { -"description": "Returns the specified regional BackendService resource.", -"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", +"description": "Returns the specified regional BackendBucket resource.", +"flatPath": "projects/{project}/regions/{region}/backendBuckets/{backendBucket}", "httpMethod": "GET", -"id": "compute.regionBackendServices.get", +"id": "compute.regionBackendBuckets.get", "parameterOrder": [ "project", "region", -"backendService" +"backendBucket" ], "parameters": { -"backendService": { -"description": "Name of the BackendService resource to return.", +"backendBucket": { +"description": "Name of the BackendBucket resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -25486,54 +25536,9 @@ "type": "string" } }, -"path": "projects/{project}/regions/{region}/backendServices/{backendService}", -"response": { -"$ref": "BackendService" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform", -"https://www.googleapis.com/auth/compute", -"https://www.googleapis.com/auth/compute.readonly" -] -}, -"getHealth": { -"description": "Gets the most recent health check results for this regional BackendService.", -"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", -"httpMethod": "POST", -"id": "compute.regionBackendServices.getHealth", -"parameterOrder": [ -"project", -"region", -"backendService" -], -"parameters": { -"backendService": { -"description": "Name of the BackendService resource for which to get health.", -"location": "path", -"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", -"required": true, -"type": "string" -}, -"project": { -"location": "path", -"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", -"required": true, 
-"type": "string" -}, -"region": { -"description": "Name of the region scoping this request.", -"location": "path", -"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", -"required": true, -"type": "string" -} -}, -"path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", -"request": { -"$ref": "ResourceGroupReference" -}, +"path": "projects/{project}/regions/{region}/backendBuckets/{backendBucket}", "response": { -"$ref": "BackendServiceGroupHealth" +"$ref": "BackendBucket" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25543,9 +25548,9 @@ }, "getIamPolicy": { "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", -"flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", +"flatPath": "projects/{project}/regions/{region}/backendBuckets/{resource}/getIamPolicy", "httpMethod": "GET", -"id": "compute.regionBackendServices.getIamPolicy", +"id": "compute.regionBackendBuckets.getIamPolicy", "parameterOrder": [ "project", "region", @@ -25580,7 +25585,7 @@ "type": "string" } }, -"path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", +"path": "projects/{project}/regions/{region}/backendBuckets/{resource}/getIamPolicy", "response": { "$ref": "Policy" }, @@ -25591,10 +25596,10 @@ ] }, "insert": { -"description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", -"flatPath": "projects/{project}/regions/{region}/backendServices", +"description": "Creates a RegionBackendBucket in the specified project in the given scope using the parameters that are included in the request.", +"flatPath": "projects/{project}/regions/{region}/regionBackendBuckets", "httpMethod": "POST", -"id": "compute.regionBackendServices.insert", +"id": "compute.regionBackendBuckets.insert", "parameterOrder": [ "project", "region" @@ -25608,7 +25613,7 @@ "type": "string" }, "region": { -"description": "Name of the region scoping this request.", +"description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -25620,9 +25625,9 @@ "type": "string" } }, -"path": "projects/{project}/regions/{region}/backendServices", +"path": "projects/{project}/regions/{region}/regionBackendBuckets", "request": { -"$ref": "BackendService" +"$ref": "BackendBucket" }, "response": { "$ref": "Operation" @@ -25633,10 +25638,10 @@ ] }, "list": { -"description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", -"flatPath": "projects/{project}/regions/{region}/backendServices", +"description": "Retrieves the list of BackendBucket resources available to the specified project in the given region.", +"flatPath": "projects/{project}/regions/{region}/backendBuckets", "httpMethod": "GET", -"id": "compute.regionBackendServices.list", +"id": "compute.regionBackendBuckets.list", "parameterOrder": [ "project", "region" @@ -25673,7 +25678,7 @@ "type": "string" }, "region": { -"description": "Name of the region scoping this request.", +"description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -25685,9 +25690,9 @@ "type": "boolean" } }, -"path": "projects/{project}/regions/{region}/backendServices", +"path": "projects/{project}/regions/{region}/backendBuckets", 
"response": { -"$ref": "BackendServiceList" +"$ref": "BackendBucketList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25696,10 +25701,10 @@ ] }, "listUsable": { -"description": "Retrieves a list of all usable backend services in the specified project in the given region.", -"flatPath": "projects/{project}/regions/{region}/backendServices/listUsable", +"description": "Retrieves a list of all usable backend buckets in the specified project in the given region.", +"flatPath": "projects/{project}/regions/{region}/backendBuckets/listUsable", "httpMethod": "GET", -"id": "compute.regionBackendServices.listUsable", +"id": "compute.regionBackendBuckets.listUsable", "parameterOrder": [ "project", "region" @@ -25738,6 +25743,7 @@ "region": { "description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", "location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -25747,9 +25753,9 @@ "type": "boolean" } }, -"path": "projects/{project}/regions/{region}/backendServices/listUsable", +"path": "projects/{project}/regions/{region}/backendBuckets/listUsable", "response": { -"$ref": "BackendServiceListUsable" +"$ref": "BackendBucketListUsable" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25758,10 +25764,155 @@ ] }, "patch": { -"description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", -"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", +"description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +"flatPath": "projects/{project}/regions/{region}/backendBuckets/{backendBucket}", "httpMethod": "PATCH", -"id": "compute.regionBackendServices.patch", +"id": "compute.regionBackendBuckets.patch", +"parameterOrder": [ +"project", +"region", +"backendBucket" +], +"parameters": { +"backendBucket": { +"description": "Name of the BackendBucket resource to patch.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region scoping this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendBuckets/{backendBucket}", +"request": { +"$ref": "BackendBucket" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"setIamPolicy": { +"description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +"flatPath": "projects/{project}/regions/{region}/backendBuckets/{resource}/setIamPolicy", +"httpMethod": "POST", +"id": "compute.regionBackendBuckets.setIamPolicy", +"parameterOrder": [ +"project", +"region", +"resource" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendBuckets/{resource}/setIamPolicy", +"request": { +"$ref": "RegionSetPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource.", +"flatPath": "projects/{project}/regions/{region}/backendBuckets/{resource}/testIamPermissions", +"httpMethod": "POST", +"id": "compute.regionBackendBuckets.testIamPermissions", +"parameterOrder": [ +"project", +"region", +"resource" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendBuckets/{resource}/testIamPermissions", +"request": { +"$ref": "TestPermissionsRequest" +}, +"response": { +"$ref": "TestPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +} +} +}, +"regionBackendServices": { +"methods": { +"delete": { +"description": "Deletes the specified regional BackendService resource.", +"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", +"httpMethod": "DELETE", +"id": "compute.regionBackendServices.delete", "parameterOrder": [ "project", "region", @@ -25769,7 +25920,358
@@ ], "parameters": { "backendService": { -"description": "Name of the BackendService resource to patch.", +"description": "Name of the BackendService resource to delete.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region scoping this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendServices/{backendService}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"get": { +"description": "Returns the specified regional BackendService resource.", +"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", +"httpMethod": "GET", +"id": "compute.regionBackendServices.get", +"parameterOrder": [ +"project", +"region", +"backendService" +], +"parameters": { +"backendService": { +"description": "Name of the BackendService resource to return.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region scoping this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendServices/{backendService}", +"response": { +"$ref": "BackendService" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"getHealth": { +"description": "Gets the most recent health check results for this regional BackendService.", +"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", +"httpMethod": "POST", +"id": "compute.regionBackendServices.getHealth", +"parameterOrder": [ +"project", +"region", +"backendService" +], +"parameters": { +"backendService": { +"description": "Name of the BackendService resource for which to get health.", +"location": "path", +"pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region scoping this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", +"request": { +"$ref": "ResourceGroupReference" +}, +"response": { +"$ref": "BackendServiceGroupHealth" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"getIamPolicy": { +"description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", +"flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", +"httpMethod": "GET", +"id": "compute.regionBackendServices.getIamPolicy", +"parameterOrder": [ +"project", +"region", +"resource" +], +"parameters": { +"optionsRequestedPolicyVersion": { +"description": "Requested IAM Policy version.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"insert": { +"description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", +"flatPath": "projects/{project}/regions/{region}/backendServices", +"httpMethod": "POST", +"id": "compute.regionBackendServices.insert", +"parameterOrder": [ +"project", +"region" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region scoping this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/backendServices", +"request": { +"$ref": "BackendService" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"list": { +"description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", +"flatPath": "projects/{project}/regions/{region}/backendServices", +"httpMethod": "GET", +"id": "compute.regionBackendServices.list", +"parameterOrder": [ +"project", +"region" +], +"parameters": { +"filter": { +"description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", +"location": "query", +"type": "string" +}, +"maxResults": { +"default": "500", +"description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +"format": "uint32", +"location": "query", +"minimum": "0", +"type": "integer" +}, +"orderBy": { +"description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +"location": "query", +"type": "string" +}, +"pageToken": { +"description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +"location": "query", +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region scoping this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"returnPartialSuccess": { +"description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false. For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.", +"location": "query", +"type": "boolean" +} +}, +"path": "projects/{project}/regions/{region}/backendServices", +"response": { +"$ref": "BackendServiceList" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"listUsable": { +"description": "Retrieves a list of all usable backend services in the specified project in the given region.", +"flatPath": "projects/{project}/regions/{region}/backendServices/listUsable", +"httpMethod": "GET", +"id": "compute.regionBackendServices.listUsable", +"parameterOrder": [ +"project", +"region" +], +"parameters": { +"filter": { +"description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", +"location": "query", +"type": "string" +}, +"maxResults": { +"default": "500", +"description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +"format": "uint32", +"location": "query", +"minimum": "0", +"type": "integer" +}, +"orderBy": { +"description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +"location": "query", +"type": "string" +}, +"pageToken": { +"description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +"location": "query", +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", +"location": "path", +"required": true, +"type": "string" +}, +"returnPartialSuccess": { +"description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false. 
For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.", +"location": "query", +"type": "boolean" +} +}, +"path": "projects/{project}/regions/{region}/backendServices/listUsable", +"response": { +"$ref": "BackendServiceListUsable" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"patch": { +"description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +"flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", +"httpMethod": "PATCH", +"id": "compute.regionBackendServices.patch", +"parameterOrder": [ +"project", +"region", +"backendService" +], +"parameters": { +"backendService": { +"description": "Name of the BackendService resource to patch.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -49568,7 +50070,7 @@ } } }, -"revision": "20250505", +"revision": "20250511", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -53286,7 +53788,7 @@ false "type": "string" }, "network": { -"description": "The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL.", +"description": "The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled.", "type": "string" }, "networkPassThroughLbTrafficPolicy": { @@ -55271,6 +55773,207 @@ false }, "type": "object" }, +"CapacityAdviceRequest": { +"description": "A request to provide Assistant Scores. These scores determine VM obtainability and preemption likelihood.", +"id": "CapacityAdviceRequest", +"properties": { +"distributionPolicy": { +"$ref": "CapacityAdviceRequestDistributionPolicy", +"description": "Policy specifying the distribution of instances across zones within the requested region." +}, +"instanceFlexibilityPolicy": { +"$ref": "CapacityAdviceRequestInstanceFlexibilityPolicy", +"description": "Policy for instance selectors." +}, +"instanceProperties": { +"$ref": "CapacityAdviceRequestInstanceProperties", +"description": "Instance properties for this request."
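The requestId semantics described above make retries of mutating calls such as patch idempotent: resending the same UUID lets the server ignore a duplicate of an already-completed request. A short sketch, assuming the v1 surface; all resource names are placeholders, and timeoutSec merely stands in for whichever BackendService fields the merge patch needs to change:

```python
# Sketch of compute.regionBackendServices.patch with an idempotency token.
import uuid

from googleapiclient import discovery

service = discovery.build("compute", "v1")

operation = service.regionBackendServices().patch(
    project="my-project",
    region="us-central1",
    backendService="my-backend-service",
    # A fresh UUID per logical request; the zero UUID is explicitly rejected.
    requestId=str(uuid.uuid4()),
    body={"timeoutSec": 60},  # JSON merge patch: only the listed field changes
).execute()
print(operation["name"])
```

Reusing the same requestId on a retry, rather than generating a new one, is what prevents the duplicate commitment the description warns about.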
+} +}, +"type": "object" +}, +"CapacityAdviceRequestDistributionPolicy": { +"id": "CapacityAdviceRequestDistributionPolicy", +"properties": { +"targetShape": { +"description": "The distribution shape to which the group converges.", +"enum": [ +"ANY", +"ANY_SINGLE_ZONE", +"BALANCED", +"TARGET_SHAPE_UNSPECIFIED" +], +"enumDescriptions": [ +"The group picks zones for creating VM instances to fulfill the requested number of VMs within present resource constraints and to maximize utilization of unused zonal reservations. Recommended for batch workloads that do not require high availability.", +"The group creates all VM instances within a single zone. The zone is selected based on the present resource constraints and to maximize utilization of unused zonal reservations. Recommended for batch workloads with heavy interprocess communication.", +"The group prioritizes acquisition of resources, scheduling VMs in zones where resources are available while distributing VMs as evenly as possible across selected zones to minimize the impact of zonal failure. Recommended for highly available serving workloads.", +"" +], +"type": "string" +}, +"zones": { +"description": "Zones where Capacity Advisor looks for capacity.", +"items": { +"$ref": "CapacityAdviceRequestDistributionPolicyZoneConfiguration" +}, +"type": "array" +} +}, +"type": "object" +}, +"CapacityAdviceRequestDistributionPolicyZoneConfiguration": { +"id": "CapacityAdviceRequestDistributionPolicyZoneConfiguration", +"properties": { +"zone": { +"description": "The URL of the zone.", +"type": "string" +} +}, +"type": "object" +}, +"CapacityAdviceRequestInstanceFlexibilityPolicy": { +"description": "Specification of alternative, flexible instance subsets.", +"id": "CapacityAdviceRequestInstanceFlexibilityPolicy", +"properties": { +"instanceSelections": { +"additionalProperties": { +"$ref": "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection" +}, +"description": "Named instance selections configure properties. The key is an arbitrary, unique RFC1035 string that identifies the instance selection.", +"type": "object" +} +}, +"type": "object" +}, +"CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection": { +"description": "Machine specification.", +"id": "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection", +"properties": { +"machineTypes": { +"description": "Full machine-type names, e.g. \"n1-standard-16\".", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"CapacityAdviceRequestInstanceProperties": { +"description": "Instance provisioning properties.", +"id": "CapacityAdviceRequestInstanceProperties", +"properties": { +"scheduling": { +"$ref": "CapacityAdviceRequestInstancePropertiesScheduling", +"description": "Specifies the scheduling options." +} +}, +"type": "object" +}, +"CapacityAdviceRequestInstancePropertiesScheduling": { +"description": "Defines the instance scheduling options.", +"id": "CapacityAdviceRequestInstancePropertiesScheduling", +"properties": { +"provisioningModel": { +"description": "Specifies the provisioning model of the instance.", +"enum": [ +"FLEX_START", +"RESERVATION_BOUND", +"SPOT", +"STANDARD" +], +"enumDescriptions": [ +"Instance is provisioned using the Flex Start provisioning model and has a limited runtime.", +"Bound to the lifecycle of the reservation in which it is provisioned.", +"Heavily discounted, no guaranteed runtime.", +"Standard provisioning with user controlled runtime, no discounts."
+], +"type": "string" +} +}, +"type": "object" +}, +"CapacityAdviceResponse": { +"description": "A response containing multiple scoring recommendations.", +"id": "CapacityAdviceResponse", +"properties": { +"recommendations": { +"description": "Initially the API will provide one recommendation which balances the individual scores according to Google's preference.", +"items": { +"$ref": "CapacityAdviceResponseRecommendation" +}, +"type": "array" +} +}, +"type": "object" +}, +"CapacityAdviceResponseRecommendation": { +"id": "CapacityAdviceResponseRecommendation", +"properties": { +"scores": { +"$ref": "CapacityAdviceResponseRecommendationScores" +}, +"shards": { +"items": { +"$ref": "CapacityAdviceResponseRecommendationShard" +}, +"type": "array" +} +}, +"type": "object" +}, +"CapacityAdviceResponseRecommendationScores": { +"description": "The Scores message groups information about a shard of capacity.", +"id": "CapacityAdviceResponseRecommendationScores", +"properties": { +"obtainability": { +"description": "The obtainability score indicates the likelihood of successfully obtaining (provisioning) the requested number of VMs. The score range is 0.0 through 1.0. Higher is better.", +"format": "double", +"type": "number" +}, +"spotPreemption": { +"description": "The preemption score indicates the likelihood that your Spot VMs are preempted. For more information about the preemption process, see Preemption of Spot VMs. The score range is 0.0 through 1.0. Higher is better.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"CapacityAdviceResponseRecommendationShard": { +"description": "Shards represent blocks of uniform capacity in recommendations. Each shard is for a single zone, single instance selection, and a single machine shape. Each shard defines a size expressed as the number of VMs.", +"id": "CapacityAdviceResponseRecommendationShard", +"properties": { +"instanceCount": { +"format": "int32", +"type": "integer" +}, +"machineType": { +"description": "The machine type corresponds to the instance selection in the request.", +"type": "string" +}, +"provisioningModel": { +"description": "Provisioning model of the recommended capacity.", +"enum": [ +"FLEX_START", +"RESERVATION_BOUND", +"SPOT", +"STANDARD" +], +"enumDescriptions": [ +"Instance is provisioned using the Flex Start provisioning model and has a limited runtime.", +"Bound to the lifecycle of the reservation in which it is provisioned.", +"Heavily discounted, no guaranteed runtime.", +"Standard provisioning with user controlled runtime, no discounts." +], +"type": "string" +}, +"zone": { +"description": "The zone name for this shard.", +"type": "string" +} +}, +"type": "object" +}, "ChannelCredentials": { "description": "[Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server.", "id": "ChannelCredentials", @@ -67439,7 +68142,7 @@ false }, "targetSizePolicy": { "$ref": "InstanceGroupManagerTargetSizePolicy", -"description": "Configures how target size of MIG is achieved." +"description": "The policy that specifies how the MIG creates its VMs to achieve the target size." }, "targetSizeUnit": { "description": "The unit of measure for the target size.", @@ -68522,7 +69225,7 @@ false }, "bulkInstanceOperation": { "$ref": "InstanceGroupManagerStatusBulkInstanceOperation", -"description": "[Output Only] Status of bulk instance operation." +"description": "[Output Only] The status of bulk instance operation."
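Putting the CapacityAdviceRequest and CapacityAdviceResponse schemas above together: the sketch below hand-builds a response-shaped dict and walks its recommendations, since the RPC that returns this message is not part of this hunk. All values are illustrative:

```python
# Illustrative-only stand-in for a CapacityAdviceResponse, shaped per the
# schema definitions above; no API call is made here.
response = {
    "recommendations": [
        {
            "scores": {"obtainability": 0.92, "spotPreemption": 0.75},
            "shards": [
                {
                    "instanceCount": 8,
                    "machineType": "n1-standard-16",
                    "provisioningModel": "SPOT",
                    "zone": "us-central1-a",
                }
            ],
        }
    ]
}

for rec in response["recommendations"]:
    scores = rec["scores"]  # both scores range 0.0-1.0; higher is better
    print(f"obtainability={scores['obtainability']}, "
          f"spot_preemption={scores['spotPreemption']}")
    for shard in rec["shards"]:  # one zone + one machine shape per shard
        print(f"  {shard['instanceCount']}x {shard['machineType']} "
              f"({shard['provisioningModel']}) in {shard['zone']}")
```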
}, "isStable": { "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", @@ -68554,6 +69257,7 @@ false "type": "object" }, "InstanceGroupManagerStatusBulkInstanceOperation": { +"description": "Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK.", "id": "InstanceGroupManagerStatusBulkInstanceOperation", "properties": { "inProgress": { @@ -68562,7 +69266,7 @@ false }, "lastProgressCheck": { "$ref": "InstanceGroupManagerStatusBulkInstanceOperationLastProgressCheck", -"description": "[Output Only] Information from last progress check of bulk instance operation." +"description": "[Output Only] Information from the last progress check of bulk instance operation." } }, "type": "object" @@ -68571,7 +69275,7 @@ false "id": "InstanceGroupManagerStatusBulkInstanceOperationLastProgressCheck", "properties": { "error": { -"description": "[Output Only] Contains errors encountered during bulk instance operation.", +"description": "[Output Only] Errors encountered during bulk instance operation.", "properties": { "errors": { "description": "[Output Only] The array of errors encountered while processing this operation.", @@ -68669,15 +69373,15 @@ false "id": "InstanceGroupManagerTargetSizePolicy", "properties": { "mode": { -"description": "Mode in which operations on size are processed.", +"description": "The mode of target size policy based on which the MIG creates its VMs individually or all at once.", "enum": [ "BULK", "INDIVIDUAL", "UNSPECIFIED_MODE" ], "enumDescriptions": [ -"Mode in which MIG creates and starts VMs in all-or-nothing manner. If any VM from the request cannot be provisioned, the whole request waits for conditions that allow for provisioning whole capacity in bulk.", -"Default mode in which MIG creates and starts VMs individually without cross-dependency between VMs. This means that in case of something blocking part of VMs to be provisioned, the other part will be created.", +"The mode in which the MIG creates VMs all at once. In this mode, if the MIG is unable to create even one VM, the MIG waits until all VMs can be created at the same time.", +"The mode in which the MIG creates VMs individually. In this mode, if the MIG is unable to create a VM, the MIG will continue to create the other VMs in the group. This is the default mode.", "If mode is unspecified, MIG will behave as in the default `INDIVIDUAL` mode." ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json index d129d4a44d..38765b59c0 100644 --- a/googleapiclient/discovery_cache/documents/compute.beta.json +++ b/googleapiclient/discovery_cache/documents/compute.beta.json @@ -45535,7 +45535,7 @@ } } }, -"revision": "20250505", +"revision": "20250511", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -49097,7 +49097,7 @@ false "type": "string" }, "network": { -"description": "The URL of the network to which this backend service belongs. 
This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL.", +"description": "The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled.", "type": "string" }, "networkPassThroughLbTrafficPolicy": { @@ -51173,6 +51173,7 @@ false "MEMORY_OPTIMIZED", "MEMORY_OPTIMIZED_M3", "MEMORY_OPTIMIZED_M4", +"MEMORY_OPTIMIZED_M4_6TB", "MEMORY_OPTIMIZED_X4_16TB", "MEMORY_OPTIMIZED_X4_24TB", "MEMORY_OPTIMIZED_X4_32TB", @@ -51208,6 +51209,7 @@ false "", "", "", +"", "Note for internal users: When adding a new enum Type for v1, make sure to also add it in the comment for the `optional Type type` definition. This ensures that the public documentation displays the new enum Type." ], "type": "string" @@ -55182,6 +55184,24 @@ false ], "type": "string" }, +"destNetworkType": { +"description": "Network type of the traffic destination. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS ", +"enum": [ +"INTERNET", +"INTRA_VPC", +"NON_INTERNET", +"UNSPECIFIED", +"VPC_NETWORKS" +], +"enumDescriptions": [ +"", +"", +"", +"", +"" +], +"type": "string" +}, "destRegionCodes": { "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of dest region codes allowed is 5000.", "items": { @@ -55242,6 +55262,24 @@ false ], "type": "string" }, +"srcNetworkType": { +"description": "Network type of the traffic source. Allowed values are: - UNSPECIFIED - INTERNET - INTRA_VPC - NON_INTERNET - VPC_NETWORKS ", +"enum": [ +"INTERNET", +"INTRA_VPC", +"NON_INTERNET", +"UNSPECIFIED", +"VPC_NETWORKS" +], +"enumDescriptions": [ +"", +"", +"", +"", +"" +], +"type": "string" +}, "srcNetworks": { "description": "Networks of the traffic source. It can be either a full or partial url.", "items": { @@ -61060,6 +61098,10 @@ false "format": "int32", "type": "integer" }, +"targetSizePolicy": { +"$ref": "InstanceGroupManagerTargetSizePolicy", +"description": "The policy that specifies how the MIG creates its VMs to achieve the target size." +}, "targetStoppedSize": { "description": "The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. ", "format": "int32", @@ -62031,6 +62073,10 @@ false "description": "[Output Only] The URL of the Autoscaler that targets this instance group manager.", "type": "string" }, +"bulkInstanceOperation": { +"$ref": "InstanceGroupManagerStatusBulkInstanceOperation", +"description": "[Output Only] The status of bulk instance operation." +}, "isStable": { "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state.
A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", "type": "boolean" @@ -62060,6 +62106,80 @@ false }, "type": "object" }, +"InstanceGroupManagerStatusBulkInstanceOperation": { +"description": "Bulk instance operation is the creation of VMs in a MIG when the targetSizePolicy.mode is set to BULK.", +"id": "InstanceGroupManagerStatusBulkInstanceOperation", +"properties": { +"inProgress": { +"description": "[Output Only] Informs whether bulk instance operation is in progress.", +"type": "boolean" +}, +"lastProgressCheck": { +"$ref": "InstanceGroupManagerStatusBulkInstanceOperationLastProgressCheck", +"description": "[Output Only] Information from the last progress check of bulk instance operation." +} +}, +"type": "object" +}, +"InstanceGroupManagerStatusBulkInstanceOperationLastProgressCheck": { +"id": "InstanceGroupManagerStatusBulkInstanceOperationLastProgressCheck", +"properties": { +"error": { +"description": "[Output Only] Errors encountered during bulk instance operation.", +"properties": { +"errors": { +"description": "[Output Only] The array of errors encountered while processing this operation.", +"items": { +"properties": { +"code": { +"description": "[Output Only] The error type identifier for this error.", +"type": "string" +}, +"errorDetails": { +"description": "[Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.", +"items": { +"properties": { +"errorInfo": { +"$ref": "ErrorInfo" +}, +"help": { +"$ref": "Help" +}, +"localizedMessage": { +"$ref": "LocalizedMessage" +}, +"quotaInfo": { +"$ref": "QuotaExceededInfo" +} +}, +"type": "object" +}, +"type": "array" +}, +"location": { +"description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", +"type": "string" +}, +"message": { +"description": "[Output Only] An optional, human-readable error message.", +"type": "string" +} +}, +"type": "object" +}, +"type": "array" +} +}, +"type": "object" +}, +"timestamp": { +"description": "[Output Only] Timestamp of the last progress check of bulk instance operation. Timestamp is in RFC3339 text format.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, "InstanceGroupManagerStatusStateful": { "id": "InstanceGroupManagerStatusStateful", "properties": { @@ -62099,6 +62219,26 @@ false }, "type": "object" }, +"InstanceGroupManagerTargetSizePolicy": { +"id": "InstanceGroupManagerTargetSizePolicy", +"properties": { +"mode": { +"description": "The mode of target size policy based on which the MIG creates its VMs individually or all at once.", +"enum": [ +"BULK", +"INDIVIDUAL", +"UNSPECIFIED_MODE" +], +"enumDescriptions": [ +"The mode in which the MIG creates VMs all at once. In this mode, if the MIG is unable to create even one VM, the MIG waits until all VMs can be created at the same time.", +"The mode in which the MIG creates VMs individually. In this mode, if the MIG is unable to create a VM, the MIG will continue to create the other VMs in the group. 
This is the default mode.", +"If mode is unspecified, MIG will behave as in the default `INDIVIDUAL` mode." +], +"type": "string" +} +}, +"type": "object" +}, "InstanceGroupManagerUpdatePolicy": { "id": "InstanceGroupManagerUpdatePolicy", "properties": { @@ -73118,6 +73258,18 @@ false "format": "byte", "type": "string" }, +"igmpQuery": { +"description": "Indicate whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported.", +"enum": [ +"IGMP_QUERY_DISABLED", +"IGMP_QUERY_V2" +], +"enumDescriptions": [ +"The network interface has disabled IGMP query.", +"The network interface has enabled IGMP query - v2." +], +"type": "string" +}, "internalIpv6PrefixLength": { "description": "The prefix length of the primary internal IPv6 range.", "format": "int32", diff --git a/googleapiclient/discovery_cache/documents/compute.v1.json b/googleapiclient/discovery_cache/documents/compute.v1.json index 8231928805..7b4d4ed99b 100644 --- a/googleapiclient/discovery_cache/documents/compute.v1.json +++ b/googleapiclient/discovery_cache/documents/compute.v1.json @@ -39717,7 +39717,7 @@ } } }, -"revision": "20250505", +"revision": "20250511", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -43055,7 +43055,7 @@ false "type": "string" }, "network": { -"description": "The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL.", +"description": "The URL of the network to which this backend service belongs. This field must be set for Internal Passthrough Network Load Balancers when the haPolicy is enabled, and for External Passthrough Network Load Balancers when the haPolicy fastIpMove is enabled. This field can only be specified when the load balancing scheme is set to INTERNAL, or when the load balancing scheme is set to EXTERNAL and haPolicy fastIpMove is enabled.", "type": "string" }, "outlierDetection": { @@ -44926,6 +44926,7 @@ false "MEMORY_OPTIMIZED", "MEMORY_OPTIMIZED_M3", "MEMORY_OPTIMIZED_M4", +"MEMORY_OPTIMIZED_M4_6TB", "MEMORY_OPTIMIZED_X4_16TB", "MEMORY_OPTIMIZED_X4_24TB", "MEMORY_OPTIMIZED_X4_32TB", @@ -44961,6 +44962,7 @@ false "", "", "", +"", "Note for internal users: When adding a new enum Type for v1, make sure to also add it in the comment for the `optional Type type` definition. This ensures that the public documentation displays the new enum Type." ], "type": "string" @@ -47817,6 +47819,10 @@ false "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default ", "type": "string" }, +"params": { +"$ref": "FirewallParams", +"description": "Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload." +}, "priority": { "description": "Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. 
Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`.", "format": "int32", @@ -48042,6 +48048,20 @@ false }, "type": "object" }, +"FirewallParams": { +"description": "Additional firewall parameters.", +"id": "FirewallParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {\"tagKeys/333\" : \"tagValues/444\", \"tagKeys/123\" : \"tagValues/456\"} * {\"123/environment\" : \"production\", \"345/abc\" : \"xyz\"} Note: * Invalid combinations of ID & namespaced format are not supported. For instance: {\"123/environment\" : \"tagValues/444\"} is invalid.", +"type": "object" +} +}, +"type": "object" +}, "FirewallPoliciesListAssociationsResponse": { "id": "FirewallPoliciesListAssociationsResponse", "properties": { @@ -74996,6 +75016,10 @@ false "description": "The URL to a VpnTunnel that should handle matching packets.", "type": "string" }, +"params": { +"$ref": "RouteParams", +"description": "Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload." +}, "priority": { "annotations": { "required": [ @@ -75369,6 +75393,20 @@ false }, "type": "object" }, +"RouteParams": { +"description": "Additional route parameters.", +"id": "RouteParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Tag keys/values directly bound to this resource. Tag keys and values have the same definition as resource manager tags. The field is allowed for INSERT only. The keys/values to set on the resource should be specified in either ID { : } or Namespaced format { : }. For example the following are valid inputs: * {\"tagKeys/333\" : \"tagValues/444\", \"tagKeys/123\" : \"tagValues/456\"} * {\"123/environment\" : \"production\", \"345/abc\" : \"xyz\"} Note: * Invalid combinations of ID & namespaced format are not supported. For instance: {\"123/environment\" : \"tagValues/444\"} is invalid.", +"type": "object" +} +}, +"type": "object" +}, "RoutePolicy": { "id": "RoutePolicy", "properties": { diff --git a/googleapiclient/discovery_cache/documents/connectors.v1.json b/googleapiclient/discovery_cache/documents/connectors.v1.json index 88d529452c..63ff24f418 100644 --- a/googleapiclient/discovery_cache/documents/connectors.v1.json +++ b/googleapiclient/discovery_cache/documents/connectors.v1.json @@ -529,7 +529,7 @@ "type": "string" }, "updateMask": { -"description": "Required. The list of fields to update. Fields are specified relative to the connection. A field will be overwritten if it is in the mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. You can modify only the fields listed below.
To lock/unlock a connection: * `lock_config` To suspend/resume a connection: * `suspended` To update the connection details: * `description` * `labels` * `connector_version` * `config_variables` * `auth_config` * `destination_configs` * `node_config` * `log_config` * `ssl_config` * `eventing_enablement_type` * `eventing_config` * `auth_override_enabled`", +"description": "Required. The list of fields to update. Fields are specified relative to the connection. A field will be overwritten if it is in the mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. You can modify only the fields listed below. To lock/unlock a connection: * `lock_config` To suspend/resume a connection: * `suspended` To update the connection details: * `description` * `labels` * `connector_version` * `config_variables` * `auth_config` * `destination_configs` * `node_config` * `log_config` * `ssl_config` * `eventing_enablement_type` * `eventing_config` * `auth_override_enabled` * `async_operations_enabled`", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1506,7 +1506,7 @@ ], "parameters": { "endpointAttachmentId": { -"description": "Required. Identifier to assign to the EndpointAttachment. Must be unique within scope of the parent resource.", +"description": "Required. Identifier to assign to the EndpointAttachment. Must be unique within scope of the parent resource. The regex is: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", "location": "query", "type": "string" }, @@ -2600,21 +2600,6 @@ "required": true, "type": "string" }, -"schemaView": { -"description": "Optional. Enum to control whether schema enrichment related fields should be included in the response.", -"enum": [ -"CONNECTOR_VERSION_SCHEMA_VIEW_UNSPECIFIED", -"CONNECTOR_VERSION_SCHEMA_VIEW_BASIC", -"CONNECTOR_VERSION_SCHEMA_VIEW_ENRICHED" -], -"enumDescriptions": [ -"VIEW_UNSPECIFIED. The unset value. Defaults to BASIC View.", -"Return basic connector version schema.", -"Return enriched connector version schema." -], -"location": "query", -"type": "string" -}, "view": { "description": "Specifies which fields of the ConnectorVersion are returned in the response. Defaults to `CUSTOMER` view.", "enum": [ @@ -2660,27 +2645,11 @@ "type": "string" }, "parent": { -"description": "Required. Parent resource of the connectors, of the form: `projects/*/locations/*/providers/*/connectors/*` Only global location is supported for ConnectorVersion resource.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/providers/[^/]+/connectors/[^/]+$", "required": true, "type": "string" }, -"schemaView": { -"description": "Optional. Enum to control whether schema enrichment related fields should be included in the response.", -"enum": [ -"CONNECTOR_VERSION_SCHEMA_VIEW_UNSPECIFIED", -"CONNECTOR_VERSION_SCHEMA_VIEW_BASIC", -"CONNECTOR_VERSION_SCHEMA_VIEW_ENRICHED" -], -"enumDescriptions": [ -"VIEW_UNSPECIFIED. The unset value. Defaults to BASIC View.", -"Return basic connector version schema.", -"Return enriched connector version schema." -], -"location": "query", -"type": "string" -}, "view": { "description": "Specifies which fields of the ConnectorVersion are returned in the response. Defaults to `BASIC` view.", "enum": [ @@ -2783,7 +2752,7 @@ } } }, -"revision": "20250507", +"revision": "20250521", "rootUrl": "https://connectors.googleapis.com/", "schemas": { "AuditConfig": { @@ -3959,13 +3928,6 @@ "readOnly": true, "type": "string" }, -"priorityEntityTypes": { -"description": "Optional. 
The priority entity types for the connector version.", -"items": { -"$ref": "PriorityEntityType" -}, -"type": "array" -}, "releaseVersion": { "description": "Output only. ReleaseVersion of the connector, for example: \"1.0.1-alpha\".", "readOnly": true, @@ -7622,30 +7584,6 @@ false }, "type": "object" }, -"PriorityEntityType": { -"description": "PriorityEntityType represents an entity type with its associated priority and order.", -"id": "PriorityEntityType", -"properties": { -"description": { -"description": "The description of the entity type.", -"type": "string" -}, -"id": { -"description": "The entity type.", -"type": "string" -}, -"order": { -"description": "The order of the entity type within its priority group.", -"format": "int32", -"type": "integer" -}, -"priority": { -"description": "The priority of the entity type, such as P0, P1, etc.", -"type": "string" -} -}, -"type": "object" -}, "Provider": { "description": "Provider indicates the owner who provides the connectors.", "id": "Provider", diff --git a/googleapiclient/discovery_cache/documents/connectors.v2.json b/googleapiclient/discovery_cache/documents/connectors.v2.json index b99b748c6d..9e71f83d61 100644 --- a/googleapiclient/discovery_cache/documents/connectors.v2.json +++ b/googleapiclient/discovery_cache/documents/connectors.v2.json @@ -608,6 +608,12 @@ "location": "query", "repeated": true, "type": "string" +}, +"sortOrder": { +"description": "List of 'sort_order' columns to use when returning the results.", +"location": "query", +"repeated": true, +"type": "string" +} }, "path": "v2/{+parent}/entities", @@ -690,7 +696,7 @@ } } }, -"revision": "20250507", +"revision": "20250521", "rootUrl": "https://connectors.googleapis.com/", "schemas": { "AccessCredentials": { diff --git a/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json b/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json index 3cd6fc873a..2abd34e818 100644 --- a/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json +++ b/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json @@ -500,6 +500,163 @@ } } }, +"assessmentRules": { +"methods": { +"create": { +"description": "Creates an assessment rule.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/assessmentRules", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.assessmentRules.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"assessmentRuleId": { +"description": "Optional. A unique ID for the new AssessmentRule. This ID will become the final component of the AssessmentRule's resource name. If no ID is specified, a server-generated ID will be used. This value should be 4-64 characters and must match the regular expression `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource of the assessment rule. The location to create an assessment rule for.
Format: `projects//locations/` or `projects//locations/`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/assessmentRules", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1AssessmentRule" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1AssessmentRule" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes an assessment rule.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/assessmentRules/{assessmentRulesId}", +"httpMethod": "DELETE", +"id": "contactcenterinsights.projects.locations.assessmentRules.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the assessment rule to delete.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/assessmentRules/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Get an assessment rule.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/assessmentRules/{assessmentRulesId}", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.assessmentRules.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the assessment rule to get.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/assessmentRules/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1AssessmentRule" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists assessment rules.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/assessmentRules", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.assessmentRules.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The maximum number of assessment rules to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The value returned by the last `ListAssessmentRulesResponse`; indicates that this is a continuation of a prior `ListAssessmentRules` call and the system should return the next page of data.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource of the assessment rules.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/assessmentRules", +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1ListAssessmentRulesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates an assessment rule.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/assessmentRules/{assessmentRulesId}", +"httpMethod": "PATCH", +"id": "contactcenterinsights.projects.locations.assessmentRules.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. The resource name of the assessment rule.
Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/assessmentRules/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to be updated. If the update_mask is not provided, the update will be applied to all fields.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1AssessmentRule" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1AssessmentRule" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "authorizedViewSets": { "methods": { "create": { @@ -1229,54 +1386,82 @@ } } }, -"feedbackLabels": { +"assessments": { "methods": { -"create": { -"description": "Create feedback label.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels", +"appeal": { +"description": "Appeal an Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}:appeal", "httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.create", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.appeal", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"feedbackLabelId": { -"description": "Optional. The ID of the feedback label to create. If one is not specified it will be generated by the server.", -"location": "query", +"name": { +"description": "Required. The name of the assessment to appeal.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+$", +"required": true, "type": "string" +} +}, +"path": "v1/{+name}:appeal", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1AppealAssessmentRequest" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] }, +"create": { +"description": "Create Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.create", +"parameterOrder": [ +"parent" +], +"parameters": { "parent": { -"description": "Required. The parent resource of the feedback label.", +"description": "Required. 
The parent resource of the assessment.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/feedbackLabels", +"path": "v1/{+parent}/assessments", "request": { -"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "response": { -"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { -"description": "Delete feedback label.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels/{feedbackLabelsId}", +"description": "Delete an Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}", "httpMethod": "DELETE", -"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.delete", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.delete", "parameterOrder": [ "name" ], "parameters": { +"force": { +"description": "Optional. If set to true, all of this assessment's notes will also be deleted. Otherwise, the request will only succeed if it has no notes.", +"location": "query", +"type": "boolean" +}, "name": { -"description": "Required. The name of the feedback label to delete.", +"description": "Required. The name of the assessment to delete.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/feedbackLabels/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } @@ -1289,198 +1474,414 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"finalize": { +"description": "Finalize an Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}:finalize", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.finalize", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The name of the assessment to finalize.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:finalize", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1FinalizeAssessmentRequest" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "get": { -"description": "Get feedback label.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels/{feedbackLabelsId}", +"description": "Get Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}", "httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.get", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.get", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the feedback label to get.", +"description": "Required. The name of the assessment to get.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/feedbackLabels/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "response": { -"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { -"description": "List feedback labels.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels", +"description": "List Assessments.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments", "httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.list", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.list", "parameterOrder": [ "parent" ], "parameters": { "filter": { -"description": "Optional. A filter to reduce results to a specific subset. Supports disjunctions (OR) and conjunctions (AND). Automatically sorts by conversation ID. To sort by all feedback labels in a project see ListAllFeedbackLabels. Supported fields: * `issue_model_id` * `qa_question_id` * `qa_scorecard_id` * `min_create_time` * `max_create_time` * `min_update_time` * `max_update_time` * `feedback_label_type`: QUALITY_AI, TOPIC_MODELING", +"description": "Optional. A filter to reduce results to a specific subset. Supported filters include: * `state` - The state of the assessment * `agent_info.agent_id` - The ID of the agent the assessment is for", "location": "query", "type": "string" }, "pageSize": { -"description": "Optional. 
The maximum number of feedback labels to return in the response. A valid page size ranges from 0 to 100,000 inclusive. If the page size is zero or unspecified, a default page size of 100 will be chosen. Note that a call might return fewer results than the requested page size.",
+"description": "The maximum number of assessments to list. If zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.",
"format": "int32",
"location": "query",
"type": "integer"
},
"pageToken": {
-"description": "Optional. The value returned by the last `ListFeedbackLabelsResponse`. This value indicates that this is a continuation of a prior `ListFeedbackLabels` call and that the system should return the next page of data.",
+"description": "Optional. The value returned by the last `ListAssessmentsResponse`; indicates that this is a continuation of a prior `ListAssessments` call and the system should return the next page of data.",
"location": "query",
"type": "string"
},
"parent": {
-"description": "Required. The parent resource of the feedback labels.",
+"description": "Required. The parent resource of the assessments. To list all assessments in a location, substitute the conversation ID with a '-' character.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+$",
"required": true,
"type": "string"
}
},
-"path": "v1/{+parent}/feedbackLabels",
+"path": "v1/{+parent}/assessments",
"response": {
-"$ref": "GoogleCloudContactcenterinsightsV1ListFeedbackLabelsResponse"
+"$ref": "GoogleCloudContactcenterinsightsV1ListAssessmentsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
-"patch": {
-"description": "Update feedback label.",
-"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels/{feedbackLabelsId}",
-"httpMethod": "PATCH",
-"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.patch",
+"publish": {
+"description": "Publish an Assessment.",
+"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}:publish",
+"httpMethod": "POST",
+"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.publish",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
-"description": "Immutable. Resource name of the FeedbackLabel. Format: projects/{project}/locations/{location}/conversations/{conversation}/feedbackLabels/{feedback_label}",
+"description": "Required. The name of the assessment to publish.",
"location": "path",
-"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/feedbackLabels/[^/]+$",
+"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+$",
"required": true,
"type": "string"
-},
-"updateMask": {
-"description": "Required. 
The list of fields to be updated.", -"format": "google-fieldmask", -"location": "query", -"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:publish", "request": { -"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +"$ref": "GoogleCloudContactcenterinsightsV1PublishAssessmentRequest" }, "response": { -"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } -} -} -} }, -"operations": { +"resources": { +"notes": { "methods": { -"cancel": { -"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/operations/{operationsId}:cancel", +"create": { +"description": "Create Note.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes", "httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.operations.cancel", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.notes.create", "parameterOrder": [ -"name" +"parent" ], "parameters": { -"name": { -"description": "The name of the operation resource to be cancelled.", +"parent": { +"description": "Required. The parent resource of the note.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+name}:cancel", +"path": "v1/{+parent}/notes", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1Note" +}, "response": { -"$ref": "GoogleProtobufEmpty" +"$ref": "GoogleCloudContactcenterinsightsV1Note" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/operations/{operationsId}", -"httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.operations.get", +"delete": { +"description": "Deletes a Note.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes/{notesId}", +"httpMethod": "DELETE", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.notes.delete", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "The name of the operation resource.", +"description": "Required. The name of the note to delete.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+/notes/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "response": { -"$ref": "GoogleLongrunningOperation" +"$ref": "GoogleProtobufEmpty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { -"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/operations", +"description": "List Notes.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes", "httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.operations.list", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.notes.list", "parameterOrder": [ -"name" +"parent" ], "parameters": { -"filter": { -"description": "The standard list filter.", +"pageSize": { +"description": "Optional. The maximum number of notes to return in the response. If zero the service will select a default size. A call might return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", +"format": "int32", "location": "query", -"type": "string" +"type": "integer" }, -"name": { -"description": "The name of the operation's parent resource.", +"pageToken": { +"description": "Optional. The value returned by the last `ListNotesResponse`. This value indicates that this is a continuation of a prior `ListNotes` call and that the system should return the next page of data.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. 
The parent resource of the notes.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/notes", +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1ListNotesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Update Note.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes/{notesId}", +"httpMethod": "PATCH", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.assessments.notes.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/assessments/[^/]+/notes/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to be updated. If the update_mask is empty, all updateable fields will be updated. Acceptable fields include: * `content`", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1Note" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1Note" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +}, +"feedbackLabels": { +"methods": { +"create": { +"description": "Create feedback label.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"feedbackLabelId": { +"description": "Optional. The ID of the feedback label to create. If one is not specified it will be generated by the server.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. 
The parent resource of the feedback label.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/feedbackLabels", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Delete feedback label.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels/{feedbackLabelsId}", +"httpMethod": "DELETE", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the feedback label to delete.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/feedbackLabels/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Get feedback label.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels/{feedbackLabelsId}", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the feedback label to get.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/feedbackLabels/[^/]+$", "required": true, "type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "List feedback labels.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. A filter to reduce results to a specific subset. Supports disjunctions (OR) and conjunctions (AND). Automatically sorts by conversation ID. To sort by all feedback labels in a project see ListAllFeedbackLabels. Supported fields: * `issue_model_id` * `qa_question_id` * `qa_scorecard_id` * `min_create_time` * `max_create_time` * `min_update_time` * `max_update_time` * `feedback_label_type`: QUALITY_AI, TOPIC_MODELING", +"location": "query", +"type": "string" }, "pageSize": { -"description": "The standard list page size.", +"description": "Optional. The maximum number of feedback labels to return in the response. A valid page size ranges from 0 to 100,000 inclusive. If the page size is zero or unspecified, a default page size of 100 will be chosen. 
Note that a call might return fewer results than the requested page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "The standard list page token.", +"description": "Optional. The value returned by the last `ListFeedbackLabelsResponse`. This value indicates that this is a continuation of a prior `ListFeedbackLabels` call and that the system should return the next page of data.", "location": "query", "type": "string" +}, +"parent": { +"description": "Required. The parent resource of the feedback labels.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+$", +"required": true, +"type": "string" } }, -"path": "v1/{+name}/operations", +"path": "v1/{+parent}/feedbackLabels", "response": { -"$ref": "GoogleLongrunningListOperationsResponse" +"$ref": "GoogleCloudContactcenterinsightsV1ListFeedbackLabelsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Update feedback label.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/conversations/{conversationsId}/feedbackLabels/{feedbackLabelsId}", +"httpMethod": "PATCH", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.conversations.feedbackLabels.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Immutable. Resource name of the FeedbackLabel. Format: projects/{project}/locations/{location}/conversations/{conversation}/feedbackLabels/{feedback_label}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/conversations/[^/]+/feedbackLabels/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Required. The list of fields to be updated.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1FeedbackLabel" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -1489,31 +1890,472 @@ } } } +}, +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:cancel", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" } +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/authorizedViewSets/{authorizedViewSetsId}/authorizedViews/{authorizedViewsId}/operations", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.authorizedViewSets.authorizedViews.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/authorizedViewSets/[^/]+/authorizedViews/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}/operations", +"response": { +"$ref": "GoogleLongrunningListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +}, +"conversations": { +"methods": { +"bulkAnalyze": { +"description": "Analyzes multiple conversations in a single request.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:bulkAnalyze", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.conversations.bulkAnalyze", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. 
The parent resource to create analyses in.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/conversations:bulkAnalyze", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1BulkAnalyzeConversationsRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"bulkDelete": { +"description": "Deletes multiple conversations in a single request.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:bulkDelete", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.conversations.bulkDelete", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The parent resource to delete conversations from. Format: projects/{project}/locations/{location}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/conversations:bulkDelete", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1BulkDeleteConversationsRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"calculateStats": { +"description": "Gets conversation statistics.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:calculateStats", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.conversations.calculateStats", +"parameterOrder": [ +"location" +], +"parameters": { +"filter": { +"description": "A filter to reduce results to a specific subset. This field is useful for getting statistics about conversations with specific properties.", +"location": "query", +"type": "string" +}, +"location": { +"description": "Required. The location of the conversations.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+location}/conversations:calculateStats", +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1CalculateStatsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"create": { +"description": "Creates a conversation. Note that this method does not support audio transcription or redaction. Use `conversations.upload` instead.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.conversations.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"conversationId": { +"description": "A unique ID for the new conversation. This ID will become the final component of the conversation's resource name. If no ID is specified, a server-generated ID will be used. This value should be 4-64 characters and must match the regular expression `^[a-z0-9-]{4,64}$`. Valid characters are `a-z-`", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. 
The parent resource of the conversation.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/conversations", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a conversation.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}", +"httpMethod": "DELETE", +"id": "contactcenterinsights.projects.locations.conversations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"force": { +"description": "If set to true, all of this conversation's analyses will also be deleted. Otherwise, the request will only succeed if the conversation has no analyses.", +"location": "query", +"type": "boolean" +}, +"name": { +"description": "Required. The name of the conversation to delete.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets a conversation.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.conversations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the conversation to get.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", +"required": true, +"type": "string" +}, +"view": { +"description": "The level of details of the conversation. Default is `FULL`.", +"enum": [ +"CONVERSATION_VIEW_UNSPECIFIED", +"FULL", +"BASIC" +], +"enumDescriptions": [ +"The conversation view is not specified. * Defaults to `FULL` in `GetConversationRequest`. * Defaults to `BASIC` in `ListConversationsRequest`.", +"Populates all fields in the conversation.", +"Populates all fields in the conversation except the transcript." +], +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"ingest": { +"description": "Imports conversations and processes them according to the user's configuration.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:ingest", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.conversations.ingest", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. 
The parent resource for new conversations.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/conversations:ingest", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1IngestConversationsRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists conversations.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.conversations.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "A filter to reduce results to a specific subset. Useful for querying conversations with specific properties.", +"location": "query", +"type": "string" +}, +"orderBy": { +"description": "Optional. The attribute by which to order conversations in the response. If empty, conversations will be ordered by descending creation time. Supported values are one of the following: * create_time * customer_satisfaction_rating * duration * latest_analysis * start_time * turn_count The default sort order is ascending. To specify order, append `asc` or `desc` (`create_time desc`). For more details, see [Google AIPs Ordering](https://google.aip.dev/132#ordering).", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "The maximum number of conversations to return in the response. A valid page size ranges from 0 to 100,000 inclusive. If the page size is zero or unspecified, a default page size of 100 will be chosen. Note that a call might return fewer results than the requested page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The value returned by the last `ListConversationsResponse`. This value indicates that this is a continuation of a prior `ListConversations` call and that the system should return the next page of data.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource of the conversation.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"view": { +"description": "The level of details of the conversation. Default is `BASIC`.", +"enum": [ +"CONVERSATION_VIEW_UNSPECIFIED", +"FULL", +"BASIC" +], +"enumDescriptions": [ +"The conversation view is not specified. * Defaults to `FULL` in `GetConversationRequest`. * Defaults to `BASIC` in `ListConversationsRequest`.", +"Populates all fields in the conversation.", +"Populates all fields in the conversation except the transcript." +], +"location": "query", +"type": "string" +} +}, +"path": "v1/{+parent}/conversations", +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1ListConversationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates a conversation.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}", +"httpMethod": "PATCH", +"id": "contactcenterinsights.projects.locations.conversations.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Immutable. The resource name of the conversation. 
Format: projects/{project}/locations/{location}/conversations/{conversation}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "The list of fields to be updated. All possible fields can be updated by passing `*`, or a subset of the following updateable fields can be provided: * `agent_id` * `language_code` * `labels` * `metadata` * `quality_metadata` * `call_metadata` * `start_time` * `expire_time` or `ttl` * `data_source.gcs_source.audio_uri` or `data_source.dialogflow_source.audio_uri`", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"upload": { +"description": "Create a long-running conversation upload operation. This method differs from `CreateConversation` by allowing audio transcription and optional DLP redaction.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:upload", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.conversations.upload", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The parent resource of the conversation.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/conversations:upload", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1UploadConversationRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } }, -"conversations": { +"resources": { +"analyses": { "methods": { -"bulkAnalyze": { -"description": "Analyzes multiple conversations in a single request.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:bulkAnalyze", +"create": { +"description": "Creates an analysis. The long running operation is done when the analysis has completed.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses", "httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.conversations.bulkAnalyze", +"id": "contactcenterinsights.projects.locations.conversations.analyses.create", "parameterOrder": [ "parent" ], "parameters": { "parent": { -"description": "Required. The parent resource to create analyses in.", +"description": "Required. 
The parent resource of the analysis.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/conversations:bulkAnalyze", +"path": "v1/{+parent}/analyses", "request": { -"$ref": "GoogleCloudContactcenterinsightsV1BulkAnalyzeConversationsRequest" +"$ref": "GoogleCloudContactcenterinsightsV1Analysis" }, "response": { "$ref": "GoogleLongrunningOperation" @@ -1522,115 +2364,175 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"bulkDelete": { -"description": "Deletes multiple conversations in a single request.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:bulkDelete", -"httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.conversations.bulkDelete", +"delete": { +"description": "Deletes an analysis.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses/{analysesId}", +"httpMethod": "DELETE", +"id": "contactcenterinsights.projects.locations.conversations.analyses.delete", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. The parent resource to delete conversations from. Format: projects/{project}/locations/{location}", +"name": { +"description": "Required. The name of the analysis to delete.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/analyses/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/conversations:bulkDelete", -"request": { -"$ref": "GoogleCloudContactcenterinsightsV1BulkDeleteConversationsRequest" +"path": "v1/{+name}", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets an analysis.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses/{analysesId}", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.conversations.analyses.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the analysis to get.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/analyses/[^/]+$", +"required": true, +"type": "string" +} }, +"path": "v1/{+name}", "response": { -"$ref": "GoogleLongrunningOperation" +"$ref": "GoogleCloudContactcenterinsightsV1Analysis" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"calculateStats": { -"description": "Gets conversation statistics.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:calculateStats", +"list": { +"description": "Lists analyses.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses", "httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.conversations.calculateStats", +"id": "contactcenterinsights.projects.locations.conversations.analyses.list", "parameterOrder": [ -"location" +"parent" ], "parameters": { "filter": { -"description": "A filter to reduce results to a specific subset. This field is useful for getting statistics about conversations with specific properties.", +"description": "A filter to reduce results to a specific subset. Useful for querying conversations with specific properties.", "location": "query", "type": "string" }, -"location": { -"description": "Required. 
The location of the conversations.", +"pageSize": { +"description": "The maximum number of analyses to return in the response. If this value is zero, the service will select a default size. A call might return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The value returned by the last `ListAnalysesResponse`; indicates that this is a continuation of a prior `ListAnalyses` call and the system should return the next page of data.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource of the analyses.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+location}/conversations:calculateStats", +"path": "v1/{+parent}/analyses", "response": { -"$ref": "GoogleCloudContactcenterinsightsV1CalculateStatsResponse" +"$ref": "GoogleCloudContactcenterinsightsV1ListAnalysesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +} +} }, -"create": { -"description": "Creates a conversation. Note that this method does not support audio transcription or redaction. Use `conversations.upload` instead.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations", +"assessments": { +"methods": { +"appeal": { +"description": "Appeal an Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}:appeal", "httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.conversations.create", +"id": "contactcenterinsights.projects.locations.conversations.assessments.appeal", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"conversationId": { -"description": "A unique ID for the new conversation. This ID will become the final component of the conversation's resource name. If no ID is specified, a server-generated ID will be used. This value should be 4-64 characters and must match the regular expression `^[a-z0-9-]{4,64}$`. Valid characters are `a-z-`", -"location": "query", +"name": { +"description": "Required. The name of the assessment to appeal.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+$", +"required": true, "type": "string" +} +}, +"path": "v1/{+name}:appeal", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1AppealAssessmentRequest" +}, +"response": { +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] }, +"create": { +"description": "Create Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.conversations.assessments.create", +"parameterOrder": [ +"parent" +], +"parameters": { "parent": { -"description": "Required. The parent resource of the conversation.", +"description": "Required. 
The parent resource of the assessment.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/conversations", +"path": "v1/{+parent}/assessments", "request": { -"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "response": { -"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { -"description": "Deletes a conversation.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}", +"description": "Delete an Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}", "httpMethod": "DELETE", -"id": "contactcenterinsights.projects.locations.conversations.delete", +"id": "contactcenterinsights.projects.locations.conversations.assessments.delete", "parameterOrder": [ "name" ], "parameters": { "force": { -"description": "If set to true, all of this conversation's analyses will also be deleted. Otherwise, the request will only succeed if the conversation has no analyses.", +"description": "Optional. If set to true, all of this assessment's notes will also be deleted. Otherwise, the request will only succeed if it has no notes.", "location": "query", "type": "boolean" }, "name": { -"description": "Required. The name of the conversation to delete.", +"description": "Required. The name of the assessment to delete.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } @@ -1643,192 +2545,123 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets a conversation.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}", -"httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.conversations.get", +"finalize": { +"description": "Finalize an Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}:finalize", +"httpMethod": "POST", +"id": "contactcenterinsights.projects.locations.conversations.assessments.finalize", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the conversation to get.", +"description": "Required. The name of the assessment to finalize.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" -}, -"view": { -"description": "The level of details of the conversation. Default is `FULL`.", -"enum": [ -"CONVERSATION_VIEW_UNSPECIFIED", -"FULL", -"BASIC" -], -"enumDescriptions": [ -"The conversation view is not specified. * Defaults to `FULL` in `GetConversationRequest`. * Defaults to `BASIC` in `ListConversationsRequest`.", -"Populates all fields in the conversation.", -"Populates all fields in the conversation except the transcript." 
-], -"location": "query", -"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:finalize", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1FinalizeAssessmentRequest" +}, "response": { -"$ref": "GoogleCloudContactcenterinsightsV1Conversation" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"ingest": { -"description": "Imports conversations and processes them according to the user's configuration.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:ingest", -"httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.conversations.ingest", +"get": { +"description": "Get Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}", +"httpMethod": "GET", +"id": "contactcenterinsights.projects.locations.conversations.assessments.get", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. The parent resource for new conversations.", +"name": { +"description": "Required. The name of the assessment to get.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/conversations:ingest", -"request": { -"$ref": "GoogleCloudContactcenterinsightsV1IngestConversationsRequest" -}, +"path": "v1/{+name}", "response": { -"$ref": "GoogleLongrunningOperation" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { -"description": "Lists conversations.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations", +"description": "List Assessments.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments", "httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.conversations.list", +"id": "contactcenterinsights.projects.locations.conversations.assessments.list", "parameterOrder": [ "parent" ], "parameters": { "filter": { -"description": "A filter to reduce results to a specific subset. Useful for querying conversations with specific properties.", -"location": "query", -"type": "string" -}, -"orderBy": { -"description": "Optional. The attribute by which to order conversations in the response. If empty, conversations will be ordered by descending creation time. Supported values are one of the following: * create_time * customer_satisfaction_rating * duration * latest_analysis * start_time * turn_count The default sort order is ascending. To specify order, append `asc` or `desc` (`create_time desc`). For more details, see [Google AIPs Ordering](https://google.aip.dev/132#ordering).", +"description": "Optional. A filter to reduce results to a specific subset. Supported filters include: * `state` - The state of the assessment * `agent_info.agent_id` - The ID of the agent the assessment is for", "location": "query", "type": "string" }, "pageSize": { -"description": "The maximum number of conversations to return in the response. A valid page size ranges from 0 to 100,000 inclusive. If the page size is zero or unspecified, a default page size of 100 will be chosen. Note that a call might return fewer results than the requested page size.", +"description": "The maximum number of assessments to list. If zero, the service will select a default size. 
A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.",
"format": "int32",
"location": "query",
"type": "integer"
},
"pageToken": {
-"description": "The value returned by the last `ListConversationsResponse`. This value indicates that this is a continuation of a prior `ListConversations` call and that the system should return the next page of data.",
+"description": "Optional. The value returned by the last `ListAssessmentsResponse`; indicates that this is a continuation of a prior `ListAssessments` call and the system should return the next page of data.",
"location": "query",
"type": "string"
},
"parent": {
-"description": "Required. The parent resource of the conversation.",
-"location": "path",
-"pattern": "^projects/[^/]+/locations/[^/]+$",
-"required": true,
-"type": "string"
-},
-"view": {
-"description": "The level of details of the conversation. Default is `BASIC`.",
-"enum": [
-"CONVERSATION_VIEW_UNSPECIFIED",
-"FULL",
-"BASIC"
-],
-"enumDescriptions": [
-"The conversation view is not specified. * Defaults to `FULL` in `GetConversationRequest`. * Defaults to `BASIC` in `ListConversationsRequest`.",
-"Populates all fields in the conversation.",
-"Populates all fields in the conversation except the transcript."
-],
-"location": "query",
-"type": "string"
-}
-},
-"path": "v1/{+parent}/conversations",
-"response": {
-"$ref": "GoogleCloudContactcenterinsightsV1ListConversationsResponse"
-},
-"scopes": [
-"https://www.googleapis.com/auth/cloud-platform"
-]
-},
-"patch": {
-"description": "Updates a conversation.",
-"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}",
-"httpMethod": "PATCH",
-"id": "contactcenterinsights.projects.locations.conversations.patch",
-"parameterOrder": [
-"name"
-],
-"parameters": {
-"name": {
-"description": "Immutable. The resource name of the conversation. Format: projects/{project}/locations/{location}/conversations/{conversation}",
+"description": "Required. The parent resource of the assessments. To list all assessments in a location, substitute the conversation ID with a '-' character.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$",
"required": true,
-"type": "string"
-},
-"updateMask": {
-"description": "The list of fields to be updated. All possible fields can be updated by passing `*`, or a subset of the following updateable fields can be provided: * `agent_id` * `language_code` * `labels` * `metadata` * `quality_metadata` * `call_metadata` * `start_time` * `expire_time` or `ttl` * `data_source.gcs_source.audio_uri` or `data_source.dialogflow_source.audio_uri`",
-"format": "google-fieldmask",
-"location": "query",
-"type": "string"
-}
-},
-"path": "v1/{+name}",
-"request": {
-"$ref": "GoogleCloudContactcenterinsightsV1Conversation"
+"type": "string"
+}
},
+"path": "v1/{+parent}/assessments",
"response": {
-"$ref": "GoogleCloudContactcenterinsightsV1Conversation"
+"$ref": "GoogleCloudContactcenterinsightsV1ListAssessmentsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
-"upload": {
-"description": "Create a long-running conversation upload operation. 
This method differs from `CreateConversation` by allowing audio transcription and optional DLP redaction.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations:upload", +"publish": { +"description": "Publish an Assessment.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}:publish", "httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.conversations.upload", +"id": "contactcenterinsights.projects.locations.conversations.assessments.publish", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. The parent resource of the conversation.", +"name": { +"description": "Required. The name of the assessment to publish.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/conversations:upload", +"path": "v1/{+name}:publish", "request": { -"$ref": "GoogleCloudContactcenterinsightsV1UploadConversationRequest" +"$ref": "GoogleCloudContactcenterinsightsV1PublishAssessmentRequest" }, "response": { -"$ref": "GoogleLongrunningOperation" +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -1836,49 +2669,49 @@ } }, "resources": { -"analyses": { +"notes": { "methods": { "create": { -"description": "Creates an analysis. The long running operation is done when the analysis has completed.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses", +"description": "Create Note.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes", "httpMethod": "POST", -"id": "contactcenterinsights.projects.locations.conversations.analyses.create", +"id": "contactcenterinsights.projects.locations.conversations.assessments.notes.create", "parameterOrder": [ "parent" ], "parameters": { "parent": { -"description": "Required. The parent resource of the analysis.", +"description": "Required. The parent resource of the note.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/analyses", +"path": "v1/{+parent}/notes", "request": { -"$ref": "GoogleCloudContactcenterinsightsV1Analysis" +"$ref": "GoogleCloudContactcenterinsightsV1Note" }, "response": { -"$ref": "GoogleLongrunningOperation" +"$ref": "GoogleCloudContactcenterinsightsV1Note" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { -"description": "Deletes an analysis.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses/{analysesId}", +"description": "Deletes a Note.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes/{notesId}", "httpMethod": "DELETE", -"id": "contactcenterinsights.projects.locations.conversations.analyses.delete", +"id": "contactcenterinsights.projects.locations.conversations.assessments.notes.delete", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the analysis to delete.", +"description": "Required. 
The name of the note to delete.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/analyses/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+/notes/[^/]+$", "required": true, "type": "string" } @@ -1891,73 +2724,79 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets an analysis.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses/{analysesId}", +"list": { +"description": "List Notes.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes", "httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.conversations.analyses.get", +"id": "contactcenterinsights.projects.locations.conversations.assessments.notes.list", "parameterOrder": [ -"name" +"parent" ], "parameters": { -"name": { -"description": "Required. The name of the analysis to get.", +"pageSize": { +"description": "Optional. The maximum number of notes to return in the response. If zero the service will select a default size. A call might return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The value returned by the last `ListNotesResponse`. This value indicates that this is a continuation of a prior `ListNotes` call and that the system should return the next page of data.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource of the notes.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/analyses/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+parent}/notes", "response": { -"$ref": "GoogleCloudContactcenterinsightsV1Analysis" +"$ref": "GoogleCloudContactcenterinsightsV1ListNotesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists analyses.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/analyses", -"httpMethod": "GET", -"id": "contactcenterinsights.projects.locations.conversations.analyses.list", +"patch": { +"description": "Update Note.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/conversations/{conversationsId}/assessments/{assessmentsId}/notes/{notesId}", +"httpMethod": "PATCH", +"id": "contactcenterinsights.projects.locations.conversations.assessments.notes.patch", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"filter": { -"description": "A filter to reduce results to a specific subset. Useful for querying conversations with specific properties.", -"location": "query", +"name": { +"description": "Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+/assessments/[^/]+/notes/[^/]+$", +"required": true, "type": "string" }, -"pageSize": { -"description": "The maximum number of analyses to return in the response. If this value is zero, the service will select a default size. A call might return fewer objects than requested. 
A non-empty `next_page_token` in the response indicates that more data is available.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "The value returned by the last `ListAnalysesResponse`; indicates that this is a continuation of a prior `ListAnalyses` call and the system should return the next page of data.", +"updateMask": { +"description": "Optional. The list of fields to be updated. If the update_mask is empty, all updateable fields will be updated. Acceptable fields include: * `content`", +"format": "google-fieldmask", "location": "query", "type": "string" -}, -"parent": { -"description": "Required. The parent resource of the analyses.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/conversations/[^/]+$", -"required": true, -"type": "string" } }, -"path": "v1/{+parent}/analyses", +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudContactcenterinsightsV1Note" +}, "response": { -"$ref": "GoogleCloudContactcenterinsightsV1ListAnalysesResponse" +"$ref": "GoogleCloudContactcenterinsightsV1Note" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } +} +} }, "feedbackLabels": { "methods": { @@ -4254,7 +5093,7 @@ } } }, -"revision": "20250512", +"revision": "20250521", "rootUrl": "https://contactcenterinsights.googleapis.com/", "schemas": { "GoogleCloudContactcenterinsightsV1Analysis": { @@ -4532,10 +5371,15 @@ "BASELINE_MODEL", "BASELINE_MODEL_V2_0" ], +"enumDeprecated": [ +false, +true, +true +], "enumDescriptions": [ "Unspecified summarization model.", -"The CCAI baseline model.", -"The CCAI baseline model, V2.0." +"The CCAI baseline model. This model is deprecated and will be removed in the future. We recommend using `generator` instead.", +"The CCAI baseline model, V2.0. This model is deprecated and will be removed in the future. We recommend using `generator` instead." ], "type": "string" } @@ -4573,6 +5417,12 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1AppealAssessmentRequest": { +"description": "The message to appeal an assessment.", +"id": "GoogleCloudContactcenterinsightsV1AppealAssessmentRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1ArticleSuggestionData": { "description": "Agent Assist Article Suggestion data.", "id": "GoogleCloudContactcenterinsightsV1ArticleSuggestionData", @@ -4608,6 +5458,91 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1Assessment": { +"description": "The assessment resource.", +"id": "GoogleCloudContactcenterinsightsV1Assessment", +"properties": { +"agentInfo": { +"$ref": "GoogleCloudContactcenterinsightsV1ConversationQualityMetadataAgentInfo", +"description": "Information about the agent the assessment is for." +}, +"createTime": { +"description": "Output only. The time at which the assessment was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"name": { +"description": "Identifier. The resource name of the assessment. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}", +"type": "string" +}, +"state": { +"description": "Output only. The state of the assessment.", +"enum": [ +"STATE_UNSPECIFIED", +"DRAFT", +"PUBLISHED", +"APPEALED", +"FINALIZED" +], +"enumDescriptions": [ +"The state is unspecified. This value should not be used.", +"The default state of all new assessments.", +"The assessment has been published.", +"The assessment has been appealed.", +"The assessment has been finalized." 
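The state machine above (DRAFT, PUBLISHED, APPEALED, FINALIZED) is driven by the assessment methods earlier in this diff: `publish` moves a draft or appealed assessment to PUBLISHED, and `finalize` makes the assessment and its notes immutable. A minimal sketch of that flow with this client library, assuming Application Default Credentials; the project, conversation, and assessment IDs are placeholders, and the empty request bodies mirror the empty `PublishAssessmentRequest`/`FinalizeAssessmentRequest` messages:

```python
from googleapiclient.discovery import build

# Placeholder resource name; real IDs come from CreateAssessment/ListAssessments.
assessment = ("projects/my-project/locations/us-central1/"
              "conversations/12345/assessments/67890")

# Uses Application Default Credentials.
insights = build("contactcenterinsights", "v1")
assessments = insights.projects().locations().conversations().assessments()

# Publish a draft (or appealed) assessment; the body is an empty
# PublishAssessmentRequest message.
published = assessments.publish(name=assessment, body={}).execute()
print(published["state"])  # expected: "PUBLISHED"

# Finalize it, making the assessment and its notes immutable.
finalized = assessments.finalize(name=assessment, body={}).execute()
print(finalized["state"])  # expected: "FINALIZED"
```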
+], +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. The time at which the assessment was last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudContactcenterinsightsV1AssessmentRule": { +"description": "The CCAI Insights project-wide assessment rule. This assessment rule will be applied to all conversations from the previous sampling cycle that match the sample rule defined in the assessment rule. One project can have multiple assessment rules.", +"id": "GoogleCloudContactcenterinsightsV1AssessmentRule", +"properties": { +"active": { +"description": "If true, apply this rule to conversations. Otherwise, this rule is inactive.", +"type": "boolean" +}, +"createTime": { +"description": "Output only. The time at which this assessment rule was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"displayName": { +"description": "Display Name of the assessment rule.", +"type": "string" +}, +"name": { +"description": "Identifier. The resource name of the assessment rule. Format: projects/{project}/locations/{location}/assessmentRules/{assessment_rule}", +"type": "string" +}, +"sampleRule": { +"$ref": "GoogleCloudContactcenterinsightsV1SampleRule", +"description": "The sample rule for the assessment rule." +}, +"scheduleInfo": { +"$ref": "GoogleCloudContactcenterinsightsV1ScheduleInfo", +"description": "Schedule info for the assessment rule." +}, +"updateTime": { +"description": "Output only. The most recent time at which this assessment rule was updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1AuthorizedView": { "description": "An AuthorizedView represents a view of accessible Insights resources (for example, Conversation and Scorecard). Those who have read access to the AuthorizedView resource will have access to these Insight resources as well.", "id": "GoogleCloudContactcenterinsightsV1AuthorizedView", @@ -6486,6 +7421,12 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1FinalizeAssessmentRequest": { +"description": "The message to finalize an assessment. Finalizing makes an assessment and its notes immutable.", +"id": "GoogleCloudContactcenterinsightsV1FinalizeAssessmentRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1GcsSource": { "description": "A Cloud Storage source of conversation data.", "id": "GoogleCloudContactcenterinsightsV1GcsSource", @@ -7136,6 +8077,42 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1ListAssessmentRulesResponse": { +"description": "The response of listing assessment rules.", +"id": "GoogleCloudContactcenterinsightsV1ListAssessmentRulesResponse", +"properties": { +"assessmentRules": { +"description": "The assessment rules that match the request.", +"items": { +"$ref": "GoogleCloudContactcenterinsightsV1AssessmentRule" +}, +"type": "array" +}, +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page.
If this field is omitted, there are no subsequent pages.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudContactcenterinsightsV1ListAssessmentsResponse": { +"description": "The response of listing assessments.", +"id": "GoogleCloudContactcenterinsightsV1ListAssessmentsResponse", +"properties": { +"assessments": { +"description": "The assessments that match the request.", +"items": { +"$ref": "GoogleCloudContactcenterinsightsV1Assessment" +}, +"type": "array" +}, +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1ListAuthorizedViewSetsResponse": { "description": "The response from a ListAuthorizedViewSet request.", "id": "GoogleCloudContactcenterinsightsV1ListAuthorizedViewSetsResponse", @@ -7236,6 +8213,24 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1ListNotesResponse": { +"description": "The response of listing notes.", +"id": "GoogleCloudContactcenterinsightsV1ListNotesResponse", +"properties": { +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", +"type": "string" +}, +"notes": { +"description": "The notes that match the request.", +"items": { +"$ref": "GoogleCloudContactcenterinsightsV1Note" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1ListPhraseMatchersResponse": { "description": "The response of listing phrase matchers.", "id": "GoogleCloudContactcenterinsightsV1ListPhraseMatchersResponse", @@ -7326,6 +8321,79 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1Note": { +"description": "The conversation assessment note resource.", +"id": "GoogleCloudContactcenterinsightsV1Note", +"properties": { +"assessmentNote": { +"$ref": "GoogleCloudContactcenterinsightsV1NoteAssessmentNote", +"description": "The note is associated to the entire parent assessment." +}, +"content": { +"description": "The note content.", +"type": "string" +}, +"conversationTurnNote": { +"$ref": "GoogleCloudContactcenterinsightsV1NoteConversationTurnNote", +"description": "The note is associated with a conversation turn." +}, +"createTime": { +"description": "Output only. The time at which the note was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"name": { +"description": "Identifier. The resource name of the note. Format: projects/{project}/locations/{location}/conversations/{conversation}/assessments/{assessment}/notes/{note}", +"type": "string" +}, +"noteCreator": { +"$ref": "GoogleCloudContactcenterinsightsV1UserInfo", +"description": "Output only. The user that created the note.", +"readOnly": true +}, +"qaQuestionNote": { +"$ref": "GoogleCloudContactcenterinsightsV1NoteQaQuestionNote", +"description": "The note is associated with a QA question in one of the conversation's scorecard results." +}, +"updateTime": { +"description": "Output only. 
The time at which the note was last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudContactcenterinsightsV1NoteAssessmentNote": { +"description": "A note about the entire parent assessment.", +"id": "GoogleCloudContactcenterinsightsV1NoteAssessmentNote", +"properties": {}, +"type": "object" +}, +"GoogleCloudContactcenterinsightsV1NoteConversationTurnNote": { +"description": "A note about a conversation turn.", +"id": "GoogleCloudContactcenterinsightsV1NoteConversationTurnNote", +"properties": { +"turnIndex": { +"description": "The conversation turn index that the note is associated with.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudContactcenterinsightsV1NoteQaQuestionNote": { +"description": "A note about a QA question.", +"id": "GoogleCloudContactcenterinsightsV1NoteQaQuestionNote", +"properties": { +"qaQuestion": { +"description": "The question resource that the note is associated with.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1PhraseMatchData": { "description": "The data for a matched phrase matcher. Represents information identifying a phrase matcher for a given match.", "id": "GoogleCloudContactcenterinsightsV1PhraseMatchData", @@ -7484,6 +8552,12 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1PublishAssessmentRequest": { +"description": "The message to publish an assessment. Draft and appealed assessments can be published. Publishing simply changes the state of the assessment to published, allowing the console and authorized views to filter on the state.", +"id": "GoogleCloudContactcenterinsightsV1PublishAssessmentRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1QaAnswer": { "description": "An answer to a QaQuestion.", "id": "GoogleCloudContactcenterinsightsV1QaAnswer", @@ -8386,6 +9460,31 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1ScheduleInfo": { +"description": "Message for schedule info.", +"id": "GoogleCloudContactcenterinsightsV1ScheduleInfo", +"properties": { +"endTime": { +"description": "End time of the schedule. If not specified, will keep scheduling new pipelines for execution until the schedule is no longer active or deleted.", +"format": "google-datetime", +"type": "string" +}, +"schedule": { +"description": "The groc expression. Format: `every number [synchronized]` Time units can be: minutes, hours Synchronized is optional and indicates that the schedule should be synchronized to the start of the interval: every 5 minutes synchronized means 00:00, 00:05 ... Otherwise the start time is random within the interval. Example: `every 5 minutes` could be 00:02, 00:07, 00:12, ...", +"type": "string" +}, +"startTime": { +"description": "Start time of the schedule. If not specified, will start as soon as the schedule is created.", +"format": "google-datetime", +"type": "string" +}, +"timeZone": { +"description": "The timezone to use for the groc expression.
If not specified, defaults to UTC.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1SearchAuthorizedViewsResponse": { "description": "The response from a ListAuthorizedViews request.", "id": "GoogleCloudContactcenterinsightsV1SearchAuthorizedViewsResponse", @@ -8686,6 +9785,17 @@ }, "type": "object" }, +"GoogleCloudContactcenterinsightsV1UserInfo": { +"description": "Information about a user.", +"id": "GoogleCloudContactcenterinsightsV1UserInfo", +"properties": { +"username": { +"description": "The user's username.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudContactcenterinsightsV1View": { "description": "The View resource.", "id": "GoogleCloudContactcenterinsightsV1View", @@ -8948,10 +10058,15 @@ "BASELINE_MODEL", "BASELINE_MODEL_V2_0" ], +"enumDeprecated": [ +false, +true, +true +], "enumDescriptions": [ "Unspecified summarization model.", -"The CCAI baseline model.", -"The CCAI baseline model, V2.0." +"The CCAI baseline model. This model is deprecated and will be removed in the future. We recommend using `generator` instead.", +"The CCAI baseline model, V2.0. This model is deprecated and will be removed in the future. We recommend using `generator` instead." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json index 7ecb3954bc..db68d40ee1 100644 --- a/googleapiclient/discovery_cache/documents/container.v1.json +++ b/googleapiclient/discovery_cache/documents/container.v1.json @@ -2660,7 +2660,7 @@ } } }, -"revision": "20250429", +"revision": "20250513", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2783,6 +2783,10 @@ "$ref": "GkeBackupAgentConfig", "description": "Configuration for the Backup for GKE agent addon." }, +"highScaleCheckpointingConfig": { +"$ref": "HighScaleCheckpointingConfig", +"description": "Configuration for the High Scale Checkpointing add-on." +}, "horizontalPodAutoscaling": { "$ref": "HorizontalPodAutoscaling", "description": "Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods." @@ -2854,6 +2858,22 @@ "description": "Whether or not to enable nested virtualization (defaults to false).", "type": "boolean" }, +"performanceMonitoringUnit": { +"description": "Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node.", +"enum": [ +"PERFORMANCE_MONITORING_UNIT_UNSPECIFIED", +"ARCHITECTURAL", +"STANDARD", +"ENHANCED" +], +"enumDescriptions": [ +"PMU not enabled.", +"Architecturally defined non-LLC events.", +"Most documented core/L2 events.", +"Most documented core/L2 and LLC events." +], +"type": "string" +}, "threadsPerCore": { "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. 
If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "format": "int64", @@ -2862,6 +2882,12 @@ }, "type": "object" }, +"AnonymousAuthenticationConfig": { +"description": "AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication.", +"id": "AnonymousAuthenticationConfig", +"properties": {}, +"type": "object" +}, "AuthenticatorGroupsConfig": { "description": "Configuration for returning group information from authenticators.", "id": "AuthenticatorGroupsConfig", @@ -3287,6 +3313,17 @@ "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." }, +"alphaClusterFeatureGates": { +"description": "The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. \"featureX=true\" or \"featureX=false\")", +"items": { +"type": "string" +}, +"type": "array" +}, +"anonymousAuthenticationConfig": { +"$ref": "AnonymousAuthenticationConfig", +"description": "Configuration for limiting anonymous access to all endpoints except the health checks." +}, "authenticatorGroupsConfig": { "$ref": "AuthenticatorGroupsConfig", "description": "Configuration controlling RBAC group membership information." @@ -3728,6 +3765,10 @@ "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." }, +"desiredAnonymousAuthenticationConfig": { +"$ref": "AnonymousAuthenticationConfig", +"description": "Configuration for limiting anonymous access to all endpoints except the health checks." +}, "desiredAuthenticatorGroupsConfig": { "$ref": "AuthenticatorGroupsConfig", "description": "The desired authenticator groups config for the cluster." @@ -4844,6 +4885,17 @@ false }, "type": "object" }, +"HighScaleCheckpointingConfig": { +"description": "Configuration for the High Scale Checkpointing.", +"id": "HighScaleCheckpointingConfig", +"properties": { +"enabled": { +"description": "Whether the High Scale Checkpointing is enabled for this cluster.", +"type": "boolean" +} +}, +"type": "object" +}, "HorizontalPodAutoscaling": { "description": "Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods.", "id": "HorizontalPodAutoscaling", diff --git a/googleapiclient/discovery_cache/documents/container.v1beta1.json b/googleapiclient/discovery_cache/documents/container.v1beta1.json index d660cede23..204314acc3 100644 --- a/googleapiclient/discovery_cache/documents/container.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/container.v1beta1.json @@ -2685,7 +2685,7 @@ } } }, -"revision": "20250429", +"revision": "20250513", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2814,6 +2814,10 @@ "$ref": "GkeBackupAgentConfig", "description": "Configuration for the Backup for GKE agent addon." }, +"highScaleCheckpointingConfig": { +"$ref": "HighScaleCheckpointingConfig", +"description": "Configuration for the High Scale Checkpointing add-on." +}, "horizontalPodAutoscaling": { "$ref": "HorizontalPodAutoscaling", "description": "Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods." 
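Taken together, the container.v1 additions in this revision are all reachable from a single cluster-creation request. A sketch under stated assumptions: `performanceMonitoringUnit` is presumed to sit in `advancedMachineFeatures` (its neighbours in this hunk are `threadsPerCore` and nested virtualization), and the project and location names are placeholders:

```python
from googleapiclient.discovery import build

container = build("container", "v1")

# Illustrative CreateClusterRequest body exercising the fields added in this
# revision; "my-project"/"us-central1" are placeholders.
body = {
    "cluster": {
        "name": "example-cluster",
        "alphaClusterFeatureGates": ["featureX=true"],
        # AnonymousAuthenticationConfig currently defines no fields.
        "anonymousAuthenticationConfig": {},
        "addonsConfig": {
            "highScaleCheckpointingConfig": {"enabled": True},
        },
        "nodeConfig": {
            "advancedMachineFeatures": {
                "performanceMonitoringUnit": "STANDARD",
            },
        },
    }
}
request = container.projects().locations().clusters().create(
    parent="projects/my-project/locations/us-central1", body=body
)
# operation = request.execute()  # returns a long-running Operation
```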
@@ -2895,6 +2899,22 @@ "description": "Whether or not to enable nested virtualization (defaults to false).", "type": "boolean" }, +"performanceMonitoringUnit": { +"description": "Type of Performance Monitoring Unit (PMU) requested on node pool instances. If unset, PMU will not be available to the node.", +"enum": [ +"PERFORMANCE_MONITORING_UNIT_UNSPECIFIED", +"ARCHITECTURAL", +"STANDARD", +"ENHANCED" +], +"enumDescriptions": [ +"PMU not enabled.", +"Architecturally defined non-LLC events.", +"Most documented core/L2 events.", +"Most documented core/L2 and LLC events." +], +"type": "string" +}, "threadsPerCore": { "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "format": "int64", @@ -2903,6 +2923,12 @@ }, "type": "object" }, +"AnonymousAuthenticationConfig": { +"description": "AnonymousAuthenticationConfig defines the settings needed to limit endpoints that allow anonymous authentication.", +"id": "AnonymousAuthenticationConfig", +"properties": {}, +"type": "object" +}, "AuthenticatorGroupsConfig": { "description": "Configuration for returning group information from authenticators.", "id": "AuthenticatorGroupsConfig", @@ -3392,6 +3418,17 @@ "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." }, +"alphaClusterFeatureGates": { +"description": "The list of user specified Kubernetes feature gates. Each string represents the activation status of a feature gate (e.g. \"featureX=true\" or \"featureX=false\")", +"items": { +"type": "string" +}, +"type": "array" +}, +"anonymousAuthenticationConfig": { +"$ref": "AnonymousAuthenticationConfig", +"description": "Configuration for limiting anonymous access to all endpoints except the health checks." +}, "authenticatorGroupsConfig": { "$ref": "AuthenticatorGroupsConfig", "description": "Configuration controlling RBAC group membership information." @@ -3896,6 +3933,10 @@ "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." }, +"desiredAnonymousAuthenticationConfig": { +"$ref": "AnonymousAuthenticationConfig", +"description": "Configuration for limiting anonymous access to all endpoints except the health checks." +}, "desiredAuthenticatorGroupsConfig": { "$ref": "AuthenticatorGroupsConfig", "description": "AuthenticatorGroupsConfig specifies the config for the cluster security groups settings." 
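The v1beta1 surface mirrors the same fields and also adds `desiredAnonymousAuthenticationConfig` to `ClusterUpdate`, so an existing cluster can opt in without recreation. A hedged sketch with placeholder names:

```python
from googleapiclient.discovery import build

container_beta = build("container", "v1beta1")

name = "projects/my-project/locations/us-central1/clusters/example-cluster"
body = {
    "update": {
        # AnonymousAuthenticationConfig is an empty message in this revision;
        # setting it opts the cluster into the anonymous-access limits.
        "desiredAnonymousAuthenticationConfig": {},
    }
}
operation = (container_beta.projects().locations().clusters()
             .update(name=name, body=body).execute())
print(operation["name"])  # long-running Operation to poll
```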
@@ -5085,6 +5126,17 @@ false }, "type": "object" }, +"HighScaleCheckpointingConfig": { +"description": "Configuration for the High Scale Checkpointing.", +"id": "HighScaleCheckpointingConfig", +"properties": { +"enabled": { +"description": "Whether the High Scale Checkpointing is enabled for this cluster.", +"type": "boolean" +} +}, +"type": "object" +}, "HorizontalPodAutoscaling": { "description": "Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods.", "id": "HorizontalPodAutoscaling", diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1.json index 4e5df9a889..a43ab5418c 100644 --- a/googleapiclient/discovery_cache/documents/datacatalog.v1.json +++ b/googleapiclient/discovery_cache/documents/datacatalog.v1.json @@ -1090,7 +1090,7 @@ }, "reconcile": { "deprecated": true, -"description": "`ReconcileTags` creates or updates a list of tags on the entry. If the ReconcileTagsRequest.force_delete_missing parameter is set, the operation deletes tags not included in the input tag list. `ReconcileTags` returns a long-running operation resource that can be queried with Operations.GetOperation to return ReconcileTagsMetadata and a ReconcileTagsResponse message.", +"description": "`ReconcileTags` creates or updates a list of tags on the entry. If the ReconcileTagsRequest.force_delete_missing parameter is set, the operation deletes tags not included in the input tag list. `ReconcileTags` returns a long-running operation resource that can be queried with Operations.GetOperation to return ReconcileTagsMetadata and a ReconcileTagsResponse message. 
Note: SearchCatalog might return stale search results for up to 24 hours after the `ReconcileTags` operation completes.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/entryGroups/{entryGroupsId}/entries/{entriesId}/tags:reconcile", "httpMethod": "POST", "id": "datacatalog.projects.locations.entryGroups.entries.tags.reconcile", @@ -2339,7 +2339,7 @@ } } }, -"revision": "20250502", +"revision": "20250520", "rootUrl": "https://datacatalog.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json index c49202bb03..b6dd75d8da 100644 --- a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json +++ b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json @@ -693,6 +693,41 @@ "https://www.googleapis.com/auth/compute" ] }, +"getWorkerStacktraces": { +"description": "Get worker stacktraces from debug capture.", +"flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/debug/getWorkerStacktraces", +"httpMethod": "POST", +"id": "dataflow.projects.jobs.debug.getWorkerStacktraces", +"parameterOrder": [ +"projectId", +"jobId" +], +"parameters": { +"jobId": { +"description": "The job for which to get stacktraces.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "The project id.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1b3/projects/{projectId}/jobs/{jobId}/debug/getWorkerStacktraces", +"request": { +"$ref": "GetWorkerStacktracesRequest" +}, +"response": { +"$ref": "GetWorkerStacktracesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, "sendCapture": { "description": "Send encoded debug capture data for component.", "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/debug/sendCapture", @@ -2208,7 +2243,7 @@ } } }, -"revision": "20250505", +"revision": "20250519", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -3796,6 +3831,31 @@ }, "type": "object" }, +"GetWorkerStacktracesRequest": { +"description": "Request to get worker stacktraces from debug capture.", +"id": "GetWorkerStacktracesRequest", +"properties": { +"workerId": { +"description": "The worker for which to get stacktraces. The returned stacktraces will be for the SDK harness running on this worker.", +"type": "string" +} +}, +"type": "object" +}, +"GetWorkerStacktracesResponse": { +"description": "Response to get worker stacktraces from debug capture.", +"id": "GetWorkerStacktracesResponse", +"properties": { +"sdks": { +"description": "Repeated as unified worker may have multiple SDK processes.", +"items": { +"$ref": "Sdk" +}, +"type": "array" +} +}, +"type": "object" +}, "Histogram": { "description": "Histogram of value counts for a distribution. Buckets have an inclusive lower bound and exclusive upper bound and use \"1,2,5 bucketing\": The first bucket range is from [0,1) and all subsequent bucket boundaries are powers of ten multiplied by 1, 2, or 5. Thus, bucket boundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, ... 
Negative values are not supported.", "id": "Histogram", @@ -5788,6 +5848,24 @@ false }, "type": "object" }, +"Sdk": { +"description": "A structured representation of an SDK.", +"id": "Sdk", +"properties": { +"sdkId": { +"description": "The SDK harness id.", +"type": "string" +}, +"stacks": { +"description": "The stacktraces for the processes running on the SDK harness.", +"items": { +"$ref": "Stack" +}, +"type": "array" +} +}, +"type": "object" +}, "SdkBug": { "description": "A bug found in the Dataflow SDK.", "id": "SdkBug", @@ -6494,6 +6572,35 @@ false }, "type": "object" }, +"Stack": { +"description": "A structured stacktrace for a process running on the worker.", +"id": "Stack", +"properties": { +"stackContent": { +"description": "The raw stack trace.", +"type": "string" +}, +"threadCount": { +"description": "With Java thread dumps we may get collapsed stacks, e.g., N threads in stack \"\". Instead of having to copy over the same stack trace N times, this int field captures this.", +"format": "int32", +"type": "integer" +}, +"threadName": { +"description": "Thread name. For example, \"CommitThread-0,10,main\"", +"type": "string" +}, +"threadState": { +"description": "The state of the thread. For example, \"WAITING\".", +"type": "string" +}, +"timestamp": { +"description": "Timestamp at which the stack was captured.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, "StageExecutionDetails": { "description": "Information about the workers and work items within a stage.", "id": "StageExecutionDetails", diff --git a/googleapiclient/discovery_cache/documents/dataform.v1beta1.json b/googleapiclient/discovery_cache/documents/dataform.v1beta1.json index 1aab75644a..b4ea2a47ce 100644 --- a/googleapiclient/discovery_cache/documents/dataform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/dataform.v1beta1.json @@ -2147,7 +2147,7 @@ } } }, -"revision": "20250420", +"revision": "20250518", "rootUrl": "https://dataform.googleapis.com/", "schemas": { "ActionErrorTable": { @@ -3428,6 +3428,10 @@ "description": "Configures various aspects of Dataform notebook runtime.", "id": "NotebookRuntimeOptions", "properties": { +"aiPlatformNotebookRuntimeTemplate": { +"description": "Optional. The resource name of the [Colab runtime template](https://cloud.google.com/colab/docs/runtimes), from which a runtime is created for notebook executions. If not specified, a runtime is created with Colab's default specifications.", +"type": "string" +}, "gcsOutputBucket": { "description": "Optional. The Google Cloud Storage location to upload the result to. Format: `gs://bucket-name`.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json index 120e780dee..f85a6e5925 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1.json @@ -2340,7 +2340,7 @@ } } }, -"revision": "20250507", +"revision": "20250519", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AlloyDbConnectionProfile": { @@ -2394,7 +2394,8 @@ "type": "object" }, "primaryInstanceSettings": { -"$ref": "PrimaryInstanceSettings" +"$ref": "PrimaryInstanceSettings", +"description": "Settings for the cluster's primary instance." }, "vpcNetwork": { "description": "Required. The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP.
The network must belong to the same project as the cluster. It is specified in the form: \"projects/{project_number}/global/networks/{network_id}\". This is required to create a cluster.", @@ -6610,6 +6611,13 @@ "description": "Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' field is mandatory.", "type": "string" }, +"sslFlags": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. SSL flags used for establishing SSL connection to the source database. Only source specific flags are supported. An object containing a list of \"key\": \"value\" pairs. Example: { \"server_certificate_hostname\": \"server.com\"}.", +"type": "object" +}, "type": { "description": "Optional. The ssl config type according to 'client_key', 'client_certificate' and 'ca_certificate'.", "enum": [ diff --git a/googleapiclient/discovery_cache/documents/dataplex.v1.json b/googleapiclient/discovery_cache/documents/dataplex.v1.json index da883f9bb9..09aa6b3797 100644 --- a/googleapiclient/discovery_cache/documents/dataplex.v1.json +++ b/googleapiclient/discovery_cache/documents/dataplex.v1.json @@ -3417,14 +3417,14 @@ "type": "string" }, "parent": { -"description": "Required. The parent resource where this Glossary will be created. Format: projects/{projectId}/locations/{locationId} where locationId refers to a GCP region.", +"description": "Required. The parent resource where this Glossary will be created. Format: projects/{project_id_or_number}/locations/{location_id} where location_id refers to a GCP region.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" }, "validateOnly": { -"description": "Optional. Validates the request without actually creating the glossary. Default: false.", +"description": "Optional. Validates the request without actually creating the Glossary. Default: false.", "location": "query", "type": "boolean" } @@ -3441,7 +3441,7 @@ ] }, "delete": { -"description": "Deletes a Glossary resource. All the categories and terms within the glossary must be deleted before a glossary can be deleted.", +"description": "Deletes a Glossary resource. All the categories and terms within the Glossary must be deleted before the Glossary can be deleted.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}", "httpMethod": "DELETE", "id": "dataplex.projects.locations.glossaries.delete", @@ -3455,7 +3455,7 @@ "type": "string" }, "name": { -"description": "Required. The name of the Glossary to delete. Format: projects/{project}/locations/{location}/glossary/{glossary}", +"description": "Required. The name of the Glossary to delete. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+$", "required": true, @@ -3471,7 +3471,7 @@ ] }, "get": { -"description": "Retrieves a specified Glossary resource.", +"description": "Gets a Glossary resource.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}", "httpMethod": "GET", "id": "dataplex.projects.locations.glossaries.get", @@ -3480,7 +3480,7 @@ ], "parameters": { "name": { -"description": "Required. The name of the Glossary to retrieve. Format: projects/{project}/locations/{location}/glossaries/{glossary}", +"description": "Required. The name of the Glossary to retrieve. 
Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+$", "required": true, @@ -3536,17 +3536,17 @@ ], "parameters": { "filter": { -"description": "Optional. Filter expression that filters glossaries listed in the response. Initially, no filter is supported.", +"description": "Optional. Filter expression that filters Glossaries listed in the response. Filters on proto fields of Glossary are supported. Examples of using a filter are: - display_name=\"my-glossary\" - categoryCount=1 - termCount=0", "location": "query", "type": "string" }, "orderBy": { -"description": "Optional. Order by expression that orders glossaries listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.", +"description": "Optional. Order by expression that orders Glossaries listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.", "location": "query", "type": "string" }, "pageSize": { -"description": "Optional. The maximum number of glossaries to return. The service may return fewer than this value. If unspecified, at most 50 glossaries will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", +"description": "Optional. The maximum number of Glossaries to return. The service may return fewer than this value. If unspecified, at most 50 Glossaries will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "location": "query", "type": "integer" @@ -3557,7 +3557,7 @@ "type": "string" }, "parent": { -"description": "Required. The parent, which has this collection of glossaries. Format: projects/{project}/locations/{location} Location is the GCP region.", +"description": "Required. The parent, which has this collection of Glossaries. Format: projects/{project_id_or_number}/locations/{location_id} where location_id refers to a GCP region.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -3582,7 +3582,7 @@ ], "parameters": { "name": { -"description": "Output only. Identifier. The resource name of the Glossary. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}", +"description": "Output only. Identifier. The resource name of the Glossary. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+$", "required": true, @@ -3595,7 +3595,7 @@ "type": "string" }, "validateOnly": { -"description": "Optional. Validates the request without actually updating the glossary. Default: false.", +"description": "Optional. Validates the request without actually updating the Glossary. Default: false.", "location": "query", "type": "boolean" } @@ -3672,7 +3672,7 @@ "categories": { "methods": { "create": { -"description": "GlossaryCategory APIs are CCFE passthrough APIs. Creates a new GlossaryCategory resource.", +"description": "Creates a new GlossaryCategory resource.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}/categories", "httpMethod": "POST", "id": "dataplex.projects.locations.glossaries.categories.create", @@ -3681,12 +3681,12 @@ ], "parameters": { "categoryId": { -"description": "Required. Category ID: GlossaryCategory identifier.", +"description": "Required. 
GlossaryCategory identifier.", "location": "query", "type": "string" }, "parent": { -"description": "Required. The parent resource where this GlossaryCategory will be created. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} where locationId refers to a GCP region.", +"description": "Required. The parent resource where this GlossaryCategory will be created. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} where locationId refers to a GCP region.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+$", "required": true, @@ -3705,7 +3705,7 @@ ] }, "delete": { -"description": "Deletes a GlossaryCategory resource. All the categories and terms nested directly under the category will be moved one level up to the parent in the hierarchy.", +"description": "Deletes a GlossaryCategory resource. All the GlossaryCategories and GlossaryTerms nested directly under the specified GlossaryCategory will be moved one level up to the parent in the hierarchy.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}/categories/{categoriesId}", "httpMethod": "DELETE", "id": "dataplex.projects.locations.glossaries.categories.delete", @@ -3714,7 +3714,7 @@ ], "parameters": { "name": { -"description": "Required. The name of the GlossaryCategory to delete. Format: projects/{project}/locations/{location}/glossary/{glossary}/categories/{glossary_category}", +"description": "Required. The name of the GlossaryCategory to delete. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+/categories/[^/]+$", "required": true, @@ -3730,7 +3730,7 @@ ] }, "get": { -"description": "Retrieves a specified GlossaryCategory resource.", +"description": "Gets a GlossaryCategory resource.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}/categories/{categoriesId}", "httpMethod": "GET", "id": "dataplex.projects.locations.glossaries.categories.get", @@ -3739,7 +3739,7 @@ ], "parameters": { "name": { -"description": "Required. The name of the GlossaryCategory to retrieve. Format: projects/{project}/locations/{location}/glossaries/{glossary}/categories/{glossary_category}", +"description": "Required. The name of the GlossaryCategory to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+/categories/[^/]+$", "required": true, @@ -3786,7 +3786,7 @@ ] }, "list": { -"description": "Lists GlossaryCategory resources in a glossary.", +"description": "Lists GlossaryCategory resources in a Glossary.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}/categories", "httpMethod": "GET", "id": "dataplex.projects.locations.glossaries.categories.list", @@ -3795,17 +3795,17 @@ ], "parameters": { "filter": { -"description": "Optional. Filter expression that filters categories listed in the response. Filters supported: List GlossaryCategories based on immediate parent in the resource hierarchy. This will only return the GlossaryCategories nested directly under the parent and no other subsequent nested categories will be returned.", +"description": "Optional. Filter expression that filters GlossaryCategories listed in the response. 
Filters are supported on the following fields: - immediate_parent. Examples of using a filter are: - immediate_parent=\"projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}\" - immediate_parent=\"projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}\". This will only return the GlossaryCategories that are directly nested under the specified parent.", "location": "query", "type": "string" }, "orderBy": { -"description": "Optional. Order by expression that orders categories listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.", +"description": "Optional. Order by expression that orders GlossaryCategories listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.", "location": "query", "type": "string" }, "pageSize": { -"description": "Optional. The maximum number of categories to return. The service may return fewer than this value. If unspecified, at most 50 categories will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", +"description": "Optional. The maximum number of GlossaryCategories to return. The service may return fewer than this value. If unspecified, at most 50 GlossaryCategories will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "location": "query", "type": "integer" @@ -3816,7 +3816,7 @@ "type": "string" }, "parent": { -"description": "Required. The parent, which has this collection of categories. Format: projects/{project}/locations/{location}/glossaries/{glossary} Location is the GCP region.", +"description": "Required. The parent, which has this collection of GlossaryCategories. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} Location is the GCP region.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+$", "required": true, @@ -3841,7 +3841,7 @@ ], "parameters": { "name": { -"description": "Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}", +"description": "Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+/categories/[^/]+$", "required": true, @@ -3926,7 +3926,7 @@ "terms": { "methods": { "create": { -"description": "GlossaryTerm APIs are CCFE passthrough APIs. Creates a new GlossaryTerm resource.", +"description": "Creates a new GlossaryTerm resource.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}/terms", "httpMethod": "POST", "id": "dataplex.projects.locations.glossaries.terms.create", @@ -3935,14 +3935,14 @@ ], "parameters": { "parent": { -"description": "Required. The parent resource where this GlossaryTerm will be created. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} where locationId refers to a GCP region.", +"description": "Required. The parent resource where the GlossaryTerm will be created.
Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} where location_id refers to a GCP region.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+$", "required": true, "type": "string" }, "termId": { -"description": "Required. Term ID: GlossaryTerm identifier.", +"description": "Required. GlossaryTerm identifier.", "location": "query", "type": "string" } @@ -3968,7 +3968,7 @@ ], "parameters": { "name": { -"description": "Required. The name of the GlossaryTerm to delete. Format: projects/{project}/locations/{location}/glossary/{glossary}/terms/{glossary_term}", +"description": "Required. The name of the GlossaryTerm to delete. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+/terms/[^/]+$", "required": true, @@ -3984,7 +3984,7 @@ ] }, "get": { -"description": "Retrieves a specified GlossaryTerm resource.", +"description": "Gets a GlossaryTerm resource.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}/terms/{termsId}", "httpMethod": "GET", "id": "dataplex.projects.locations.glossaries.terms.get", @@ -3993,7 +3993,7 @@ ], "parameters": { "name": { -"description": "Required. The name of the GlossaryTerm to retrieve. Format: projects/{project}/locations/{location}/glossaries/{glossary}/terms/{glossary_term}", +"description": "Required. The name of the GlossaryTerm to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+/terms/[^/]+$", "required": true, @@ -4040,7 +4040,7 @@ ] }, "list": { -"description": "Lists GlossaryTerm resources in a glossary.", +"description": "Lists GlossaryTerm resources in a Glossary.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/glossaries/{glossariesId}/terms", "httpMethod": "GET", "id": "dataplex.projects.locations.glossaries.terms.list", @@ -4049,17 +4049,17 @@ ], "parameters": { "filter": { -"description": "Optional. Filter expression that filters terms listed in the response. Filters supported: List GlossaryTerms based on immediate parent in the resource hierarchy. This will only return the terms nested directly under the parent and no other subsequent nested terms will be returned.", +"description": "Optional. Filter expression that filters GlossaryTerms listed in the response. Filters are supported on the following fields: - immediate_parent. Examples of using a filter are: - immediate_parent=\"projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}\" - immediate_parent=\"projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}\". This will only return the GlossaryTerms that are directly nested under the specified parent.", "location": "query", "type": "string" }, "orderBy": { -"description": "Optional. Order by expression that orders terms listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.", +"description": "Optional. Order by expression that orders GlossaryTerms listed in the response. Order by fields are: name or create_time for the result. If not specified, the ordering is undefined.", "location": "query", "type": "string" }, "pageSize": { -"description": "Optional. The maximum number of terms to return.
The service may return fewer than this value. If unspecified, at most 50 terms will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", +"description": "Optional. The maximum number of GlossaryTerms to return. The service may return fewer than this value. If unspecified, at most 50 GlossaryTerms will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "location": "query", "type": "integer" @@ -4070,7 +4070,7 @@ "type": "string" }, "parent": { -"description": "Required. The parent, which has this collection of terms. Format: projects/{project}/locations/{location}/glossaries/{glossary} Location is the GCP region.", +"description": "Required. The parent, which has this collection of GlossaryTerms. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} where location_id refers to a GCP region.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+$", "required": true, @@ -4095,7 +4095,7 @@ ], "parameters": { "name": { -"description": "Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}", +"description": "Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/glossaries/[^/]+/terms/[^/]+$", "required": true, @@ -7027,7 +7027,7 @@ } } }, -"revision": "20250513", +"revision": "20250520", "rootUrl": "https://dataplex.googleapis.com/", "schemas": { "Empty": { @@ -8306,6 +8306,10 @@ "description": "Optional. The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. 1. If the Cloud Storage bucket is located in a multi-region bucket, then BigQuery dataset can be in the same multi-region bucket or any single region that is included in the same multi-region bucket. The datascan can be created in any single region that is included in the same multi-region bucket 2. If the Cloud Storage bucket is located in a dual-region bucket, then BigQuery dataset can be located in regions that are included in the dual-region bucket, or in a multi-region that includes the dual-region. The datascan can be created in any single region that is included in the same dual-region bucket. 3. If the Cloud Storage bucket is located in a single region, then BigQuery dataset can be in the same single region or any multi-region bucket that includes the same single region. The datascan will be created in the same single region as the bucket. 4. If the BigQuery dataset is in single region, it must be in the same single region as the datascan.For supported values, refer to https://cloud.google.com/bigquery/docs/locations#supported_locations.", "type": "string" }, +"project": { +"description": "Optional. The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is \"projects/{project_id_or_number}\".", +"type": "string" +}, "tableType": { "description": "Optional. Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables.", "enum": [ @@ -8720,6 +8724,19 @@ "readOnly": true, "type": "string" }, +"dimensions": { +"description": "Output only. 
The dimension-level results for this column.", +"items": { +"$ref": "GoogleCloudDataplexV1DataQualityDimensionResult" +}, +"readOnly": true, +"type": "array" +}, +"passed": { +"description": "Output only. Whether the column passed or failed.", +"readOnly": true, +"type": "boolean" +}, "score": { "description": "Output only. The column-level data quality score for this data scan job if and only if the 'column' field is set. The score ranges between 0 and 100 (up to two decimal places).", "format": "float", @@ -8767,6 +8784,11 @@ "description": "The output of a DataQualityScan.", "id": "GoogleCloudDataplexV1DataQualityResult", "properties": { +"catalogPublishingStatus": { +"$ref": "GoogleCloudDataplexV1DataScanCatalogPublishingStatus", +"description": "Output only. The status of publishing the data scan to Catalog.", +"readOnly": true +}, "columns": { "description": "Output only. A list of results at the column level. A column will have a corresponding DataQualityColumnResult if and only if there is at least one rule with the 'column' field set to it.", "items": { "$ref": "GoogleCloudDataplexV1DataQualityColumnResult" }, @@ -9230,6 +9252,10 @@ "description": "DataQualityScan related setting.", "id": "GoogleCloudDataplexV1DataQualitySpec", "properties": { +"catalogPublishingEnabled": { +"description": "Optional. If set, the latest DataScan job result will be published to Dataplex Catalog.", +"type": "boolean" +}, "postScanActions": { "$ref": "GoogleCloudDataplexV1DataQualitySpecPostScanActions", "description": "Optional. Actions to take upon job completion." }, @@ -9460,10 +9486,36 @@ }, "type": "object" }, +"GoogleCloudDataplexV1DataScanCatalogPublishingStatus": { +"description": "The status of publishing the data scan result to Catalog.", +"id": "GoogleCloudDataplexV1DataScanCatalogPublishingStatus", +"properties": { +"state": { +"description": "Output only. Execution state for catalog publishing.", +"enum": [ +"STATE_UNSPECIFIED", +"SUCCEEDED", +"FAILED" ], +"enumDescriptions": [ +"The publishing state is unspecified.", +"Publish to catalog completed successfully.", +"Publish to catalog failed." ], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDataplexV1DataScanEvent": { "description": "These messages contain information about the execution of a datascan. The monitored resource is 'DataScan'", "id": "GoogleCloudDataplexV1DataScanEvent", "properties": { +"catalogPublishingStatus": { +"$ref": "GoogleCloudDataplexV1DataScanCatalogPublishingStatus", +"description": "The status of publishing the data scan to Catalog." +}, "createTime": { "description": "The time when the data scan job was created.", "format": "google-datetime", @@ -10941,27 +10993,27 @@ "type": "object" }, "GoogleCloudDataplexV1Glossary": { -"description": "A Glossary represents a collection of categories and terms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the categories and terms within it.", +"description": "A Glossary represents a collection of GlossaryCategories and GlossaryTerms defined by the user. Glossary is a top level resource and is the GCP parent resource of all the GlossaryCategories and GlossaryTerms within it.", "id": "GoogleCloudDataplexV1Glossary", "properties": { "categoryCount": { -"description": "Output only. The number of categories in the glossary.", +"description": "Output only. The number of GlossaryCategories in the Glossary.", "format": "int32", "readOnly": true, "type": "integer" },
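The new `catalogPublishingEnabled` flag and `DataScanCatalogPublishingStatus` message pair up: the first opts a DataQualityScan in, the second reports the outcome on the scan result. A hedged sketch of using them from Python; the scan name is a placeholder, and the `patch`/`get` calls are existing dataplex v1 surface:

```python
from googleapiclient.discovery import build

dataplex = build("dataplex", "v1")
name = "projects/my-project/locations/us-central1/dataScans/my-dq-scan"

# Opt the scan in to catalog publishing (field added in this revision).
dataplex.projects().locations().dataScans().patch(
    name=name,
    updateMask="dataQualitySpec.catalogPublishingEnabled",
    body={"dataQualitySpec": {"catalogPublishingEnabled": True}},
).execute()

# After the next job runs, the publishing outcome is reported on the result.
scan = dataplex.projects().locations().dataScans().get(
    name=name, view="FULL"
).execute()
status = scan.get("dataQualityResult", {}).get("catalogPublishingStatus", {})
print(status.get("state"))  # SUCCEEDED, FAILED, or unset
```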
"createTime": { -"description": "Output only. The time at which the glossary was created.", +"description": "Output only. The time at which the Glossary was created.", "format": "google-datetime", "readOnly": true, "type": "string" }, "description": { -"description": "Optional. The user-mutable description of the glossary.", +"description": "Optional. The user-mutable description of the Glossary.", "type": "string" }, "displayName": { -"description": "Optional. User friendly display name of the glossary. This is user-mutable. This will be same as the glossaryId, if not specified.", +"description": "Optional. User friendly display name of the Glossary. This is user-mutable. This will be the same as the GlossaryId, if not specified.", "type": "string" }, "etag": { @@ -10976,12 +11028,12 @@ "type": "object" }, "name": { -"description": "Output only. Identifier. The resource name of the Glossary. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}", +"description": "Output only. Identifier. The resource name of the Glossary. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}", "readOnly": true, "type": "string" }, "termCount": { -"description": "Output only. The number of terms in the glossary.", +"description": "Output only. The number of GlossaryTerms in the Glossary.", "format": "int32", "readOnly": true, "type": "integer" }, @@ -10992,7 +11044,7 @@ "type": "string" }, "updateTime": { -"description": "Output only. The time at which the glossary was last updated.", +"description": "Output only. The time at which the Glossary was last updated.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -11001,7 +11053,7 @@ "type": "object" }, "GoogleCloudDataplexV1GlossaryCategory": { -"description": "A GlossaryCategory represents a collection of categories and terms within a Glossary that are related to each other.", +"description": "A GlossaryCategory represents a collection of GlossaryCategories and GlossaryTerms within a Glossary that are related to each other.", "id": "GoogleCloudDataplexV1GlossaryCategory", "properties": { "createTime": { @@ -11015,7 +11067,7 @@ "type": "string" }, "displayName": { -"description": "Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be same as the categoryId, if not specified.", +"description": "Optional. User friendly display name of the GlossaryCategory. This is user-mutable. This will be the same as the GlossaryCategoryId, if not specified.", "type": "string" }, "labels": { @@ -11026,12 +11078,12 @@ "type": "object" }, "name": { -"description": "Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}", +"description": "Output only. Identifier. The resource name of the GlossaryCategory. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}", "readOnly": true, "type": "string" }, "parent": { -"description": "Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}", +"description": "Required. The immediate parent of the GlossaryCategory in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. 
Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}", "type": "string" }, "uid": { @@ -11049,7 +11101,7 @@ "type": "object" }, "GoogleCloudDataplexV1GlossaryTerm": { -"description": "GlossaryTerms are the core of glossary. A GlossaryTerm holds a rich text description that can be attached to entries or specific columns to enrich them.", +"description": "GlossaryTerms are the core of a Glossary. A GlossaryTerm holds a rich text description that can be attached to Entries or specific columns to enrich them.", "id": "GoogleCloudDataplexV1GlossaryTerm", "properties": { "createTime": { @@ -11063,7 +11115,7 @@ "type": "string" }, "displayName": { -"description": "Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be same as the termId, if not specified.", +"description": "Optional. User friendly display name of the GlossaryTerm. This is user-mutable. This will be the same as the GlossaryTermId, if not specified.", "type": "string" }, "labels": { @@ -11074,12 +11126,12 @@ "type": "object" }, "name": { -"description": "Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/terms/{termId}", +"description": "Output only. Identifier. The resource name of the GlossaryTerm. Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/terms/{term_id}", "readOnly": true, "type": "string" }, "parent": { -"description": "Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a Category. Format: projects/{projectId}/locations/{locationId}/glossaries/{glossaryId} OR projects/{projectId}/locations/{locationId}/glossaries/{glossaryId}/categories/{categoryId}", +"description": "Required. The immediate parent of the GlossaryTerm in the resource-hierarchy. It can either be a Glossary or a GlossaryCategory. 
Format: projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id} OR projects/{project_id_or_number}/locations/{location_id}/glossaries/{glossary_id}/categories/{category_id}", "type": "string" }, "uid": { @@ -11864,7 +11916,7 @@ "id": "GoogleCloudDataplexV1ListGlossariesResponse", "properties": { "glossaries": { -"description": "Lists the glossaries in the specified parent.", +"description": "Lists the Glossaries in the specified parent.", "items": { "$ref": "GoogleCloudDataplexV1Glossary" }, @@ -11889,7 +11941,7 @@ "id": "GoogleCloudDataplexV1ListGlossaryCategoriesResponse", "properties": { "categories": { -"description": "Lists the glossaryCategories in the specified parent.", +"description": "Lists the GlossaryCategories in the specified parent.", "items": { "$ref": "GoogleCloudDataplexV1GlossaryCategory" }, @@ -11918,7 +11970,7 @@ "type": "string" }, "terms": { -"description": "Lists the terms in the specified parent.", +"description": "Lists the GlossaryTerms in the specified parent.", "items": { "$ref": "GoogleCloudDataplexV1GlossaryTerm" }, diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json index 4f312b0af9..9727ef529e 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json @@ -680,72 +680,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"accessNativeBuildInfo": { -"description": "Obtain build data for Native Job", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessNativeBuildInfo", -"httpMethod": "GET", -"id": "dataproc.projects.locations.batches.sparkApplications.accessNativeBuildInfo", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", -"required": true, -"type": "string" -}, -"parent": { -"description": "Required. Parent (Batch) resource reference.", -"location": "query", -"type": "string" -} -}, -"path": "v1/{+name}:accessNativeBuildInfo", -"response": { -"$ref": "AccessSparkApplicationNativeBuildInfoResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"accessNativeSqlQuery": { -"description": "Obtain data corresponding to a particular Native SQL Query for a Spark Application.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessNativeSqlQuery", -"httpMethod": "GET", -"id": "dataproc.projects.locations.batches.sparkApplications.accessNativeSqlQuery", -"parameterOrder": [ -"name" -], -"parameters": { -"executionId": { -"description": "Required. Execution ID", -"format": "int64", -"location": "query", -"type": "string" -}, -"name": { -"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", -"required": true, -"type": "string" -}, -"parent": { -"description": "Required. 
Parent (Batch) resource reference.", -"location": "query", -"type": "string" -} -}, -"path": "v1/{+name}:accessNativeSqlQuery", -"response": { -"$ref": "AccessSparkApplicationNativeSqlQueryResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "accessSqlPlan": { "description": "Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessSqlPlan", @@ -1156,47 +1090,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"searchNativeSqlQueries": { -"description": "Obtain data corresponding to Native SQL Queries for a Spark Application.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchNativeSqlQueries", -"httpMethod": "GET", -"id": "dataproc.projects.locations.batches.sparkApplications.searchNativeSqlQueries", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", -"required": true, -"type": "string" -}, -"pageSize": { -"description": "Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "Optional. A page token received from a previous SearchSparkApplicationNativeSqlQueries call. Provide this token to retrieve the subsequent page.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. Parent (Batch) resource reference.", -"location": "query", -"type": "string" -} -}, -"path": "v1/{+name}:searchNativeSqlQueries", -"response": { -"$ref": "SearchSparkApplicationNativeSqlQueriesResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "searchSqlQueries": { "description": "Obtain data corresponding to SQL Queries for a Spark Application.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchSqlQueries", @@ -2147,72 +2040,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"accessNativeBuildInfo": { -"description": "Obtain data corresponding to Native Build Information for a Spark Application.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessNativeBuildInfo", -"httpMethod": "GET", -"id": "dataproc.projects.locations.sessions.sparkApplications.accessNativeBuildInfo", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", -"required": true, -"type": "string" -}, -"parent": { -"description": "Required. 
Parent (Session) resource reference.", -"location": "query", -"type": "string" -} -}, -"path": "v1/{+name}:accessNativeBuildInfo", -"response": { -"$ref": "AccessSessionSparkApplicationNativeBuildInfoResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"accessNativeSqlQuery": { -"description": "Obtain data corresponding to a particular Native SQL Query for a Spark Application.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessNativeSqlQuery", -"httpMethod": "GET", -"id": "dataproc.projects.locations.sessions.sparkApplications.accessNativeSqlQuery", -"parameterOrder": [ -"name" -], -"parameters": { -"executionId": { -"description": "Required. Execution ID", -"format": "int64", -"location": "query", -"type": "string" -}, -"name": { -"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", -"required": true, -"type": "string" -}, -"parent": { -"description": "Required. Parent (Session) resource reference.", -"location": "query", -"type": "string" -} -}, -"path": "v1/{+name}:accessNativeSqlQuery", -"response": { -"$ref": "AccessSessionSparkApplicationNativeSqlQueryResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "accessSqlPlan": { "description": "Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessSqlPlan", @@ -2623,47 +2450,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"searchNativeSqlQueries": { -"description": "Obtain data corresponding to Native SQL Queries for a Spark Application.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchNativeSqlQueries", -"httpMethod": "GET", -"id": "dataproc.projects.locations.sessions.sparkApplications.searchNativeSqlQueries", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", -"required": true, -"type": "string" -}, -"pageSize": { -"description": "Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "Optional. A page token received from a previous SearchSessionSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. 
Parent (Session) resource reference.", -"location": "query", -"type": "string" -} -}, -"path": "v1/{+name}:searchNativeSqlQueries", -"response": { -"$ref": "SearchSessionSparkApplicationNativeSqlQueriesResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "searchSqlQueries": { "description": "Obtain data corresponding to SQL Queries for a Spark Application.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchSqlQueries", @@ -5192,7 +4978,7 @@ } } }, -"revision": "20250315", +"revision": "20250513", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -5234,28 +5020,6 @@ }, "type": "object" }, -"AccessSessionSparkApplicationNativeBuildInfoResponse": { -"description": "Details of a native build info for a Spark Application", -"id": "AccessSessionSparkApplicationNativeBuildInfoResponse", -"properties": { -"executionData": { -"$ref": "NativeBuildInfoUiData", -"description": "Native SQL Execution Data" -} -}, -"type": "object" -}, -"AccessSessionSparkApplicationNativeSqlQueryResponse": { -"description": "Details of a native query for a Spark Application", -"id": "AccessSessionSparkApplicationNativeSqlQueryResponse", -"properties": { -"executionData": { -"$ref": "NativeSqlExecutionUiData", -"description": "Native SQL Execution Data" -} -}, -"type": "object" -}, "AccessSessionSparkApplicationResponse": { "description": "A summary of Spark Application", "id": "AccessSessionSparkApplicationResponse", @@ -5336,28 +5100,6 @@ }, "type": "object" }, -"AccessSparkApplicationNativeBuildInfoResponse": { -"description": "Details of Native Build Info for a Spark Application", -"id": "AccessSparkApplicationNativeBuildInfoResponse", -"properties": { -"buildInfo": { -"$ref": "NativeBuildInfoUiData", -"description": "Native Build Info Data" -} -}, -"type": "object" -}, -"AccessSparkApplicationNativeSqlQueryResponse": { -"description": "Details of a query for a Spark Application", -"id": "AccessSparkApplicationNativeSqlQueryResponse", -"properties": { -"executionData": { -"$ref": "NativeSqlExecutionUiData", -"description": "Native SQL Execution Data" -} -}, -"type": "object" -}, "AccessSparkApplicationResponse": { "description": "A summary of Spark Application", "id": "AccessSparkApplicationResponse", @@ -5701,6 +5443,20 @@ "basicAlgorithm": { "$ref": "BasicAutoscalingAlgorithm" }, +"clusterType": { +"description": "Optional. The type of the clusters for which this autoscaling policy is to be configured.", +"enum": [ +"CLUSTER_TYPE_UNSPECIFIED", +"STANDARD", +"ZERO_SCALE" ], +"enumDescriptions": [ +"Not set.", +"Standard dataproc cluster with a minimum of 2 primary workers.", +"Clusters that can be scaled down to zero worker nodes." ], +"type": "string" +}, "id": { "description": "Required. The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", "type": "string" @@ -5739,13 +5495,17 @@ "SCENARIO_UNSPECIFIED", "SCALING", "BROADCAST_HASH_JOIN", -"MEMORY" +"MEMORY", +"NONE", +"AUTO" ], "enumDescriptions": [ "Default value.", "Scaling recommendations such as initialExecutors.", "Adding hints for potential relation broadcasts.", -"Memory management for workloads." +"Memory management for workloads.", +"No autotuning.", +"Automatic selection of scenarios." 
], "type": "string" }, @@ -6113,6 +5873,22 @@ }, "type": "array" }, +"clusterType": { +"description": "Optional. The type of the cluster.", +"enum": [ +"CLUSTER_TYPE_UNSPECIFIED", +"STANDARD", +"SINGLE_NODE", +"ZERO_SCALE" ], +"enumDescriptions": [ +"Not set.", +"Standard dataproc cluster with a minimum of two primary workers.", +"https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/single-node-clusters", +"Clusters that can be scaled down to zero worker nodes." ], +"type": "string" +}, "configBucket": { "description": "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", "type": "string" @@ -6597,7 +6373,7 @@ "type": "object" }, "DiskConfig": { -"description": "Specifies the config of disk options for a group of VM instances.", +"description": "Specifies the config of boot disk and attached disk options for a group of VM instances.", "id": "DiskConfig", "properties": { "bootDiskProvisionedIops": { @@ -8426,6 +8202,16 @@ "format": "google-duration", "type": "string" }, +"autoStopTime": { +"description": "Optional. The time when the cluster will be auto-stopped (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", +"format": "google-datetime", +"type": "string" +}, +"autoStopTtl": { +"description": "Optional. The lifetime duration of the cluster. The cluster will be auto-stopped at the end of this period, calculated from the time of submission of the create or update cluster request. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", +"format": "google-duration", +"type": "string" +}, "idleDeleteTtl": { "description": "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", "format": "google-duration", @@ -8436,6 +8222,11 @@ "format": "google-datetime", "readOnly": true, "type": "string" +}, +"idleStopTtl": { +"description": "Optional. The duration to keep the cluster started while idling (when no jobs are running). Passing this threshold will cause the cluster to be stopped. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", +"format": "google-duration", +"type": "string" } }, "type": "object" @@ -9335,6 +9126,21 @@ }, "type": "object" }, +"PropertiesInfo": { +"description": "Properties of the workload organized by origin.", +"id": "PropertiesInfo", +"properties": { +"autotuningProperties": { +"additionalProperties": { +"$ref": "ValueInfo" +},
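The lifecycle additions above complement the existing `idleDeleteTtl`: `idleStopTtl` stops rather than deletes an idle cluster, and `autoStopTtl` stops it a fixed duration after the create or update request. A sketch of a cluster create request using them; the project, region, and cluster names are placeholders:

```python
from googleapiclient.discovery import build

dataproc = build("dataproc", "v1")

cluster = {
    "clusterName": "example-cluster",
    "config": {
        "lifecycleConfig": {
            # Stop (not delete) after one hour idle; min 5m, max 14d.
            "idleStopTtl": "3600s",
            # Stop 24 hours after the request regardless of activity;
            # min 10m, max 14d.
            "autoStopTtl": "86400s",
        },
    },
}
dataproc.projects().regions().clusters().create(
    projectId="my-project", region="us-central1", body=cluster
).execute()
```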
+"description": "Output only. Properties set by autotuning engine.", +"readOnly": true, +"type": "object" +} +}, +"type": "object" +}, "ProvisioningModelMix": { "description": "Defines how Dataproc should create VMs with a mixture of provisioning models.", "id": "ProvisioningModelMix", @@ -9986,6 +9792,10 @@ "description": "Output only. A URI pointing to the location of the stdout and stderr of the workload.", "readOnly": true, "type": "string" +}, +"propertiesInfo": { +"$ref": "PropertiesInfo", +"description": "Optional. Properties of the workload organized by origin." } }, "type": "object" @@ -10045,25 +9855,6 @@ }, "type": "object" }, -"SearchSessionSparkApplicationNativeSqlQueriesResponse": { -"description": "List of all Native queries for a Spark Application.", -"id": "SearchSessionSparkApplicationNativeSqlQueriesResponse", -"properties": { -"nextPageToken": { -"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest.", -"type": "string" -}, -"sparkApplicationNativeSqlQueries": { -"description": "Output only. Native SQL Execution Data", -"items": { -"$ref": "NativeSqlExecutionUiData" -}, -"readOnly": true, -"type": "array" -} -}, -"type": "object" -}, "SearchSessionSparkApplicationSqlQueriesResponse": { "description": "List of all queries for a Spark Application.", "id": "SearchSessionSparkApplicationSqlQueriesResponse", @@ -10214,25 +10005,6 @@ }, "type": "object" }, -"SearchSparkApplicationNativeSqlQueriesResponse": { -"description": "List of all Native SQL queries details for a Spark Application.", -"id": "SearchSparkApplicationNativeSqlQueriesResponse", -"properties": { -"nextPageToken": { -"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationNativeSqlQueriesRequest.", -"type": "string" -}, -"sparkApplicationNativeSqlQueries": { -"description": "Output only. Native SQL Execution Data", -"items": { -"$ref": "NativeSqlExecutionUiData" -}, -"readOnly": true, -"type": "array" -} -}, -"type": "object" -}, "SearchSparkApplicationSqlQueriesResponse": { "description": "List of all queries for a Spark Application.", "id": "SearchSparkApplicationSqlQueriesResponse", @@ -12838,6 +12610,25 @@ }, "type": "object" }, +"ValueInfo": { +"description": "Annotated property value.", +"id": "ValueInfo", +"properties": { +"annotation": { +"description": "Annotation, comment, or explanation of why the property was set.", +"type": "string" +}, +"overriddenValue": { +"description": "Optional. Value which was replaced by the corresponding component.", +"type": "string" +}, +"value": { +"description": "Property value.", +"type": "string" +} +}, +"type": "object" +}, "ValueValidation": { "description": "Validation based on a list of allowed values.", "id": "ValueValidation", diff --git a/googleapiclient/discovery_cache/documents/datastream.v1.json b/googleapiclient/discovery_cache/documents/datastream.v1.json index ef7aca531b..ba55ecdc88 100644 --- a/googleapiclient/discovery_cache/documents/datastream.v1.json +++ b/googleapiclient/discovery_cache/documents/datastream.v1.json @@ -1261,7 +1261,7 @@ } } }, -"revision": "20250511", +"revision": "20250518", "rootUrl": "https://datastream.googleapis.com/", "schemas": { "AppendOnly": { @@ -2231,6 +2231,10 @@ "$ref": "SrvConnectionFormat", "description": "Srv connection format." 
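The `PropertiesInfo`/`ValueInfo` messages above expose which Spark properties the autotuning engine set or overrode. A sketch of reading them back, assuming `propertiesInfo` hangs off the batch's `runtimeInfo` as the surrounding hunk suggests; the batch name is a placeholder:

```python
from googleapiclient.discovery import build

dataproc = build("dataproc", "v1")

batch = dataproc.projects().locations().batches().get(
    name="projects/my-project/locations/us-central1/batches/my-batch"
).execute()

autotuned = (
    batch.get("runtimeInfo", {})
    .get("propertiesInfo", {})
    .get("autotuningProperties", {})
)
for prop, info in autotuned.items():
    # ValueInfo: the value, the value it replaced, and why it was set.
    print(prop, info.get("value"), info.get("overriddenValue"),
          info.get("annotation"))
```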
}, +"sslConfig": { +"$ref": "MongodbSslConfig", +"description": "Optional. SSL configuration for the MongoDB connection." +}, "standardConnectionFormat": { "$ref": "StandardConnectionFormat", "description": "Standard connection format." @@ -2262,6 +2266,44 @@ }, "type": "object" }, +"MongodbSslConfig": { +"description": "MongoDB SSL configuration information.", +"id": "MongodbSslConfig", +"properties": { +"caCertificate": { +"description": "Optional. Input only. PEM-encoded certificate of the CA that signed the source database server's certificate.", +"type": "string" +}, +"caCertificateSet": { +"description": "Output only. Indicates whether the ca_certificate field is set.", +"readOnly": true, +"type": "boolean" +}, +"clientCertificate": { +"description": "Optional. Input only. PEM-encoded certificate that will be used by the replica to authenticate against the source database server. If this field is used then the 'client_key' and the 'ca_certificate' fields are mandatory.", +"type": "string" +}, +"clientCertificateSet": { +"description": "Output only. Indicates whether the client_certificate field is set.", +"readOnly": true, +"type": "boolean" +}, +"clientKey": { +"description": "Optional. Input only. PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory.", +"type": "string" +}, +"clientKeySet": { +"description": "Output only. Indicates whether the client_key field is set.", +"readOnly": true, +"type": "boolean" +}, +"secretManagerStoredClientKey": { +"description": "Optional. Input only. A reference to a Secret Manager resource name storing the PEM-encoded private key associated with the Client Certificate. If this field is used then the 'client_certificate' and the 'ca_certificate' fields are mandatory. Mutually exclusive with the `client_key` field.", +"type": "string" +} +}, +"type": "object" +}, "MostRecentStartPosition": { "description": "CDC strategy to start replicating from the most recent position in the source.", "id": "MostRecentStartPosition", @@ -3174,7 +3216,7 @@ "id": "PscInterfaceConfig", "properties": { "networkAttachment": { -"description": "Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}`", +"description": "Required. Fully qualified name of the Network Attachment that Datastream will connect to. Format: `projects/{project}/regions/{region}/networkAttachments/{name}`", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json index c0cce9eba8..97a024a6bd 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json @@ -7052,7 +7052,7 @@ } } }, -"revision": "20250514", +"revision": "20250521", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiDistribution": { @@ -9187,6 +9187,14 @@ "readOnly": true, "type": "array" }, +"annotationMetadata": { +"description": "Output only. The annotation metadata includes structured content in the current chunk.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1ChunkAnnotationMetadata" +}, +"readOnly": true, +"type": "array" +}, "chunkMetadata": { "$ref": "GoogleCloudDiscoveryengineV1ChunkChunkMetadata", "description": "Output only. 
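The `MongodbSslConfig` message above mirrors the SSL messages on other Datastream sources. A sketch of attaching it to a MongoDB connection profile; the `mongodbProfile` wrapper and host/user fields are assumptions from the broader datastream v1 schema, and the PEM paths and secret name are placeholders:

```python
from googleapiclient.discovery import build

datastream = build("datastream", "v1")

with open("ca.pem") as f:
    ca_pem = f.read()
with open("client-cert.pem") as f:
    client_cert_pem = f.read()

body = {
    "displayName": "mongo-source",
    "mongodbProfile": {  # assumed enclosing profile field
        "hostAddresses": [{"hostname": "mongo.example.com", "port": 27017}],
        "username": "datastream",
        "sslConfig": {
            "caCertificate": ca_pem,
            # When a client certificate is supplied, the client key and
            # CA certificate become mandatory; here the key is referenced
            # via Secret Manager rather than inlined.
            "clientCertificate": client_cert_pem,
            "secretManagerStoredClientKey": (
                "projects/my-project/secrets/mongo-client-key/versions/1"
            ),
        },
    },
}
datastream.projects().locations().connectionProfiles().create(
    parent="projects/my-project/locations/us-central1",
    connectionProfileId="mongo-source",
    body=body,
).execute()
```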
Metadata of the current chunk.", @@ -9238,6 +9246,23 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1ChunkAnnotationMetadata": { +"description": "The annotation metadata includes structured content in the current chunk.", +"id": "GoogleCloudDiscoveryengineV1ChunkAnnotationMetadata", +"properties": { +"imageId": { +"description": "Output only. Image id is provided if the structured content is based on an image.", +"readOnly": true, +"type": "string" +}, +"structuredContent": { +"$ref": "GoogleCloudDiscoveryengineV1ChunkStructuredContent", +"description": "Output only. The structured content information.", +"readOnly": true +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1ChunkChunkMetadata": { "description": "Metadata of the current chunk. This field is only populated on SearchService.Search API.", "id": "GoogleCloudDiscoveryengineV1ChunkChunkMetadata", @@ -9299,6 +9324,35 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1ChunkStructuredContent": { +"description": "The structured content information.", +"id": "GoogleCloudDiscoveryengineV1ChunkStructuredContent", +"properties": { +"content": { +"description": "Output only. The content of the structured content.", +"readOnly": true, +"type": "string" +}, +"structureType": { +"description": "Output only. The structure type of the structured content.", +"enum": [ +"STRUCTURE_TYPE_UNSPECIFIED", +"SHAREHOLDER_STRUCTURE", +"SIGNATURE_STRUCTURE", +"CHECKBOX_STRUCTURE" +], +"enumDescriptions": [ +"Default value.", +"Shareholder structure.", +"Signature structure.", +"Checkbox structure." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1CloudSqlSource": { "description": "Cloud SQL source import data from.", "id": "GoogleCloudDiscoveryengineV1CloudSqlSource", @@ -10745,6 +10799,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -10850,6 +10911,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" +], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." +], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ @@ -14662,10 +14740,6 @@ "readOnly": true, "type": "string" }, -"user": { -"description": "Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. 
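The chunk `annotationMetadata` added above surfaces structured content (shareholder, signature, and checkbox structures) on results returned in chunk mode. A sketch of reading it from a search response; the serving config name is a placeholder:

```python
from googleapiclient.discovery import build

de = build("discoveryengine", "v1")

serving_config = (
    "projects/my-project/locations/global/collections/default_collection/"
    "dataStores/my-store/servingConfigs/default_serving_config"
)
response = (
    de.projects().locations().collections().dataStores().servingConfigs()
    .search(
        servingConfig=serving_config,
        body={
            "query": "shareholder structure",
            "contentSearchSpec": {"searchResultMode": "CHUNKS"},
        },
    ).execute()
)
for result in response.get("results", []):
    for ann in result.get("chunk", {}).get("annotationMetadata", []):
        sc = ann.get("structuredContent", {})
        print(sc.get("structureType"), sc.get("content"), ann.get("imageId"))
```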
This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created.", -"type": "string" -}, "userPrincipal": { "description": "Required. Immutable. The user principal of the User, could be email address or other principal identifier. This field is immutable. Admins assign licenses based on the user principal.", "type": "string" @@ -15832,7 +15906,7 @@ "type": "array" }, "extractedRecordCount": { -"description": "The number of documents extracted from connector source, ready to be ingested to UCS.", +"description": "The number of documents extracted from connector source, ready to be ingested to VAIS.", "format": "int64", "type": "string" }, @@ -15845,6 +15919,11 @@ "$ref": "GoogleCloudDiscoveryengineV1alphaConnectorRunEntityRunProgress", "description": "Metadata to generate the progress bar." }, +"scheduledRecordCount": { +"description": "The number of documents scheduled to be crawled/extracted from connector source. This only applies to third party connectors.", +"format": "int64", +"type": "string" +}, "sourceApiRequestCount": { "description": "The number of requests sent to 3p API.", "format": "int64", @@ -17172,6 +17251,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -17256,6 +17342,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" ], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." ], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ @@ -17494,6 +17597,12 @@ "readOnly": true, "type": "string" }, +"lastTrainTime": { +"description": "Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "lastTuneTime": { "description": "Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines.", "format": "google-datetime", @@ -20358,10 +20467,6 @@ "readOnly": true, "type": "string" }, -"user": { -"description": "Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. 
If the user field is empty, it's indicating the user has not logged in yet and no User entity is created.", -"type": "string" -}, "userPrincipal": { "description": "Required. Immutable. The user principal of the User, could be email address or other principal identifier. This field is immutable. Admins assign licenses based on the user principal.", "type": "string" @@ -21377,6 +21482,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -21461,6 +21573,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" ], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." ], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. 
* `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ @@ -15035,6 +15059,14 @@ "readOnly": true, "type": "array" }, +"annotationMetadata": { +"description": "Output only. The annotation metadata includes structured content in the current chunk.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaChunkAnnotationMetadata" +}, +"readOnly": true, +"type": "array" +}, "chunkMetadata": { "$ref": "GoogleCloudDiscoveryengineV1alphaChunkChunkMetadata", "description": "Output only. Metadata of the current chunk.", @@ -15086,6 +15118,23 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaChunkAnnotationMetadata": { +"description": "The annotation metadata includes structured content in the current chunk.", +"id": "GoogleCloudDiscoveryengineV1alphaChunkAnnotationMetadata", +"properties": { +"imageId": { +"description": "Output only. Image id is provided if the structured content is based on an image.", +"readOnly": true, +"type": "string" +}, +"structuredContent": { +"$ref": "GoogleCloudDiscoveryengineV1alphaChunkStructuredContent", +"description": "Output only. The structured content information.", +"readOnly": true +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaChunkChunkMetadata": { "description": "Metadata of the current chunk. This field is only populated on SearchService.Search API.", "id": "GoogleCloudDiscoveryengineV1alphaChunkChunkMetadata", @@ -15147,6 +15196,35 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaChunkStructuredContent": { +"description": "The structured content information.", +"id": "GoogleCloudDiscoveryengineV1alphaChunkStructuredContent", +"properties": { +"content": { +"description": "Output only. The content of the structured content.", +"readOnly": true, +"type": "string" +}, +"structureType": { +"description": "Output only. The structure type of the structured content.", +"enum": [ +"STRUCTURE_TYPE_UNSPECIFIED", +"SHAREHOLDER_STRUCTURE", +"SIGNATURE_STRUCTURE", +"CHECKBOX_STRUCTURE" +], +"enumDescriptions": [ +"Default value.", +"Shareholder structure.", +"Signature structure.", +"Checkbox structure." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaCloudSqlSource": { "description": "Cloud SQL source import data from.", "id": "GoogleCloudDiscoveryengineV1alphaCloudSqlSource", @@ -15555,7 +15633,7 @@ "type": "array" }, "extractedRecordCount": { -"description": "The number of documents extracted from connector source, ready to be ingested to UCS.", +"description": "The number of documents extracted from connector source, ready to be ingested to VAIS.", "format": "int64", "type": "string" }, @@ -15568,6 +15646,11 @@ "$ref": "GoogleCloudDiscoveryengineV1alphaConnectorRunEntityRunProgress", "description": "Metadata to generate the progress bar." }, +"scheduledRecordCount": { +"description": "The number of documents scheduled to be crawled/extracted from connector source. 
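The `features` map added to `Engine` in this revision (in v1, v1alpha, and v1beta alike) toggles per-engine capabilities with `FEATURE_STATE_ON`/`FEATURE_STATE_OFF`. A sketch of patching it; the engine name and chosen keys are placeholders drawn from the supported-keys list above:

```python
from googleapiclient.discovery import build

de = build("discoveryengine", "v1")

engine = (
    "projects/my-project/locations/global/collections/default_collection/"
    "engines/my-engine"
)
de.projects().locations().collections().engines().patch(
    name=engine,
    updateMask="features",
    body={
        "features": {
            "agent-gallery": "FEATURE_STATE_ON",
            "feedback": "FEATURE_STATE_OFF",
            # A "*" key would apply one state to all features and override
            # the individual settings above.
        }
    },
).execute()
```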
This only applies to third party connectors.", +"format": "int64", +"type": "string" +}, "sourceApiRequestCount": { "description": "The number of requests sent to 3p API.", "format": "int64", @@ -17342,6 +17425,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -17458,6 +17548,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" ], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." ], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ @@ -17696,6 +17803,12 @@ "readOnly": true, "type": "string" }, +"lastTrainTime": { +"description": "Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "lastTuneTime": { "description": "Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines.", "format": "google-datetime", @@ -23623,10 +23736,6 @@ "readOnly": true, "type": "string" }, -"user": { -"description": "Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created.", -"type": "string" -}, "userPrincipal": { "description": "Required. Immutable. The user principal of the User, could be email address or other principal identifier. This field is immutable. Admins assign licenses based on the user principal.", "type": "string" @@ -24244,6 +24353,24 @@ "description": "If set to true, the widget will enable visual content summary on applicable search requests. Only used by healthcare search.", "type": "boolean" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" ], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." ], +"type": "string" +}, +"description": "Output only. Feature config for the engine to opt in or opt out of features. 
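`structuredContentTypes` recurs across these hunks as a layout-parsing option with `shareholder-structure` as its only documented value. Its parent object is not shown in this diff; assuming it lives on the layout parser's parsing config, a data store could request the extraction roughly like this:

```python
from googleapiclient.discovery import build

de = build("discoveryengine", "v1")

data_store = (
    "projects/my-project/locations/global/collections/default_collection/"
    "dataStores/my-store"
)
de.projects().locations().collections().dataStores().patch(
    name=data_store,
    updateMask="documentProcessingConfig",
    body={
        "documentProcessingConfig": {
            "defaultParsingConfig": {
                "layoutParsingConfig": {
                    # Only documented supported value so far; the placement
                    # under layoutParsingConfig is an assumption.
                    "structuredContentTypes": ["shareholder-structure"],
                }
            }
        }
    },
).execute()
```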
Supported keys: * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"readOnly": true, +"type": "object" +}, "generativeAnswerConfig": { "$ref": "GoogleCloudDiscoveryengineV1alphaWidgetConfigUiSettingsGenerativeAnswerConfig", "description": "Describes generative answer configuration." @@ -25346,6 +25473,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -25430,6 +25564,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" +], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." +], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json index 7328b12865..a0e26aaba6 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json @@ -8004,7 +8004,7 @@ } } }, -"revision": "20250514", +"revision": "20250521", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiDistribution": { @@ -9402,6 +9402,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -9486,6 +9493,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" +], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." +], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. 
* `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ @@ -12015,7 +12039,7 @@ "type": "array" }, "extractedRecordCount": { -"description": "The number of documents extracted from connector source, ready to be ingested to UCS.", +"description": "The number of documents extracted from connector source, ready to be ingested to VAIS.", "format": "int64", "type": "string" }, @@ -12028,6 +12052,11 @@ "$ref": "GoogleCloudDiscoveryengineV1alphaConnectorRunEntityRunProgress", "description": "Metadata to generate the progress bar." }, +"scheduledRecordCount": { +"description": "The number of documents scheduled to be crawled/extracted from connector source. This only applies to third party connectors.", +"format": "int64", +"type": "string" +}, "sourceApiRequestCount": { "description": "The number of requests sent to 3p API.", "format": "int64", @@ -13355,6 +13384,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -13439,6 +13475,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" +], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." +], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ @@ -13677,6 +13730,12 @@ "readOnly": true, "type": "string" }, +"lastTrainTime": { +"description": "Output only. The timestamp when the latest successful training finished. Only applicable on Media Recommendation engines.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "lastTuneTime": { "description": "Output only. The timestamp when the latest successful tune finished. Only applicable on Media Recommendation engines.", "format": "google-datetime", @@ -16541,10 +16600,6 @@ "readOnly": true, "type": "string" }, -"user": { -"description": "Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. 
If the user field is empty, it's indicating the user has not logged in yet and no User entity is created.", -"type": "string" -}, "userPrincipal": { "description": "Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal.", "type": "string" @@ -18733,6 +18788,14 @@ "readOnly": true, "type": "array" }, +"annotationMetadata": { +"description": "Output only. The annotation metadata includes structured content in the current chunk.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaChunkAnnotationMetadata" +}, +"readOnly": true, +"type": "array" +}, "chunkMetadata": { "$ref": "GoogleCloudDiscoveryengineV1betaChunkChunkMetadata", "description": "Output only. Metadata of the current chunk.", @@ -18784,6 +18847,23 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaChunkAnnotationMetadata": { +"description": "The annotation metadata includes structured content in the current chunk.", +"id": "GoogleCloudDiscoveryengineV1betaChunkAnnotationMetadata", +"properties": { +"imageId": { +"description": "Output only. Image id is provided if the structured content is based on an image.", +"readOnly": true, +"type": "string" +}, +"structuredContent": { +"$ref": "GoogleCloudDiscoveryengineV1betaChunkStructuredContent", +"description": "Output only. The structured content information.", +"readOnly": true +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaChunkChunkMetadata": { "description": "Metadata of the current chunk. This field is only populated on SearchService.Search API.", "id": "GoogleCloudDiscoveryengineV1betaChunkChunkMetadata", @@ -18845,6 +18925,35 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaChunkStructuredContent": { +"description": "The structured content information.", +"id": "GoogleCloudDiscoveryengineV1betaChunkStructuredContent", +"properties": { +"content": { +"description": "Output only. The content of the structured content.", +"readOnly": true, +"type": "string" +}, +"structureType": { +"description": "Output only. The structure type of the structured content.", +"enum": [ +"STRUCTURE_TYPE_UNSPECIFIED", +"SHAREHOLDER_STRUCTURE", +"SIGNATURE_STRUCTURE", +"CHECKBOX_STRUCTURE" +], +"enumDescriptions": [ +"Default value.", +"Shareholder structure.", +"Signature structure.", +"Checkbox structure." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaCloudSqlSource": { "description": "Cloud SQL source import data from.", "id": "GoogleCloudDiscoveryengineV1betaCloudSqlSource", @@ -20329,6 +20438,13 @@ "type": "string" }, "type": "array" +}, +"structuredContentTypes": { +"description": "Optional. Contains the required structure types to extract from the document. Supported values: * `shareholder-structure`", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -20445,6 +20561,23 @@ "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" }, +"features": { +"additionalProperties": { +"enum": [ +"FEATURE_STATE_UNSPECIFIED", +"FEATURE_STATE_ON", +"FEATURE_STATE_OFF" +], +"enumDescriptions": [ +"The feature state is unspecified.", +"The feature is turned on to be accessible.", +"The feature is turned off to be inaccessible." +], +"type": "string" +}, +"description": "Optional. Feature config for the engine to opt in or opt out of features. 
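The `structuredContentTypes` knob above asks document parsing to extract specific structures (currently `shareholder-structure`), and the new output-only `Chunk.annotationMetadata` surfaces what was found. A rough sketch of walking those fields on a chunk returned by search, using only the field names defined in the v1beta schema (the `chunk` dict stands in for a real API response):

```python
def print_structured_content(chunk: dict) -> None:
    # annotationMetadata, structuredContent, structureType, content, and
    # imageId are the output-only fields defined in the schema above.
    for annotation in chunk.get("annotationMetadata", []):
        structured = annotation.get("structuredContent", {})
        print(structured.get("structureType"),  # e.g. SHAREHOLDER_STRUCTURE
              structured.get("content"))
        if "imageId" in annotation:
            print("derived from image:", annotation["imageId"])
```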
Supported keys: * `*`: all features, if it's present, all other feature state settings are ignored. * `agent-gallery` * `no-code-agent-builder` * `prompt-gallery` * `model-selector` * `notebook-lm` * `people-search` * `people-search-org-chart` * `bi-directional-audio` * `feedback`", +"type": "object" +}, "industryVertical": { "description": "Optional. The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: Vertical on Engine has to match vertical of the DataStore linked to the engine.", "enum": [ @@ -25503,10 +25636,6 @@ "readOnly": true, "type": "string" }, -"user": { -"description": "Optional. The full resource name of the User, in the format of `projects/{project}/locations/{location}/userStores/{user_store}/users/{user_id}`. This field must be a UTF-8 encoded string with a length limit of 2048 characters. If the user field is empty, it's indicating the user has not logged in yet and no User entity is created.", -"type": "string" -}, "userPrincipal": { "description": "Required. Immutable. The user principal of the User, could be email address or other prinical identifier. This field is immutable. Admin assign licenses based on the user principal.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/dlp.v2.json b/googleapiclient/discovery_cache/documents/dlp.v2.json index 5ee42057c1..8ba03061de 100644 --- a/googleapiclient/discovery_cache/documents/dlp.v2.json +++ b/googleapiclient/discovery_cache/documents/dlp.v2.json @@ -5113,7 +5113,7 @@ } } }, -"revision": "20250511", +"revision": "20250518", "rootUrl": "https://dlp.googleapis.com/", "schemas": { "GooglePrivacyDlpV2Action": { @@ -6812,10 +6812,18 @@ "description": "Resource name of the data profile associated with the finding.", "type": "string" }, +"dataSourceType": { +"$ref": "GooglePrivacyDlpV2DataSourceType", +"description": "The type of the resource that was profiled." +}, "findingId": { "description": "A unique identifier for the finding.", "type": "string" }, +"fullResourceName": { +"description": "The [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) of the resource profiled for this finding.", +"type": "string" +}, "infotype": { "$ref": "GooglePrivacyDlpV2InfoType", "description": "The [type of content](https://cloud.google.com/sensitive-data-protection/docs/infotypes-reference) that might have been found." diff --git a/googleapiclient/discovery_cache/documents/documentai.v1.json b/googleapiclient/discovery_cache/documents/documentai.v1.json index b9fb12a639..d71acd3b9e 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1.json @@ -1048,7 +1048,7 @@ } } }, -"revision": "20250427", +"revision": "20250521", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": { @@ -3685,19 +3685,17 @@ true "description": "User defined name for the property.", "type": "string" }, -"groundingConfig": { -"description": "Grounding config of the entity type.", +"method": { +"description": "Specifies how the entity's value is obtained.", "enum": [ -"GROUNDING_CONFIG_UNSPECIFIED", -"STRICT", -"RELAXED", -"NO_GROUNDING" +"METHOD_UNSPECIFIED", +"EXTRACT", +"DERIVE" ], "enumDescriptions": [ -"Unspecified grounding config. It defaults to `STRICT` grounding.", -"Strict grounding.", -"Relaxed grounding.", -"Allow no grounding." +"Unspecified method. 
It defaults to `EXTRACT`.", +"The entity's value is directly extracted as-is from the document text.", +"The entity's value is derived through inference and is not necessarily an exact text extraction from the document." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json index 6296814be7..871dc1bd47 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json @@ -1290,7 +1290,7 @@ } } }, -"revision": "20250427", +"revision": "20250521", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": { @@ -4813,19 +4813,17 @@ true "description": "User defined name for the property.", "type": "string" }, -"groundingConfig": { -"description": "Grounding config of the entity type.", +"method": { +"description": "Specifies how the entity's value is obtained.", "enum": [ -"GROUNDING_CONFIG_UNSPECIFIED", -"STRICT", -"RELAXED", -"NO_GROUNDING" +"METHOD_UNSPECIFIED", +"EXTRACT", +"DERIVE" ], "enumDescriptions": [ -"Unspecified grounding config. It defaults to `STRICT` grounding.", -"Strict grounding.", -"Relaxed grounding.", -"Allow no grounding." +"Unspecified method. It defaults to `EXTRACT`.", +"The entity's value is directly extracted as-is from the document text.", +"The entity's value is derived through inference and is not necessarily an exact text extraction from the document." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/driveactivity.v2.json b/googleapiclient/discovery_cache/documents/driveactivity.v2.json index 32ef4100e6..27115c4894 100644 --- a/googleapiclient/discovery_cache/documents/driveactivity.v2.json +++ b/googleapiclient/discovery_cache/documents/driveactivity.v2.json @@ -132,7 +132,7 @@ } } }, -"revision": "20250329", +"revision": "20250517", "rootUrl": "https://driveactivity.googleapis.com/", "schemas": { "Action": { @@ -1141,7 +1141,7 @@ "The feature which changed restriction settings was not available.", "When restricted, this prevents items from being shared outside the domain.", "When restricted, this prevents direct sharing of individual items.", -"When restricted, this prevents actions like copy, download, and print that might result in uncontrolled duplicates of items.", +"When restricted, this prevents actions like copy, download, and print that might result in uncontrolled duplicates of items. Now deprecated in favor of READERS_CAN_DOWNLOAD.", "When restricted, this prevents use of Drive File Stream.", "When restricted, this limits sharing of folders to managers only." ], diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json b/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json index bb5bc89653..9f15215584 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json @@ -206,7 +206,7 @@ } } }, -"revision": "20250505", +"revision": "20250521", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "Date": { @@ -231,6 +231,172 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ApiAuth": { +"description": "The generic reusable api auth config. Deprecated. 
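The rename from `groundingConfig` to `method` (mirrored in the v1 and v1beta3 hunks above) reframes the setting as how a property's value is produced: `EXTRACT` for verbatim spans of document text, `DERIVE` for values inferred by the model. A hedged sketch of a schema property using it; the property name and its label field are invented for illustration:

```python
# Hypothetical entity-type property in a Document AI processor schema.
# Only the "method" field and its enum values are taken from the change above.
property_metadata = {
    "displayName": "Total amount",  # assumed name field, for illustration
    "method": "DERIVE",  # value may be inferred, not an exact text span
}
```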
Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead.", +"id": "GoogleCloudAiplatformV1beta1ApiAuth", +"properties": { +"apiKeyConfig": { +"$ref": "GoogleCloudAiplatformV1beta1ApiAuthApiKeyConfig", +"description": "The API secret." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ApiAuthApiKeyConfig": { +"description": "The API secret.", +"id": "GoogleCloudAiplatformV1beta1ApiAuthApiKeyConfig", +"properties": { +"apiKeySecretVersion": { +"description": "Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}", +"type": "string" +}, +"apiKeyString": { +"description": "The API key string. Either this or `api_key_secret_version` must be set.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1AuthConfig": { +"description": "Auth configuration to run the extension.", +"id": "GoogleCloudAiplatformV1beta1AuthConfig", +"properties": { +"apiKeyConfig": { +"$ref": "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig", +"description": "Config for API key auth." +}, +"authType": { +"description": "Type of auth scheme.", +"enum": [ +"AUTH_TYPE_UNSPECIFIED", +"NO_AUTH", +"API_KEY_AUTH", +"HTTP_BASIC_AUTH", +"GOOGLE_SERVICE_ACCOUNT_AUTH", +"OAUTH", +"OIDC_AUTH" +], +"enumDescriptions": [ +"", +"No Auth.", +"API Key Auth.", +"HTTP Basic Auth.", +"Google Service Account Auth.", +"OAuth auth.", +"OpenID Connect (OIDC) Auth." +], +"type": "string" +}, +"googleServiceAccountConfig": { +"$ref": "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig", +"description": "Config for Google Service Account auth." +}, +"httpBasicAuthConfig": { +"$ref": "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig", +"description": "Config for HTTP Basic auth." +}, +"oauthConfig": { +"$ref": "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig", +"description": "Config for user oauth." +}, +"oidcConfig": { +"$ref": "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig", +"description": "Config for user OIDC auth." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig": { +"description": "Config for authentication with API key.", +"id": "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig", +"properties": { +"apiKeySecret": { +"description": "Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", +"type": "string" +}, +"apiKeyString": { +"description": "Optional. The API key to be used in the request directly.", +"type": "string" +}, +"httpElementLocation": { +"description": "Optional. The location of the API key.", +"enum": [ +"HTTP_IN_UNSPECIFIED", +"HTTP_IN_QUERY", +"HTTP_IN_HEADER", +"HTTP_IN_PATH", +"HTTP_IN_BODY", +"HTTP_IN_COOKIE" +], +"enumDescriptions": [ +"", +"Element is in the HTTP request query.", +"Element is in the HTTP request header.", +"Element is in the HTTP request path.", +"Element is in the HTTP request body.", +"Element is in the HTTP request cookie." +], +"type": "string" +}, +"name": { +"description": "Optional. The parameter name of the API key. E.g. 
If the API request is \"https://example.com/act?api_key=\", \"api_key\" would be the parameter name.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig": { +"description": "Config for Google Service Account Authentication.", +"id": "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig", +"properties": { +"serviceAccount": { +"description": "Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig": { +"description": "Config for HTTP Basic Authentication.", +"id": "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig", +"properties": { +"credentialSecret": { +"description": "Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1AuthConfigOauthConfig": { +"description": "Config for user oauth.", +"id": "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig", +"properties": { +"accessToken": { +"description": "Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", +"type": "string" +}, +"serviceAccount": { +"description": "The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1AuthConfigOidcConfig": { +"description": "Config for user OIDC auth.", +"id": "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig", +"properties": { +"idToken": { +"description": "OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", +"type": "string" +}, +"serviceAccount": { +"description": "The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. 
- If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents).", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1Blob": { "description": "Content blob.", "id": "GoogleCloudAiplatformV1beta1Blob", @@ -288,7 +454,8 @@ "BLOCKLIST", "PROHIBITED_CONTENT", "SPII", -"MALFORMED_FUNCTION_CALL" +"MALFORMED_FUNCTION_CALL", +"UNEXPECTED_TOOL_CALL" ], "enumDescriptions": [ "The finish reason is unspecified.", @@ -300,7 +467,8 @@ "Token generation stopped because the content contains forbidden terms.", "Token generation stopped for potentially containing prohibited content.", "Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information (SPII).", -"The function call generated by the model is invalid." +"The function call generated by the model is invalid.", +"The tool call generated by the model is invalid." ], "readOnly": true, "type": "string" @@ -387,7 +555,7 @@ "type": "object" }, "GoogleCloudAiplatformV1beta1CodeExecutionResult": { -"description": "Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode].", +"description": "Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode].", "id": "GoogleCloudAiplatformV1beta1CodeExecutionResult", "properties": { "outcome": { @@ -527,7 +695,7 @@ "type": "object" }, "GoogleCloudAiplatformV1beta1ExecutableCode": { -"description": "Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE].", +"description": "Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated.", "id": "GoogleCloudAiplatformV1beta1ExecutableCode", "properties": { "code": { @@ -549,6 +717,74 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ExternalApi": { +"description": "Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec.", +"id": "GoogleCloudAiplatformV1beta1ExternalApi", +"properties": { +"apiAuth": { +"$ref": "GoogleCloudAiplatformV1beta1ApiAuth", +"deprecated": true, +"description": "The authentication config to access the API. Deprecated. Please use auth_config instead." +}, +"apiSpec": { +"description": "The API spec that the external API implements.", +"enum": [ +"API_SPEC_UNSPECIFIED", +"SIMPLE_SEARCH", +"ELASTIC_SEARCH" +], +"enumDescriptions": [ +"Unspecified API spec. This value should not be used.", +"Simple search API spec.", +"Elastic search API spec." +], +"type": "string" +}, +"authConfig": { +"$ref": "GoogleCloudAiplatformV1beta1AuthConfig", +"description": "The authentication config to access the API." +}, +"elasticSearchParams": { +"$ref": "GoogleCloudAiplatformV1beta1ExternalApiElasticSearchParams", +"description": "Parameters for the elastic search API." +}, +"endpoint": { +"description": "The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. 
Example: https://acme.com:443/search", +"type": "string" +}, +"simpleSearchParams": { +"$ref": "GoogleCloudAiplatformV1beta1ExternalApiSimpleSearchParams", +"description": "Parameters for the simple search API." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExternalApiElasticSearchParams": { +"description": "The search parameters to use for the ELASTIC_SEARCH spec.", +"id": "GoogleCloudAiplatformV1beta1ExternalApiElasticSearchParams", +"properties": { +"index": { +"description": "The ElasticSearch index to use.", +"type": "string" +}, +"numHits": { +"description": "Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param.", +"format": "int32", +"type": "integer" +}, +"searchTemplate": { +"description": "The ElasticSearch search template to use.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExternalApiSimpleSearchParams": { +"description": "The search parameters to use for SIMPLE_SEARCH spec.", +"id": "GoogleCloudAiplatformV1beta1ExternalApiSimpleSearchParams", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1FileData": { "description": "URI based data.", "id": "GoogleCloudAiplatformV1beta1FileData", @@ -637,9 +873,17 @@ "$ref": "GoogleCloudAiplatformV1beta1Schema", "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1" }, +"parametersJsonSchema": { +"description": "Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { \"type\": \"object\", \"properties\": { \"name\": { \"type\": \"string\" }, \"age\": { \"type\": \"integer\" } }, \"additionalProperties\": false, \"required\": [\"name\", \"age\"], \"propertyOrdering\": [\"name\", \"age\"] } ``` This field is mutually exclusive with `parameters`.", +"type": "any" +}, "response": { "$ref": "GoogleCloudAiplatformV1beta1Schema", "description": "Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function." +}, +"responseJsonSchema": { +"description": "Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`.", +"type": "any" } }, "type": "object" @@ -897,6 +1141,10 @@ "format": "int32", "type": "integer" }, +"enableAffectiveDialog": { +"description": "Optional. If enabled, the model will detect emotions and adapt its responses accordingly.", +"type": "boolean" +}, "frequencyPenalty": { "description": "Optional. Frequency penalties.", "format": "float", @@ -937,6 +1185,10 @@ "format": "float", "type": "number" }, +"responseJsonSchema": { +"description": "Optional. Output schema of the generated response. 
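The new `ExternalApi` retrieval source plugs a non-Google search backend into grounding. Combining the fields above with the retrieval tool wrapper, an Elasticsearch-backed configuration could look roughly like this; the index and template names are hypothetical, and `auth_config` refers to the API-key sketch earlier:

```python
retrieval_tool = {
    "retrieval": {
        "externalApi": {
            "apiSpec": "ELASTIC_SEARCH",
            "endpoint": "https://acme.com:443/search",  # example from the docs
            "authConfig": auth_config,  # see the API-key sketch above
            "elasticSearchParams": {
                "index": "knowledge-base",        # hypothetical index
                "searchTemplate": "kb-template",  # hypothetical template
                "numHits": 10,  # passed to Elasticsearch as `num_hits`
            },
        }
    }
}
```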
This is an alternative to `response_schema` that accepts [JSON Schema](https://json-schema.org/). If set, `response_schema` must be omitted, but `response_mime_type` is required. While the full JSON Schema may be sent, not all features are supported. Specifically, only the following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor` - `type` - `format` - `title` - `description` - `enum` (for strings and numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` - `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) - `properties` - `additionalProperties` - `required` The non-standard `propertyOrdering` property may also be set. Cyclic references are unrolled to a limited degree and, as such, may only be used within non-required properties. (Nullable properties are not sufficient.) If `$ref` is set on a sub-schema, no other properties, except for than those starting as a `$`, may be set.", +"type": "any" +}, "responseLogprobs": { "description": "Optional. If true, export the logprobs results in response.", "type": "boolean" @@ -1515,6 +1767,10 @@ "description": "Optional. Deprecated. This option is no longer supported.", "type": "boolean" }, +"externalApi": { +"$ref": "GoogleCloudAiplatformV1beta1ExternalApi", +"description": "Use data source powered by external API for grounding." +}, "vertexAiSearch": { "$ref": "GoogleCloudAiplatformV1beta1VertexAISearch", "description": "Set to use data source powered by Vertex AI Search." @@ -1992,6 +2248,13 @@ true "description": "Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder", "id": "GoogleCloudAiplatformV1beta1VertexAISearch", "properties": { +"dataStoreSpecs": { +"description": "Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1VertexAISearchDataStoreSpec" +}, +"type": "array" +}, "datastore": { "description": "Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", "type": "string" @@ -2012,6 +2275,21 @@ true }, "type": "object" }, +"GoogleCloudAiplatformV1beta1VertexAISearchDataStoreSpec": { +"description": "Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec", +"id": "GoogleCloudAiplatformV1beta1VertexAISearchDataStoreSpec", +"properties": { +"dataStore": { +"description": "Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", +"type": "string" +}, +"filter": { +"description": "Optional. Filter specification to filter documents in the data store specified by data_store field. 
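`parametersJsonSchema` and `responseJsonSchema` on functions, and `GenerationConfig.responseJsonSchema`, accept plain JSON Schema instead of the OpenAPI-style `Schema` object, with the supported keywords listed above. Per those docs, setting `responseJsonSchema` requires omitting `response_schema` and setting `response_mime_type`; a minimal generation config, mirroring the example embedded in the `parametersJsonSchema` description, might be:

```python
generation_config = {
    "responseMimeType": "application/json",  # required alongside the schema
    "responseJsonSchema": {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
        },
        "required": ["name", "age"],
        "additionalProperties": False,
        "propertyOrdering": ["name", "age"],  # non-standard but supported
    },
}
```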
For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1VertexRagStore": { "description": "Retrieve from Vertex RAG Store for grounding.", "id": "GoogleCloudAiplatformV1beta1VertexRagStore", diff --git a/googleapiclient/discovery_cache/documents/gkebackup.v1.json b/googleapiclient/discovery_cache/documents/gkebackup.v1.json index c10f31db8c..3655ff83ad 100644 --- a/googleapiclient/discovery_cache/documents/gkebackup.v1.json +++ b/googleapiclient/discovery_cache/documents/gkebackup.v1.json @@ -2227,7 +2227,7 @@ } } }, -"revision": "20250507", +"revision": "20250514", "rootUrl": "https://gkebackup.googleapis.com/", "schemas": { "AuditConfig": { @@ -2477,7 +2477,7 @@ "type": "string" }, "destinationProject": { -"description": "Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future.", +"description": "Required. Immutable. The project where Backups are allowed to be stored. The format is `projects/{projectId}` or `projects/{projectNumber}`.", "type": "string" }, "destinationProjectId": { @@ -3783,7 +3783,7 @@ "type": "string" }, "destinationProject": { -"description": "Required. Immutable. The project into which the backups will be restored. The format is `projects/{project}`. Currently, {project} can only be the project number. Support for project IDs will be added in the future.", +"description": "Required. Immutable. The project into which the backups will be restored. The format is `projects/{projectId}` or `projects/{projectNumber}`.", "type": "string" }, "destinationProjectId": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1.json b/googleapiclient/discovery_cache/documents/gkehub.v1.json index 8c033d2453..a4304c23aa 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1.json @@ -2117,7 +2117,7 @@ } } }, -"revision": "20250509", +"revision": "20250516", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceFeatureSpec": { @@ -6259,7 +6259,6 @@ "NODEPOOL_WORKLOAD_IDENTITY_FEDERATION_REQUIRED", "CNI_INSTALLATION_FAILED", "CNI_POD_UNSCHEDULABLE", -"THC_POD_UNSCHEDULABLE", "CLUSTER_HAS_ZERO_NODES", "CANONICAL_SERVICE_ERROR", "UNSUPPORTED_MULTIPLE_CONTROL_PLANES", @@ -6304,7 +6303,6 @@ "Nodepool workload identity federation required error code", "CNI installation failed error code", "CNI pod unschedulable error code", -"THC pod unschedulable error code", "Cluster has zero node code", "Failure to reconcile CanonicalServices", "Multiple control planes unsupported error code", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json index 244021cdd5..4e3f41bff6 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json @@ -2261,7 +2261,7 @@ } } }, -"revision": "20250509", +"revision": "20250516", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceFeatureSpec": { @@ -6843,7 +6843,6 @@ "NODEPOOL_WORKLOAD_IDENTITY_FEDERATION_REQUIRED", "CNI_INSTALLATION_FAILED", "CNI_POD_UNSCHEDULABLE", -"THC_POD_UNSCHEDULABLE", "CLUSTER_HAS_ZERO_NODES", "CANONICAL_SERVICE_ERROR", 
"UNSUPPORTED_MULTIPLE_CONTROL_PLANES", @@ -6888,7 +6887,6 @@ "Nodepool workload identity federation required error code", "CNI installation failed error code", "CNI pod unschedulable error code", -"THC pod unschedulable error code", "Cluster has zero node code", "Failure to reconcile CanonicalServices", "Multiple control planes unsupported error code", @@ -7061,7 +7059,6 @@ "NODEPOOL_WORKLOAD_IDENTITY_FEDERATION_REQUIRED", "CNI_INSTALLATION_FAILED", "CNI_POD_UNSCHEDULABLE", -"THC_POD_UNSCHEDULABLE", "CLUSTER_HAS_ZERO_NODES", "CANONICAL_SERVICE_ERROR", "UNSUPPORTED_MULTIPLE_CONTROL_PLANES", @@ -7106,7 +7103,6 @@ "Nodepool workload identity federation required error code", "CNI installation failed error code", "CNI pod unschedulable error code", -"THC pod unschedulable error code", "Cluster has zero node code", "Failure to reconcile CanonicalServices", "Multiple control planes unsupported error code", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json index 09363552a1..d36cf2cae5 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json @@ -2117,7 +2117,7 @@ } } }, -"revision": "20250509", +"revision": "20250516", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceFeatureSpec": { @@ -6385,7 +6385,6 @@ "NODEPOOL_WORKLOAD_IDENTITY_FEDERATION_REQUIRED", "CNI_INSTALLATION_FAILED", "CNI_POD_UNSCHEDULABLE", -"THC_POD_UNSCHEDULABLE", "CLUSTER_HAS_ZERO_NODES", "CANONICAL_SERVICE_ERROR", "UNSUPPORTED_MULTIPLE_CONTROL_PLANES", @@ -6430,7 +6429,6 @@ "Nodepool workload identity federation required error code", "CNI installation failed error code", "CNI pod unschedulable error code", -"THC pod unschedulable error code", "Cluster has zero node code", "Failure to reconcile CanonicalServices", "Multiple control planes unsupported error code", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2.json b/googleapiclient/discovery_cache/documents/gkehub.v2.json index 90ba24ebdb..ec3b2b79e0 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v2.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v2.json @@ -477,7 +477,7 @@ } } }, -"revision": "20250509", +"revision": "20250516", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceState": { @@ -3030,7 +3030,6 @@ "NODEPOOL_WORKLOAD_IDENTITY_FEDERATION_REQUIRED", "CNI_INSTALLATION_FAILED", "CNI_POD_UNSCHEDULABLE", -"THC_POD_UNSCHEDULABLE", "CLUSTER_HAS_ZERO_NODES", "CANONICAL_SERVICE_ERROR", "UNSUPPORTED_MULTIPLE_CONTROL_PLANES", @@ -3075,7 +3074,6 @@ "Nodepool workload identity federation required error code", "CNI installation failed error code", "CNI pod unschedulable error code", -"THC pod unschedulable error code", "Cluster has zero node code", "Failure to reconcile CanonicalServices", "Multiple control planes unsupported error code", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json index 7991b70989..4bfb0382cb 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json @@ -477,7 +477,7 @@ } } }, -"revision": "20250509", +"revision": "20250516", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceState": { @@ -3030,7 +3030,6 @@ "NODEPOOL_WORKLOAD_IDENTITY_FEDERATION_REQUIRED", "CNI_INSTALLATION_FAILED", 
"CNI_POD_UNSCHEDULABLE", -"THC_POD_UNSCHEDULABLE", "CLUSTER_HAS_ZERO_NODES", "CANONICAL_SERVICE_ERROR", "UNSUPPORTED_MULTIPLE_CONTROL_PLANES", @@ -3075,7 +3074,6 @@ "Nodepool workload identity federation required error code", "CNI installation failed error code", "CNI pod unschedulable error code", -"THC pod unschedulable error code", "Cluster has zero node code", "Failure to reconcile CanonicalServices", "Multiple control planes unsupported error code", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2beta.json b/googleapiclient/discovery_cache/documents/gkehub.v2beta.json index c06e9bc03d..bec2cfc33c 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v2beta.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v2beta.json @@ -477,7 +477,7 @@ } } }, -"revision": "20250509", +"revision": "20250516", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceState": { @@ -3030,7 +3030,6 @@ "NODEPOOL_WORKLOAD_IDENTITY_FEDERATION_REQUIRED", "CNI_INSTALLATION_FAILED", "CNI_POD_UNSCHEDULABLE", -"THC_POD_UNSCHEDULABLE", "CLUSTER_HAS_ZERO_NODES", "CANONICAL_SERVICE_ERROR", "UNSUPPORTED_MULTIPLE_CONTROL_PLANES", @@ -3075,7 +3074,6 @@ "Nodepool workload identity federation required error code", "CNI installation failed error code", "CNI pod unschedulable error code", -"THC pod unschedulable error code", "Cluster has zero node code", "Failure to reconcile CanonicalServices", "Multiple control planes unsupported error code", diff --git a/googleapiclient/discovery_cache/documents/gkeonprem.v1.json b/googleapiclient/discovery_cache/documents/gkeonprem.v1.json index c2402c61ba..62ff3872ee 100644 --- a/googleapiclient/discovery_cache/documents/gkeonprem.v1.json +++ b/googleapiclient/discovery_cache/documents/gkeonprem.v1.json @@ -3100,7 +3100,7 @@ } } }, -"revision": "20250423", +"revision": "20250519", "rootUrl": "https://gkeonprem.googleapis.com/", "schemas": { "Authorization": { @@ -4578,6 +4578,10 @@ "localName": { "description": "Optional. The object name of the bare metal cluster custom resource on the associated admin cluster. This field is used to support conflicting resource names when enrolling existing clusters to the API. When not provided, this field will resolve to the bare_metal_cluster_id. Otherwise, it must match the object name of the bare metal cluster custom resource. It is not modifiable outside / beyond the enrollment operation.", "type": "string" +}, +"localNamespace": { +"description": "Optional. The namespace of the cluster.", +"type": "string" } }, "type": "object" @@ -5682,6 +5686,10 @@ "description": "Output only. The VMware admin cluster prepared secrets configuration. It should always be enabled by the Central API, instead of letting users set it.", "readOnly": true }, +"privateRegistryConfig": { +"$ref": "VmwareAdminPrivateRegistryConfig", +"description": "Configuration for registry." +}, "reconciling": { "description": "Output only. If set, there are currently changes in flight to the VMware admin cluster.", "readOnly": true, @@ -5912,6 +5920,21 @@ }, "type": "object" }, +"VmwareAdminPrivateRegistryConfig": { +"description": "VmwareAdminPrivateRegistryConfig represents configuration for admin cluster registry.", +"id": "VmwareAdminPrivateRegistryConfig", +"properties": { +"address": { +"description": "The registry address.", +"type": "string" +}, +"caCert": { +"description": "When the container runtime pulls an image from private registry, the registry must prove its identity by presenting a certificate. 
The registry's certificate is signed by a certificate authority (CA). The container runtime uses the CA's certificate to validate the registry's certificate.", +"type": "string" +} +}, +"type": "object" +}, "VmwareAdminSeesawConfig": { "description": "VmwareSeesawConfig represents configuration parameters for an already existing Seesaw load balancer. IMPORTANT: Please note that the Anthos On-Prem API will not generate or update Seesaw configurations it can only bind a pre-existing configuration to a new user cluster. IMPORTANT: When attempting to create a user cluster with a pre-existing Seesaw load balancer you will need to follow some preparation steps before calling the 'CreateVmwareCluster' API method. First you will need to create the user cluster's namespace via kubectl. The namespace will need to use the following naming convention : -gke-onprem-mgmt or -gke-onprem-mgmt depending on whether you used the 'VmwareCluster.local_name' to disambiguate collisions; for more context see the documentation of 'VmwareCluster.local_name'. Once the namespace is created you will need to create a secret resource via kubectl. This secret will contain copies of your Seesaw credentials. The Secret must be called 'user-cluster-creds' and contain Seesaw's SSH and Cert credentials. The credentials must be keyed with the following names: 'seesaw-ssh-private-key', 'seesaw-ssh-public-key', 'seesaw-ssh-ca-key', 'seesaw-ssh-ca-cert'.", "id": "VmwareAdminSeesawConfig", diff --git a/googleapiclient/discovery_cache/documents/iam.v1.json b/googleapiclient/discovery_cache/documents/iam.v1.json index cbc8c5da36..cfcb166a8c 100644 --- a/googleapiclient/discovery_cache/documents/iam.v1.json +++ b/googleapiclient/discovery_cache/documents/iam.v1.json @@ -3773,7 +3773,7 @@ } } }, -"revision": "20250509", +"revision": "20250516", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AccessRestrictions": { @@ -4337,12 +4337,12 @@ "type": "string" }, "lifetime": { -"description": "Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 10 hours and 30 days. If not specified, this will be defaulted to 24 hours.", +"description": "Optional. Lifetime of the workload certificates issued by the CA pool. Must be between 24 hours and 30 days. If not specified, this will be defaulted to 24 hours.", "format": "google-duration", "type": "string" }, "rotationWindowPercentage": { -"description": "Optional. Rotation window percentage indicating when certificate rotation should be initiated based on remaining lifetime. Must be between 10 and 80. If not specified, this will be defaulted to 50.", +"description": "Optional. Rotation window percentage, the percentage of remaining lifetime after which certificate rotation is initiated. Must be between 50 and 80. If no value is specified, rotation window percentage is defaulted to 50.", "format": "int32", "type": "integer" } @@ -5681,7 +5681,7 @@ false "type": "object" }, "TrustStore": { -"description": "Trust store that contains trust anchors and optional intermediate CAs used in PKI to build trust chain and verify a client's identity.", +"description": "Trust store that contains trust anchors and optional intermediate CAs used in PKI to build a trust chain(trust hierarchy) and verify a client's identity.", "id": "TrustStore", "properties": { "intermediateCas": { @@ -6015,9 +6015,9 @@ false "TRUST_DOMAIN" ], "enumDescriptions": [ -"State unspecified. New pools should not use this mode. 
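`VmwareAdminPrivateRegistryConfig` lets an admin cluster pull images from a private registry whose serving certificate chains to a private CA. A sketch of the new field on the admin cluster resource; the address and certificate are placeholders:

```python
vmware_admin_cluster_fragment = {
    "privateRegistryConfig": {
        "address": "registry.example.com:5000",  # placeholder registry
        # CA certificate (PEM) the container runtime uses to validate
        # the registry's serving certificate.
        "caCert": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
    }
}
```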
Pools with an unspecified mode will operate as if they are in FEDERATION_ONLY mode.", -"FEDERATION_ONLY mode pools can only be used for federating external workload identities into Google Cloud. Unless otherwise noted, no structure or format constraints are applied to workload identities in a FEDERATION_ONLY mode pool, and you may not create any resources within the pool besides providers.", -"TRUST_DOMAIN mode pools can be used to assign identities to either external workloads or those hosted on Google Cloud. All identities within a TRUST_DOMAIN mode pool must consist of a single namespace and individual workload identifier. The subject identifier for all identities must conform to the following format: `ns//sa/` WorkloadIdentityPoolProviders cannot be created within TRUST_DOMAIN mode pools." +"State unspecified. New pools should not use this mode. Pools with an unspecified mode will operate as if they are in federation-only mode.", +"Federation-only mode. Federation-only pools can only be used for federating external workload identities into Google Cloud. Unless otherwise noted, no structure or format constraints are applied to workload identities in a federation-only pool, and you cannot create any resources within the pool besides providers.", +"Trust-domain mode. Trust-domain pools can be used to assign identities to Google Cloud workloads. All identities within a trust-domain pool must consist of a single namespace and individual workload identifier. The subject identifier for all identities must conform to the following format: `ns//sa/` WorkloadIdentityPoolProviders cannot be created within trust-domain pools." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/integrations.v1.json b/googleapiclient/discovery_cache/documents/integrations.v1.json index 3d610be1b0..93c485f89f 100644 --- a/googleapiclient/discovery_cache/documents/integrations.v1.json +++ b/googleapiclient/discovery_cache/documents/integrations.v1.json @@ -741,6 +741,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"provisionClientPostProcessor": { +"description": "Perform post provisioning steps after client is provisioned.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clients:provisionClientPostProcessor", +"httpMethod": "POST", +"id": "integrations.projects.locations.clients.provisionClientPostProcessor", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. Required: The ID of the GCP Project to be provisioned.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/clients:provisionClientPostProcessor", +"request": { +"$ref": "GoogleCloudIntegrationsV1alphaProvisionClientPostProcessorRequest" +}, +"response": { +"$ref": "GoogleCloudIntegrationsV1alphaProvisionClientPostProcessorResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "replace": { "description": "Update run-as service account for provisioned client", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clients:replace", @@ -4810,7 +4838,7 @@ } } }, -"revision": "20250506", +"revision": "20250521", "rootUrl": "https://integrations.googleapis.com/", "schemas": { "CrmlogErrorCode": { @@ -11084,6 +11112,10 @@ false "description": "Optional. Indicates the client enables internal IP feature, this is applicable for internal clients only.", "type": "boolean" }, +"enableManagedAiFeatures": { +"description": "Optional. 
Indicates if the Cloud Companion APIs will be used in the tenant project, i.e. if customer can use the managed AI features for free.", +"type": "boolean" +}, "enableVariableMasking": { "description": "Optional. True if variable masking feature should be turned on for this region", "type": "boolean" @@ -13521,6 +13553,36 @@ false }, "type": "object" }, +"GoogleCloudIntegrationsV1alphaProvisionClientPostProcessorRequest": { +"description": "Request for PostProvisioning rpc call.", +"id": "GoogleCloudIntegrationsV1alphaProvisionClientPostProcessorRequest", +"properties": { +"workflows": { +"description": "Optional. Indicate which workflows to create", +"items": { +"enum": [ +"SAMPLE_INTEGRATIONS_UNSPECIFIED", +"SAMPLE_WORKFLOW_ECOM_PROCESSING", +"EXECUTE_CONNECTOR_TOOL_WORKFLOW" +], +"enumDescriptions": [ +"Default value, should not be used.", +"Ecom processing workflow", +"Connector tool workflow" +], +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudIntegrationsV1alphaProvisionClientPostProcessorResponse": { +"description": "Response for PostProvisioning rpc call.", +"id": "GoogleCloudIntegrationsV1alphaProvisionClientPostProcessorResponse", +"properties": {}, +"type": "object" +}, "GoogleCloudIntegrationsV1alphaProvisionClientRequest": { "description": "Request for the Provision rpc", "id": "GoogleCloudIntegrationsV1alphaProvisionClientRequest", @@ -13537,6 +13599,10 @@ false "description": "Optional. Indicates if the client should be allowed to make HTTP calls.", "type": "boolean" }, +"enableManagedAiFeatures": { +"description": "Optional. Indicates if the client should be allowed to use managed AI features, i.e. using Cloud Companion APIs of the tenant project. This will allow the customers to use features like Troubleshooting, OpenAPI spec enrichment, etc. for free.", +"type": "boolean" +}, "provisionGmek": { "deprecated": true, "description": "Optional. Deprecated. Indicates provision with GMEK or CMEK. This field is deprecated and the provision would always be GMEK if cloud_kms_config is not present in the request.", diff --git a/googleapiclient/discovery_cache/documents/kmsinventory.v1.json b/googleapiclient/discovery_cache/documents/kmsinventory.v1.json index 0c888fbab3..31aa9885c5 100644 --- a/googleapiclient/discovery_cache/documents/kmsinventory.v1.json +++ b/googleapiclient/discovery_cache/documents/kmsinventory.v1.json @@ -242,7 +242,7 @@ } } }, -"revision": "20250302", +"revision": "20250518", "rootUrl": "https://kmsinventory.googleapis.com/", "schemas": { "GoogleCloudKmsInventoryV1ListCryptoKeysResponse": { @@ -659,7 +659,7 @@ "This version is still being generated. It may not be used, enabled, disabled, or destroyed yet. Cloud KMS will automatically mark this version ENABLED as soon as the version is ready.", "This version may be used for cryptographic operations.", "This version may not be used, but the key material is still available, and the version can be placed back into the ENABLED state.", -"This key material of this version is destroyed and no longer stored. This version may only become ENABLED again if this version is reimport_eligible and the original key material is reimported with a call to KeyManagementService.ImportCryptoKeyVersion.", +"The key material of this version is destroyed and no longer stored. 
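The new `provisionClientPostProcessor` method runs follow-up steps, such as seeding sample workflows, after a client is provisioned. Since the Python client surface is generated from this document, the call should be reachable roughly as below; the project and location are placeholders:

```python
from googleapiclient.discovery import build

integrations = build("integrations", "v1")
request = (
    integrations.projects()
    .locations()
    .clients()
    .provisionClientPostProcessor(
        parent="projects/my-project/locations/us-central1",
        body={"workflows": ["SAMPLE_WORKFLOW_ECOM_PROCESSING"]},
    )
)
response = request.execute()  # response body is empty on success
```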
This version may only become ENABLED again if this version is reimport_eligible and the original key material is reimported with a call to KeyManagementService.ImportCryptoKeyVersion.", "This version is scheduled for destruction, and will be destroyed soon. Call RestoreCryptoKeyVersion to put it back into the DISABLED state.", "This version is still being imported. It may not be used, enabled, disabled, or destroyed yet. Cloud KMS will automatically mark this version ENABLED as soon as the version is ready.", "This version was not imported successfully. It may not be used, enabled, disabled, or destroyed. The submitted key material has been discarded. Additional details can be found in CryptoKeyVersion.import_failure_reason.", diff --git a/googleapiclient/discovery_cache/documents/language.v1.json b/googleapiclient/discovery_cache/documents/language.v1.json index 6a00297869..07e37637d7 100644 --- a/googleapiclient/discovery_cache/documents/language.v1.json +++ b/googleapiclient/discovery_cache/documents/language.v1.json @@ -246,7 +246,7 @@ } } }, -"revision": "20250511", +"revision": "20250518", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { @@ -691,6 +691,7 @@ "A2", "A3", "A4", +"A4X", "C2", "C2D", "CUSTOM", @@ -709,6 +710,7 @@ "GPU-based machine, skip quota reporting.", "GPU-based machine, skip quota reporting.", "GPU-based machine, skip quota reporting.", +"GPU-based machine, skip quota reporting.", "COMPUTE_OPTIMIZED", "", "", @@ -766,6 +768,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1058,6 +1061,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1434,6 +1438,7 @@ "NVIDIA_TESLA_A100", "NVIDIA_A100_80GB", "NVIDIA_B200", +"NVIDIA_GB200", "NVIDIA_TESLA_K80", "NVIDIA_L4", "NVIDIA_TESLA_P100", @@ -1457,6 +1462,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1503,6 +1509,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1795,6 +1802,7 @@ "", "", "", +"", "" ], "type": "string" @@ -2193,6 +2201,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -2485,6 +2494,7 @@ "", "", "", +"", "" ], "type": "string" @@ -2501,6 +2511,7 @@ "A2", "A3", "A4", +"A4X", "C2", "C2D", "CUSTOM", @@ -2519,6 +2530,7 @@ "", "", "", +"", "COMPUTE_OPTIMIZED", "", "", diff --git a/googleapiclient/discovery_cache/documents/language.v1beta2.json b/googleapiclient/discovery_cache/documents/language.v1beta2.json index 71552070d4..2791037663 100644 --- a/googleapiclient/discovery_cache/documents/language.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/language.v1beta2.json @@ -246,7 +246,7 @@ } } }, -"revision": "20250511", +"revision": "20250518", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { @@ -691,6 +691,7 @@ "A2", "A3", "A4", +"A4X", "C2", "C2D", "CUSTOM", @@ -709,6 +710,7 @@ "GPU-based machine, skip quota reporting.", "GPU-based machine, skip quota reporting.", "GPU-based machine, skip quota reporting.", +"GPU-based machine, skip quota reporting.", "COMPUTE_OPTIMIZED", "", "", @@ -766,6 +768,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1058,6 +1061,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1452,6 +1456,7 @@ "NVIDIA_TESLA_A100", "NVIDIA_A100_80GB", "NVIDIA_B200", +"NVIDIA_GB200", 
"NVIDIA_TESLA_K80", "NVIDIA_L4", "NVIDIA_TESLA_P100", @@ -1475,6 +1480,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1521,6 +1527,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1813,6 +1820,7 @@ "", "", "", +"", "" ], "type": "string" @@ -2211,6 +2219,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -2503,6 +2512,7 @@ "", "", "", +"", "" ], "type": "string" @@ -2519,6 +2529,7 @@ "A2", "A3", "A4", +"A4X", "C2", "C2D", "CUSTOM", @@ -2537,6 +2548,7 @@ "", "", "", +"", "COMPUTE_OPTIMIZED", "", "", diff --git a/googleapiclient/discovery_cache/documents/language.v2.json b/googleapiclient/discovery_cache/documents/language.v2.json index f7da96c3d4..eab515d8b5 100644 --- a/googleapiclient/discovery_cache/documents/language.v2.json +++ b/googleapiclient/discovery_cache/documents/language.v2.json @@ -208,7 +208,7 @@ } } }, -"revision": "20250511", +"revision": "20250518", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { @@ -516,6 +516,7 @@ "A2", "A3", "A4", +"A4X", "C2", "C2D", "CUSTOM", @@ -534,6 +535,7 @@ "GPU-based machine, skip quota reporting.", "GPU-based machine, skip quota reporting.", "GPU-based machine, skip quota reporting.", +"GPU-based machine, skip quota reporting.", "COMPUTE_OPTIMIZED", "", "", @@ -591,6 +593,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -883,6 +886,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1073,6 +1077,7 @@ "NVIDIA_TESLA_A100", "NVIDIA_A100_80GB", "NVIDIA_B200", +"NVIDIA_GB200", "NVIDIA_TESLA_K80", "NVIDIA_L4", "NVIDIA_TESLA_P100", @@ -1096,6 +1101,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1142,6 +1148,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1434,6 +1441,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1587,6 +1595,7 @@ "A3_ULTRAGPU_8G", "A3_EDGEGPU_8G", "A4_HIGHGPU_8G", +"A4X_HIGHGPU_4G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1879,6 +1888,7 @@ "", "", "", +"", "" ], "type": "string" @@ -1895,6 +1905,7 @@ "A2", "A3", "A4", +"A4X", "C2", "C2D", "CUSTOM", @@ -1913,6 +1924,7 @@ "", "", "", +"", "COMPUTE_OPTIMIZED", "", "", diff --git a/googleapiclient/discovery_cache/documents/logging.v2.json b/googleapiclient/discovery_cache/documents/logging.v2.json index 2d2bcab913..4081162053 100644 --- a/googleapiclient/discovery_cache/documents/logging.v2.json +++ b/googleapiclient/discovery_cache/documents/logging.v2.json @@ -1445,7 +1445,7 @@ "logs": { "methods": { "delete": { -"description": "Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", +"description": "Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. 
Entries received after the delete operation with a timestamp before the operation will be deleted.", "flatPath": "v2/billingAccounts/{billingAccountsId}/logs/{logsId}", "httpMethod": "DELETE", "id": "logging.billingAccounts.logs.delete", @@ -3534,7 +3534,7 @@ "logs": { "methods": { "delete": { -"description": "Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", +"description": "Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", "flatPath": "v2/folders/{foldersId}/logs/{logsId}", "httpMethod": "DELETE", "id": "logging.folders.logs.delete", @@ -4685,7 +4685,7 @@ "logs": { "methods": { "delete": { -"description": "Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", +"description": "Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", "flatPath": "v2/{v2Id}/{v2Id1}/logs/{logsId}", "httpMethod": "DELETE", "id": "logging.logs.delete", @@ -6364,7 +6364,7 @@ "logs": { "methods": { "delete": { -"description": "Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", +"description": "Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", "flatPath": "v2/organizations/{organizationsId}/logs/{logsId}", "httpMethod": "DELETE", "id": "logging.organizations.logs.delete", @@ -8172,7 +8172,7 @@ "logs": { "methods": { "delete": { -"description": "Deletes all the log entries in a log for the _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", +"description": "Deletes all the log entries in a log for the global _Default Log Bucket. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. 
Entries received after the delete operation with a timestamp before the operation will be deleted.", "flatPath": "v2/projects/{projectsId}/logs/{logsId}", "httpMethod": "DELETE", "id": "logging.projects.logs.delete", @@ -8952,7 +8952,7 @@ } } }, -"revision": "20250411", +"revision": "20250516", "rootUrl": "https://logging.googleapis.com/", "schemas": { "AppHub": { diff --git a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json index 5dc28af84d..20ae080c8a 100644 --- a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json @@ -2554,7 +2554,7 @@ } } }, -"revision": "20250515", +"revision": "20250522", "rootUrl": "https://migrationcenter.googleapis.com/", "schemas": { "AddAssetsToGroupRequest": { @@ -4643,11 +4643,6 @@ "description": "Disk free space.", "format": "int64", "type": "string" -}, -"vmwareConfig": { -"$ref": "VmwareDiskConfig", -"deprecated": true, -"description": "VMware disk details. Deprecated" } }, "type": "object" @@ -8928,51 +8923,6 @@ false }, "type": "object" }, -"VmwareDiskConfig": { -"description": "VMware disk config details.", -"id": "VmwareDiskConfig", -"properties": { -"backingType": { -"description": "VMDK backing type.", -"enum": [ -"BACKING_TYPE_UNSPECIFIED", -"BACKING_TYPE_FLAT_V1", -"BACKING_TYPE_FLAT_V2", -"BACKING_TYPE_PMEM", -"BACKING_TYPE_RDM_V1", -"BACKING_TYPE_RDM_V2", -"BACKING_TYPE_SESPARSE", -"BACKING_TYPE_SESPARSE_V1", -"BACKING_TYPE_SESPARSE_V2" -], -"enumDescriptions": [ -"Default value.", -"Flat v1.", -"Flat v2.", -"Persistent memory, also known as Non-Volatile Memory (NVM).", -"Raw Disk Memory v1.", -"Raw Disk Memory v2.", -"SEsparse is a snapshot format introduced in vSphere 5.5 for large disks.", -"SEsparse v1.", -"SEsparse v1." -], -"type": "string" -}, -"rdmCompatibilityMode": { -"description": "RDM compatibility mode.", -"type": "string" -}, -"shared": { -"description": "Is VMDK shared with other VMs.", -"type": "boolean" -}, -"vmdkDiskMode": { -"description": "VMDK disk mode.", -"type": "string" -} -}, -"type": "object" -}, "VmwareEngineMigrationTarget": { "description": "VMWare engine migration target.", "id": "VmwareEngineMigrationTarget", diff --git a/googleapiclient/discovery_cache/documents/monitoring.v3.json b/googleapiclient/discovery_cache/documents/monitoring.v3.json index 9251f9dd2e..ca11dc0c1c 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v3.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v3.json @@ -2720,7 +2720,7 @@ } } }, -"revision": "20250508", +"revision": "20250515", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -3415,7 +3415,7 @@ "id": "Criteria", "properties": { "filter": { -"description": "Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. 
For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of \"1234567890\", a metric label with an instance name of \"group\", a metadata user label with a key of \"foo\" and a value of \"bar\", and a metadata system label with a key of \"region\" and a value of \"us-central1\": \"filter\": \"resource.labels.instance_id=\\\"1234567890\\\" AND metric.labels.instance_name=\\\"test_group\\\" AND metadata.user_labels.foo=\\\"bar\\\" AND metadata.system_labels.region=\\\"us-central1\\\"\" ", +"description": "Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: \"filter\": \"resource.labels.instance_id=\\\"1234567890\\\" AND metric.labels.instance_name=\\\"test_group\\\" AND metadata.user_labels.foo=\\\"bar\\\" AND metadata.system_labels.region=\\\"us-central1\\\"\" ", "type": "string" }, "policies": { diff --git a/googleapiclient/discovery_cache/documents/netapp.v1.json b/googleapiclient/discovery_cache/documents/netapp.v1.json index 6aa588a7fa..a97f6cbe6b 100644 --- a/googleapiclient/discovery_cache/documents/netapp.v1.json +++ b/googleapiclient/discovery_cache/documents/netapp.v1.json @@ -2270,7 +2270,7 @@ } } }, -"revision": "20250515", +"revision": "20250519", "rootUrl": "https://netapp.googleapis.com/", "schemas": { "ActiveDirectory": { @@ -2861,32 +2861,39 @@ "id": "HybridPeeringDetails", "properties": { "command": { -"description": "Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests.", +"description": "Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests.", +"readOnly": true, "type": "string" }, "commandExpiryTime": { -"description": "Optional. Expiration time for the peering command to be executed on user's ONTAP.", +"description": "Output only. Expiration time for the peering command to be executed on user's ONTAP.", "format": "google-datetime", +"readOnly": true, "type": "string" }, "passphrase": { -"description": "Optional. Temporary passphrase generated to accept cluster peering command.", +"description": "Output only. Temporary passphrase generated to accept cluster peering command.", +"readOnly": true, "type": "string" }, "peerClusterName": { -"description": "Optional. Name of the user's local source cluster to be peered with the destination cluster.", +"description": "Output only. Name of the user's local source cluster to be peered with the destination cluster.", +"readOnly": true, "type": "string" }, "peerSvmName": { -"description": "Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm.", +"description": "Output only. 
Name of the user's local source vserver svm to be peered with the destination vserver svm.", +"readOnly": true, "type": "string" }, "peerVolumeName": { -"description": "Optional. Name of the user's local source volume to be peered with the destination volume.", +"description": "Output only. Name of the user's local source volume to be peered with the destination volume.", +"readOnly": true, "type": "string" }, "subnetIp": { -"description": "Optional. IP address of the subnet.", +"description": "Output only. IP address of the subnet.", +"readOnly": true, "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/netapp.v1beta1.json b/googleapiclient/discovery_cache/documents/netapp.v1beta1.json index da00eabb9b..1cb641b882 100644 --- a/googleapiclient/discovery_cache/documents/netapp.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/netapp.v1beta1.json @@ -2270,7 +2270,7 @@ } } }, -"revision": "20250515", +"revision": "20250519", "rootUrl": "https://netapp.googleapis.com/", "schemas": { "ActiveDirectory": { @@ -2861,32 +2861,39 @@ "id": "HybridPeeringDetails", "properties": { "command": { -"description": "Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests.", +"description": "Output only. Copy-paste-able commands to be used on user's ONTAP to accept peering requests.", +"readOnly": true, "type": "string" }, "commandExpiryTime": { -"description": "Optional. Expiration time for the peering command to be executed on user's ONTAP.", +"description": "Output only. Expiration time for the peering command to be executed on user's ONTAP.", "format": "google-datetime", +"readOnly": true, "type": "string" }, "passphrase": { -"description": "Optional. Temporary passphrase generated to accept cluster peering command.", +"description": "Output only. Temporary passphrase generated to accept cluster peering command.", +"readOnly": true, "type": "string" }, "peerClusterName": { -"description": "Optional. Name of the user's local source cluster to be peered with the destination cluster.", +"description": "Output only. Name of the user's local source cluster to be peered with the destination cluster.", +"readOnly": true, "type": "string" }, "peerSvmName": { -"description": "Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm.", +"description": "Output only. Name of the user's local source vserver svm to be peered with the destination vserver svm.", +"readOnly": true, "type": "string" }, "peerVolumeName": { -"description": "Optional. Name of the user's local source volume to be peered with the destination volume.", +"description": "Output only. Name of the user's local source volume to be peered with the destination volume.", +"readOnly": true, "type": "string" }, "subnetIp": { -"description": "Optional. IP address of the subnet.", +"description": "Output only. IP address of the subnet.", +"readOnly": true, "type": "string" } }, @@ -2904,6 +2911,24 @@ "description": "Optional. Description of the replication.", "type": "string" }, +"hybridReplicationType": { +"description": "Optional. 
Type of the hybrid replication.", +"enum": [ +"VOLUME_HYBRID_REPLICATION_TYPE_UNSPECIFIED", +"MIGRATION", +"CONTINUOUS_REPLICATION", +"ONPREM_REPLICATION", +"REVERSE_ONPREM_REPLICATION" +], +"enumDescriptions": [ +"Unspecified hybrid replication type.", +"Hybrid replication type for migration.", +"Hybrid replication type for continuous replication.", +"New field for reversible OnPrem replication, to be used for data protection.", +"New field for reversible OnPrem replication, to be used for data protection." +], +"type": "string" +}, "labels": { "additionalProperties": { "type": "string" @@ -2911,6 +2936,11 @@ "description": "Optional. Labels to be added to the replication as the key value pairs.", "type": "object" }, +"largeVolumeConstituentCount": { +"description": "Optional. Constituent volume count for large volume.", +"format": "int32", +"type": "integer" +}, "peerClusterName": { "description": "Required. Name of the user's local source cluster to be peered with the destination cluster.", "type": "string" @@ -2933,6 +2963,22 @@ "replication": { "description": "Required. Desired name for the replication of this volume.", "type": "string" +}, +"replicationSchedule": { +"description": "Optional. Replication Schedule for the replication created.", +"enum": [ +"HYBRID_REPLICATION_SCHEDULE_UNSPECIFIED", +"EVERY_10_MINUTES", +"HOURLY", +"DAILY" +], +"enumDescriptions": [ +"Unspecified HybridReplicationSchedule", +"Replication happens once every 10 minutes.", +"Replication happens once every hour.", +"Replication happens once every day." +], +"type": "string" } }, "type": "object" @@ -3652,16 +3698,25 @@ "enum": [ "HYBRID_REPLICATION_TYPE_UNSPECIFIED", "MIGRATION", -"CONTINUOUS_REPLICATION" +"CONTINUOUS_REPLICATION", +"ONPREM_REPLICATION", +"REVERSE_ONPREM_REPLICATION" ], "enumDescriptions": [ "Unspecified hybrid replication type.", "Hybrid replication type for migration.", -"Hybrid replication type for continuous replication." +"Hybrid replication type for continuous replication.", +"New field for reversible OnPrem replication, to be used for data protection.", +"Hybrid replication type for incremental Transfer in the reverse direction (GCNV is source and Onprem is destination)" ], "readOnly": true, "type": "string" }, +"hybridReplicationUserCommands": { +"$ref": "UserCommands", +"description": "Output only. Copy pastable snapmirror commands to be executed on onprem cluster by the customer.", +"readOnly": true +}, "labels": { "additionalProperties": { "type": "string" @@ -3678,7 +3733,8 @@ "STOPPED", "TRANSFERRING", "BASELINE_TRANSFERRING", -"ABORTED" +"ABORTED", +"EXTERNALLY_MANAGED" ], "enumDescriptions": [ "Unspecified MirrorState", @@ -3687,7 +3743,8 @@ "Destination volume is not receiving replication transfers.", "Incremental replication is in progress.", "Baseline replication is in progress.", -"Replication is aborted." +"Replication is aborted.", +"Mirror state for when replication is managed from Onprem ONTAP." ], "readOnly": true, "type": "string" @@ -3742,7 +3799,9 @@ "DELETING", "ERROR", "PENDING_CLUSTER_PEERING", -"PENDING_SVM_PEERING" +"PENDING_SVM_PEERING", +"PENDING_REMOTE_RESYNC", +"EXTERNALLY_MANAGED_REPLICATION" ], "enumDescriptions": [ "Unspecified replication State", @@ -3752,7 +3811,9 @@ "Replication is deleting.", "Replication is in error state.", "Replication is waiting for cluster peering to be established.", -"Replication is waiting for SVM peering to be established." 
+"Replication is waiting for SVM peering to be established.", +"Replication is waiting for Commands to be executed on Onprem ONTAP.", +"Onprem ONTAP is destination and Replication can only be managed from Onprem." ], "readOnly": true, "type": "string" @@ -4264,6 +4325,21 @@ }, "type": "object" }, +"UserCommands": { +"description": "UserCommands contains the commands to be executed by the customer.", +"id": "UserCommands", +"properties": { +"commands": { +"description": "Output only. List of commands to be executed by the customer.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, "ValidateDirectoryServiceRequest": { "description": "ValidateDirectoryServiceRequest validates the directory service policy attached to the storage pool.", "id": "ValidateDirectoryServiceRequest", diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json index 35b64c7fc0..92fead1a3d 100644 --- a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json @@ -764,7 +764,7 @@ } } }, -"revision": "20250423", +"revision": "20250521", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { @@ -1208,6 +1208,26 @@ false "description": "Details of the final state \"deliver\" and associated resource.", "id": "DeliverInfo", "properties": { +"googleServiceType": { +"description": "Recognized type of a Google Service the packet is delivered to (if applicable).", +"enum": [ +"GOOGLE_SERVICE_TYPE_UNSPECIFIED", +"IAP", +"GFE_PROXY_OR_HEALTH_CHECK_PROBER", +"CLOUD_DNS", +"PRIVATE_GOOGLE_ACCESS", +"SERVERLESS_VPC_ACCESS" +], +"enumDescriptions": [ +"Unspecified Google Service.", +"Identity aware proxy. https://cloud.google.com/iap/docs/using-tcp-forwarding", +"One of two services sharing IP ranges: * Load Balancer proxy * Centralized Health Check prober https://cloud.google.com/load-balancing/docs/firewall-rules", +"Connectivity from Cloud DNS to forwarding targets or alternate name servers that use private routing. https://cloud.google.com/dns/docs/zones/forwarding-zones#firewall-rules https://cloud.google.com/dns/docs/policies#firewall-rules", +"private.googleapis.com and restricted.googleapis.com", +"Google API via Private Service Connect. https://cloud.google.com/vpc/docs/configure-private-service-connect-apis Google API via Serverless VPC Access. 
https://cloud.google.com/vpc/docs/serverless-vpc-access" +], +"type": "string" +}, "ipAddress": { "description": "IP address of the target (if applicable).", "type": "string" @@ -1320,6 +1340,7 @@ false "ROUTE_NEXT_HOP_VPN_TUNNEL_NOT_ESTABLISHED", "ROUTE_NEXT_HOP_FORWARDING_RULE_TYPE_INVALID", "NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV6_ADDRESS", +"NO_ROUTE_FROM_EXTERNAL_IPV6_SOURCE_TO_PRIVATE_IPV6_ADDRESS", "VPN_TUNNEL_LOCAL_SELECTOR_MISMATCH", "VPN_TUNNEL_REMOTE_SELECTOR_MISMATCH", "PRIVATE_TRAFFIC_TO_INTERNET", @@ -1395,7 +1416,8 @@ false "UNSUPPORTED_ROUTE_MATCHED_FOR_NAT64_DESTINATION", "TRAFFIC_FROM_HYBRID_ENDPOINT_TO_INTERNET_DISALLOWED", "NO_MATCHING_NAT64_GATEWAY", -"LOAD_BALANCER_BACKEND_IP_VERSION_MISMATCH" +"LOAD_BALANCER_BACKEND_IP_VERSION_MISMATCH", +"NO_KNOWN_ROUTE_FROM_NCC_NETWORK_TO_DESTINATION" ], "enumDescriptions": [ "Cause is unspecified.", @@ -1412,7 +1434,8 @@ false "Route's next hop forwarding rule doesn't match next hop IP address.", "Route's next hop VPN tunnel is down (does not have valid IKE SAs).", "Route's next hop forwarding rule type is invalid (it's not a forwarding rule of the internal passthrough load balancer).", -"Packet is sent from the Internet to the private IPv6 address.", +"Packet is sent from the Internet or Google service to the private IPv6 address.", +"Packet is sent from the external IPv6 source address of an instance to the private IPv6 address of an instance.", "The packet does not match a policy-based VPN tunnel local selector.", "The packet does not match a policy-based VPN tunnel remote selector.", "Packet with internal destination address sent to the internet gateway.", @@ -1488,7 +1511,8 @@ false "Packet with destination IP address within the reserved NAT64 range is dropped due to matching a route of an unsupported type.", "Packet could be dropped because hybrid endpoint like a VPN gateway or Interconnect is not allowed to send traffic to the Internet.", "Packet with destination IP address within the reserved NAT64 range is dropped due to no matching NAT gateway in the subnet.", -"Packet is dropped due to being sent to a backend of a passthrough load balancer that doesn't use the same IP version as the frontend." +"Packet is dropped due to being sent to a backend of a passthrough load balancer that doesn't use the same IP version as the frontend.", +"Packet from the unknown NCC network is dropped due to no known route from the source network to the destination IP address." ], "type": "string" }, @@ -2419,7 +2443,7 @@ false "type": "object" }, "NetworkInfo": { -"description": "For display only. Metadata associated with a Compute Engine network. Next ID: 7", +"description": "For display only. 
Metadata associated with a Compute Engine network.", "id": "NetworkInfo", "properties": { "displayName": { diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json index 1c51dd323c..81d4a04a4c 100644 --- a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json @@ -973,7 +973,7 @@ } } }, -"revision": "20250423", +"revision": "20250521", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { @@ -1417,6 +1417,26 @@ false "description": "Details of the final state \"deliver\" and associated resource.", "id": "DeliverInfo", "properties": { +"googleServiceType": { +"description": "Recognized type of a Google Service the packet is delivered to (if applicable).", +"enum": [ +"GOOGLE_SERVICE_TYPE_UNSPECIFIED", +"IAP", +"GFE_PROXY_OR_HEALTH_CHECK_PROBER", +"CLOUD_DNS", +"PRIVATE_GOOGLE_ACCESS", +"SERVERLESS_VPC_ACCESS" +], +"enumDescriptions": [ +"Unspecified Google Service.", +"Identity aware proxy. https://cloud.google.com/iap/docs/using-tcp-forwarding", +"One of two services sharing IP ranges: * Load Balancer proxy * Centralized Health Check prober https://cloud.google.com/load-balancing/docs/firewall-rules", +"Connectivity from Cloud DNS to forwarding targets or alternate name servers that use private routing. https://cloud.google.com/dns/docs/zones/forwarding-zones#firewall-rules https://cloud.google.com/dns/docs/policies#firewall-rules", +"private.googleapis.com and restricted.googleapis.com", +"Google API via Serverless VPC Access. https://cloud.google.com/vpc/docs/serverless-vpc-access" +], +"type": "string" +}, "ipAddress": { "description": "IP address of the target (if applicable).", "type": "string" @@ -1529,6 +1549,7 @@ false "ROUTE_NEXT_HOP_VPN_TUNNEL_NOT_ESTABLISHED", "ROUTE_NEXT_HOP_FORWARDING_RULE_TYPE_INVALID", "NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV6_ADDRESS", +"NO_ROUTE_FROM_EXTERNAL_IPV6_SOURCE_TO_PRIVATE_IPV6_ADDRESS", "VPN_TUNNEL_LOCAL_SELECTOR_MISMATCH", "VPN_TUNNEL_REMOTE_SELECTOR_MISMATCH", "PRIVATE_TRAFFIC_TO_INTERNET", @@ -1604,7 +1625,8 @@ false "UNSUPPORTED_ROUTE_MATCHED_FOR_NAT64_DESTINATION", "TRAFFIC_FROM_HYBRID_ENDPOINT_TO_INTERNET_DISALLOWED", "NO_MATCHING_NAT64_GATEWAY", -"LOAD_BALANCER_BACKEND_IP_VERSION_MISMATCH" +"LOAD_BALANCER_BACKEND_IP_VERSION_MISMATCH", +"NO_KNOWN_ROUTE_FROM_NCC_NETWORK_TO_DESTINATION" ], "enumDescriptions": [ "Cause is unspecified.", @@ -1621,7 +1643,8 @@ false "Route's next hop forwarding rule doesn't match next hop IP address.", "Route's next hop VPN tunnel is down (does not have valid IKE SAs).", "Route's next hop forwarding rule type is invalid (it's not a forwarding rule of the internal passthrough load balancer).", -"Packet is sent from the Internet to the private IPv6 address.", +"Packet is sent from the Internet or Google service to the private IPv6 address.", +"Packet is sent from the external IPv6 source address of an instance to the private IPv6 address of an instance.", "The packet does not match a policy-based VPN tunnel local selector.", "The packet does not match a policy-based VPN tunnel remote selector.", "Packet with internal destination address sent to the internet gateway.", @@ -1697,7 +1720,8 @@ false "Packet with destination IP address within the reserved NAT64 range is dropped due to matching a route of an unsupported type.", "Packet could be dropped because hybrid endpoint like a VPN gateway 
or Interconnect is not allowed to send traffic to the Internet.", "Packet with destination IP address within the reserved NAT64 range is dropped due to no matching NAT gateway in the subnet.", -"Packet is dropped due to being sent to a backend of a passthrough load balancer that doesn't use the same IP version as the frontend." +"Packet is dropped due to being sent to a backend of a passthrough load balancer that doesn't use the same IP version as the frontend.", +"Packet from the unknown NCC network is dropped due to no known route from the source network to the destination IP address." ], "type": "string" }, @@ -2632,7 +2656,7 @@ false "type": "object" }, "NetworkInfo": { -"description": "For display only. Metadata associated with a Compute Engine network. Next ID: 7", +"description": "For display only. Metadata associated with a Compute Engine network.", "id": "NetworkInfo", "properties": { "displayName": { diff --git a/googleapiclient/discovery_cache/documents/notebooks.v2.json b/googleapiclient/discovery_cache/documents/notebooks.v2.json index dae512ee8a..53556873f7 100644 --- a/googleapiclient/discovery_cache/documents/notebooks.v2.json +++ b/googleapiclient/discovery_cache/documents/notebooks.v2.json @@ -910,7 +910,7 @@ } } }, -"revision": "20250430", +"revision": "20250507", "rootUrl": "https://notebooks.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -1493,7 +1493,7 @@ "type": "string" }, "instanceOwners": { -"description": "Optional. Input only. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance.", +"description": "Optional. The owner of this instance after creation. Format: `alias@example.com` Currently supports one owner only. 
If not specified, all of the service account users of your VM instance's service account can use the instance.", "items": { "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json index 101c3f9fe3..511a6bccf6 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json @@ -339,7 +339,7 @@ } } }, -"revision": "20250505", +"revision": "20250519", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { @@ -1122,6 +1122,13 @@ "description": "The CPE of the resource being scanned.", "type": "string" }, +"files": { +"description": "Files that make up the resource described by the occurrence.", +"items": { +"$ref": "File" +}, +"type": "array" +}, "lastScanTime": { "description": "The last time this resource was scanned.", "format": "google-datetime", @@ -1173,6 +1180,21 @@ }, "type": "object" }, +"File": { +"id": "File", +"properties": { +"digest": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"name": { +"type": "string" +} +}, +"type": "object" +}, "FileHashes": { "description": "Container message for hashes of byte content of files, used in source messages to verify integrity of source input to the build.", "id": "FileHashes", diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json index 5b9d47d881..624b05f6d0 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json @@ -339,7 +339,7 @@ } } }, -"revision": "20250505", +"revision": "20250519", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { @@ -1117,6 +1117,13 @@ "description": "The CPE of the resource being scanned.", "type": "string" }, +"files": { +"description": "Files that make up the resource described by the occurrence.", +"items": { +"$ref": "File" +}, +"type": "array" +}, "lastScanTime": { "description": "The last time this resource was scanned.", "format": "google-datetime", @@ -1168,6 +1175,21 @@ }, "type": "object" }, +"File": { +"id": "File", +"properties": { +"digest": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"name": { +"type": "string" +} +}, +"type": "object" +}, "FileHashes": { "description": "Container message for hashes of byte content of files, used in source messages to verify integrity of source input to the build.", "id": "FileHashes", diff --git a/googleapiclient/discovery_cache/documents/oracledatabase.v1.json b/googleapiclient/discovery_cache/documents/oracledatabase.v1.json index 62337dd8a6..946c888bef 100644 --- a/googleapiclient/discovery_cache/documents/oracledatabase.v1.json +++ b/googleapiclient/discovery_cache/documents/oracledatabase.v1.json @@ -970,7 +970,7 @@ "type": "string" }, "parent": { -"description": "Required. The parent value for database node in the following format: projects/{project}/locations/{location}/cloudVmClusters/{cloudVmCluster}.", +"description": "Required. The parent value for database node in the following format: projects/{project}/locations/{location}/cloudVmClusters/{cloudVmCluster}. 
.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/cloudVmClusters/[^/]+$", "required": true, @@ -1242,7 +1242,7 @@ } } }, -"revision": "20250430", +"revision": "20250509", "rootUrl": "https://oracledatabase.googleapis.com/", "schemas": { "AllConnectionStrings": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1.json b/googleapiclient/discovery_cache/documents/oslogin.v1.json index 0a60096f56..fabef34b36 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1.json @@ -133,12 +133,12 @@ "type": "string" }, "projectId": { -"description": "The project ID of the Google Cloud Platform project.", +"description": "Required. The project ID of the Google Cloud Platform project.", "location": "query", "type": "string" }, "systemId": { -"description": "A system ID for filtering the results of the request.", +"description": "Optional. A system ID for filtering the results of the request.", "location": "query", "type": "string" } @@ -326,7 +326,7 @@ "type": "string" }, "updateMask": { -"description": "Mask to control which fields get updated. Updates all if not present.", +"description": "Optional. Mask to control which fields get updated. Updates all if not present.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -349,7 +349,7 @@ } } }, -"revision": "20250317", +"revision": "20250518", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json index f0c0221f41..77d25fd546 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json @@ -169,7 +169,7 @@ "type": "string" }, "operatingSystemType": { -"description": "The type of operating system associated with the account.", +"description": "Optional. The type of operating system associated with the account.", "enum": [ "OPERATING_SYSTEM_TYPE_UNSPECIFIED", "LINUX", @@ -184,12 +184,12 @@ "type": "string" }, "projectId": { -"description": "The project ID of the Google Cloud Platform project.", +"description": "Required. The project ID of the Google Cloud Platform project.", "location": "query", "type": "string" }, "systemId": { -"description": "A system ID for filtering the results of the request.", +"description": "Optional. A system ID for filtering the results of the request.", "location": "query", "type": "string" }, @@ -296,7 +296,7 @@ "type": "string" }, "operatingSystemType": { -"description": "The type of operating system associated with the account.", +"description": "Optional. The type of operating system associated with the account.", "enum": [ "OPERATING_SYSTEM_TYPE_UNSPECIFIED", "LINUX", @@ -363,7 +363,7 @@ ], "parameters": { "parent": { -"description": "The parent project and region for the signing request.", +"description": "Required. The parent project and region for the signing request.", "location": "path", "pattern": "^users/[^/]+/projects/[^/]+/locations/[^/]+$", "required": true, @@ -396,7 +396,7 @@ ], "parameters": { "parent": { -"description": "The parent project and region for the signing request.", +"description": "Required. 
The parent project and region for the signing request.", "location": "path", "pattern": "^users/[^/]+/projects/[^/]+/zones/[^/]+$", "required": true, @@ -519,7 +519,7 @@ "type": "string" }, "updateMask": { -"description": "Mask to control which fields get updated. Updates all if not present.", +"description": "Optional. Mask to control which fields get updated. Updates all if not present.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -542,7 +542,7 @@ } } }, -"revision": "20250504", +"revision": "20250518", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { @@ -742,7 +742,7 @@ "id": "SignSshPublicKeyRequest", "properties": { "sshPublicKey": { -"description": "The SSH public key to sign.", +"description": "Required. The SSH public key to sign.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json index 661dec5418..c1fe4545c9 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json @@ -169,12 +169,12 @@ "type": "string" }, "projectId": { -"description": "The project ID of the Google Cloud Platform project.", +"description": "Required. The project ID of the Google Cloud Platform project.", "location": "query", "type": "string" }, "systemId": { -"description": "A system ID for filtering the results of the request.", +"description": "Optional. A system ID for filtering the results of the request.", "location": "query", "type": "string" }, @@ -333,7 +333,7 @@ ], "parameters": { "parent": { -"description": "The parent project and region for the signing request.", +"description": "Required. The parent project and region for the signing request.", "location": "path", "pattern": "^users/[^/]+/projects/[^/]+/locations/[^/]+$", "required": true, @@ -366,7 +366,7 @@ ], "parameters": { "parent": { -"description": "The parent project and region for the signing request.", +"description": "Required. The parent project and region for the signing request.", "location": "path", "pattern": "^users/[^/]+/projects/[^/]+/zones/[^/]+$", "required": true, @@ -512,7 +512,7 @@ } } }, -"revision": "20250504", +"revision": "20250518", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { @@ -712,7 +712,7 @@ "id": "SignSshPublicKeyRequest", "properties": { "sshPublicKey": { -"description": "The SSH public key to sign.", +"description": "Required. 
The SSH public key to sign.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/playintegrity.v1.json b/googleapiclient/discovery_cache/documents/playintegrity.v1.json index e136b93b54..9ee45d1d1b 100644 --- a/googleapiclient/discovery_cache/documents/playintegrity.v1.json +++ b/googleapiclient/discovery_cache/documents/playintegrity.v1.json @@ -166,11 +166,39 @@ "scopes": [ "https://www.googleapis.com/auth/playintegrity" ] +}, +"decodePcIntegrityToken": { +"description": "Decodes the PC integrity token and returns the PC token payload.", +"flatPath": "v1/{v1Id}:decodePcIntegrityToken", +"httpMethod": "POST", +"id": "playintegrity.decodePcIntegrityToken", +"parameterOrder": [ +"packageName" +], +"parameters": { +"packageName": { +"description": "Package name of the app the attached integrity token belongs to.", +"location": "path", +"pattern": "^[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+packageName}:decodePcIntegrityToken", +"request": { +"$ref": "DecodePcIntegrityTokenRequest" +}, +"response": { +"$ref": "DecodePcIntegrityTokenResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/playintegrity" +] } } } }, -"revision": "20250223", +"revision": "20250514", "rootUrl": "https://playintegrity.googleapis.com/", "schemas": { "AccountActivity": { @@ -324,6 +352,28 @@ }, "type": "object" }, +"DecodePcIntegrityTokenRequest": { +"description": "Request to decode the PC integrity token.", +"id": "DecodePcIntegrityTokenRequest", +"properties": { +"integrityToken": { +"description": "Encoded integrity token.", +"type": "string" +} +}, +"type": "object" +}, +"DecodePcIntegrityTokenResponse": { +"description": "Response containing the decoded PC integrity payload.", +"id": "DecodePcIntegrityTokenResponse", +"properties": { +"tokenPayloadExternal": { +"$ref": "PcTokenPayloadExternal", +"description": "Plain token payload generated from the decoded integrity token." +} +}, +"type": "object" +}, "DeviceAttributes": { "description": "Contains information about the device for which the integrity token was generated, e.g. Android SDK version.", "id": "DeviceAttributes", @@ -445,6 +495,63 @@ }, "type": "object" }, +"PcDeviceIntegrity": { +"description": "Contains the device attestation information.", +"id": "PcDeviceIntegrity", +"properties": { +"deviceRecognitionVerdict": { +"description": "Details about the integrity of the device the app is running on.", +"items": { +"enum": [ +"DEVICE_RECOGNITION_VERDICT_UNSPECIFIED", +"MEETS_PC_INTEGRITY" +], +"enumDescriptions": [ +"Unspecified device integrity.", +"App is running on Windows Device with Google Desktop Services." +], +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"PcRequestDetails": { +"description": "Contains the integrity request information.", +"id": "PcRequestDetails", +"properties": { +"requestHash": { +"description": "Request hash that was provided in the request.", +"type": "string" +}, +"requestPackageName": { +"description": "Required. Application package name this attestation was requested for. Note: This field makes no guarantees or promises on the caller integrity.", +"type": "string" +}, +"requestTime": { +"description": "Required. 
Timestamp of the integrity application request.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"PcTokenPayloadExternal": { +"description": "Contains PC device attestation details.", +"id": "PcTokenPayloadExternal", +"properties": { +"deviceIntegrity": { +"$ref": "PcDeviceIntegrity", +"description": "Required. Details about the device integrity." +}, +"requestDetails": { +"$ref": "PcRequestDetails", +"description": "Required. Details about the integrity request." +} +}, +"type": "object" +}, "RecentDeviceActivity": { "description": "Recent device activity can help developers identify devices that have exhibited hyperactive attestation activity, which could be a sign of an attack or token farming.", "id": "RecentDeviceActivity", diff --git a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json index 266d5456f5..83ad4aacfa 100644 --- a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json @@ -786,7 +786,7 @@ } } }, -"revision": "20250504", +"revision": "20250518", "rootUrl": "https://recaptchaenterprise.googleapis.com/", "schemas": { "GoogleCloudRecaptchaenterpriseV1AccountDefenderAssessment": { @@ -2487,12 +2487,19 @@ true "ACTION_TOKEN", "EXPRESS" ], +"enumDeprecated": [ +false, +false, +false, +false, +true +], "enumDescriptions": [ "Undefined feature.", "Redirects suspicious traffic to reCAPTCHA.", "Use reCAPTCHA session-tokens to protect the whole user session on the site's domain.", "Use reCAPTCHA action-tokens to protect user actions.", -"Use reCAPTCHA WAF express protection to protect any content other than web pages, like APIs and IoT devices." +"Deprecated: Use `express_settings` instead." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/redis.v1.json b/googleapiclient/discovery_cache/documents/redis.v1.json index 689e830ac8..e48696f8aa 100644 --- a/googleapiclient/discovery_cache/documents/redis.v1.json +++ b/googleapiclient/discovery_cache/documents/redis.v1.json @@ -1073,7 +1073,7 @@ } } }, -"revision": "20250418", +"revision": "20250512", "rootUrl": "https://redis.googleapis.com/", "schemas": { "AOFConfig": { @@ -1562,6 +1562,7 @@ "type": "string" }, "ondemandMaintenance": { +"deprecated": true, "description": "Optional. Input only. Ondemand maintenance for the cluster. This field can be used to trigger ondemand critical update on the cluster.", "type": "boolean" }, diff --git a/googleapiclient/discovery_cache/documents/redis.v1beta1.json b/googleapiclient/discovery_cache/documents/redis.v1beta1.json index 4531405fc5..c890e2b3d5 100644 --- a/googleapiclient/discovery_cache/documents/redis.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/redis.v1beta1.json @@ -1073,7 +1073,7 @@ } } }, -"revision": "20250418", +"revision": "20250512", "rootUrl": "https://redis.googleapis.com/", "schemas": { "AOFConfig": { @@ -1562,6 +1562,7 @@ "type": "string" }, "ondemandMaintenance": { +"deprecated": true, "description": "Optional. Input only. Ondemand maintenance for the cluster. 
This field can be used to trigger ondemand critical update on the cluster.", "type": "boolean" }, diff --git a/googleapiclient/discovery_cache/documents/retail.v2.json b/googleapiclient/discovery_cache/documents/retail.v2.json index 791c826e27..8de8037532 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2.json +++ b/googleapiclient/discovery_cache/documents/retail.v2.json @@ -2223,7 +2223,7 @@ } } }, -"revision": "20250508", +"revision": "20250515", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -3216,6 +3216,21 @@ }, "type": "object" }, +"GoogleCloudRetailV2DoubleList": { +"description": "A message with a list of double values.", +"id": "GoogleCloudRetailV2DoubleList", +"properties": { +"values": { +"description": "The list of double values.", +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudRetailV2ExperimentInfo": { "description": "Metadata for active A/B testing experiment.", "id": "GoogleCloudRetailV2ExperimentInfo", @@ -5755,6 +5770,13 @@ "description": "If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key \"sku1\" with field mask \"products.color_info\" indicates there is a match between \"sku1\" ColorInfo and the query.", "type": "object" }, +"modelScores": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2DoubleList" +}, +"description": "Google provided available scores.", +"type": "object" +}, "personalLabels": { "description": "Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before.", "items": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2alpha.json b/googleapiclient/discovery_cache/documents/retail.v2alpha.json index a133a427a9..1e0aab0cf7 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/retail.v2alpha.json @@ -2800,7 +2800,7 @@ } } }, -"revision": "20250501", +"revision": "20250515", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -4824,6 +4824,21 @@ }, "type": "object" }, +"GoogleCloudRetailV2alphaDoubleList": { +"description": "A message with a list of double values.", +"id": "GoogleCloudRetailV2alphaDoubleList", +"properties": { +"values": { +"description": "The list of double values.", +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudRetailV2alphaEnrollSolutionMetadata": { "description": "Metadata related to the EnrollSolution method. 
This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudRetailV2alphaEnrollSolutionMetadata", @@ -5746,8 +5761,14 @@ "description": "Merchant Center Feed filter criterion.", "id": "GoogleCloudRetailV2alphaMerchantCenterAccountLinkMerchantCenterFeedFilter", "properties": { +"dataSourceId": { +"description": "AFM data source ID.", +"format": "int64", +"type": "string" +}, "primaryFeedId": { -"description": "Merchant Center primary feed ID.", +"deprecated": true, +"description": "Merchant Center primary feed ID. Deprecated: use data_source_id instead.", "format": "int64", "type": "string" }, @@ -5762,8 +5783,14 @@ "description": "Merchant Center Feed filter criterion.", "id": "GoogleCloudRetailV2alphaMerchantCenterFeedFilter", "properties": { +"dataSourceId": { +"description": "AFM data source ID.", +"format": "int64", +"type": "string" +}, "primaryFeedId": { -"description": "Merchant Center primary feed ID.", +"deprecated": true, +"description": "Merchant Center primary feed ID. Deprecated: use data_source_id instead.", "format": "int64", "type": "string" }, @@ -7907,6 +7934,13 @@ "description": "If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. For example, a key \"sku1\" with field mask \"products.color_info\" indicates there is a match between \"sku1\" ColorInfo and the query.", "type": "object" }, +"modelScores": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2alphaDoubleList" +}, +"description": "Google provided available scores.", +"type": "object" +}, "personalLabels": { "description": "Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before.", "items": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2beta.json b/googleapiclient/discovery_cache/documents/retail.v2beta.json index fd5ff74b0b..a5e9b0993b 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2beta.json +++ b/googleapiclient/discovery_cache/documents/retail.v2beta.json @@ -2368,7 +2368,7 @@ } } }, -"revision": "20250508", +"revision": "20250515", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -4881,6 +4881,21 @@ }, "type": "object" }, +"GoogleCloudRetailV2betaDoubleList": { +"description": "A message with a list of double values.", +"id": "GoogleCloudRetailV2betaDoubleList", +"properties": { +"values": { +"description": "The list of double values.", +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudRetailV2betaExperimentInfo": { "description": "Metadata for active A/B testing experiment.", "id": "GoogleCloudRetailV2betaExperimentInfo", @@ -7568,6 +7583,13 @@ "description": "If a variant Product matches the search query, this map indicates which Product fields are matched. The key is the Product.name, the value is a field mask of the matched Product fields. If matched attributes cannot be determined, this map will be empty. 
For example, a key \"sku1\" with field mask \"products.color_info\" indicates there is a match between \"sku1\" ColorInfo and the query.", "type": "object" }, +"modelScores": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2betaDoubleList" +}, +"description": "Google provided available scores.", +"type": "object" +}, "personalLabels": { "description": "Specifies previous events related to this product for this user based on UserEvent with same SearchRequest.visitor_id or UserInfo.user_id. This is set only when SearchRequest.PersonalizationSpec.mode is SearchRequest.PersonalizationSpec.Mode.AUTO. Possible values: * `purchased`: Indicates that this product has been purchased before.", "items": { diff --git a/googleapiclient/discovery_cache/documents/safebrowsing.v5.json b/googleapiclient/discovery_cache/documents/safebrowsing.v5.json index 4a0a534447..fbdcf29064 100644 --- a/googleapiclient/discovery_cache/documents/safebrowsing.v5.json +++ b/googleapiclient/discovery_cache/documents/safebrowsing.v5.json @@ -96,6 +96,115 @@ }, "protocol": "rest", "resources": { +"hashList": { +"methods": { +"get": { +"description": "Get the latest contents of a hash list. A hash list may either by a threat list or a non-threat list such as the Global Cache. This is a standard Get method as defined by https://google.aip.dev/131 and the HTTP method is also GET.", +"flatPath": "v5/hashList/{name}", +"httpMethod": "GET", +"id": "safebrowsing.hashList.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of this particular hash list. It may be a threat list, or it may be the Global Cache.", +"location": "path", +"required": true, +"type": "string" +}, +"sizeConstraints.maxDatabaseEntries": { +"description": "Sets the maximum number of entries that the client is willing to have in the local database for the list. (The server MAY cause the client to store less than this number of entries.) If omitted or zero, no database size limit is set.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"sizeConstraints.maxUpdateEntries": { +"description": "The maximum size in number of entries. The update will not contain more entries than this value, but it is possible that the update will contain fewer entries than this value. This MUST be at least 1024. If omitted or zero, no update size limit is set.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"version": { +"description": "The version of the hash list that the client already has. If this is the first time the client is fetching the hash list, this field MUST be left empty. Otherwise, the client SHOULD supply the version previously received from the server. The client MUST NOT manipulate those bytes. **What's new in V5**: in V4 of the API, this was called `states`; it is now renamed to `version` for clarity.", +"format": "byte", +"location": "query", +"type": "string" +} +}, +"path": "v5/hashList/{name}", +"response": { +"$ref": "GoogleSecuritySafebrowsingV5HashList" +} +} +} +}, +"hashLists": { +"methods": { +"batchGet": { +"description": "Get multiple hash lists at once. It is very common for a client to need to get multiple hash lists. Using this method is preferred over using the regular Get method multiple times. 
This is a standard batch Get method as defined by https://google.aip.dev/231 and the HTTP method is also GET.", +"flatPath": "v5/hashLists:batchGet", +"httpMethod": "GET", +"id": "safebrowsing.hashLists.batchGet", +"parameterOrder": [], +"parameters": { +"names": { +"description": "Required. The names of the particular hash lists. The list MAY be a threat list, or it may be the Global Cache. The names MUST NOT contain duplicates; if they did, the client will get an error.", +"location": "query", +"repeated": true, +"type": "string" +}, +"sizeConstraints.maxDatabaseEntries": { +"description": "Sets the maximum number of entries that the client is willing to have in the local database for the list. (The server MAY cause the client to store less than this number of entries.) If omitted or zero, no database size limit is set.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"sizeConstraints.maxUpdateEntries": { +"description": "The maximum size in number of entries. The update will not contain more entries than this value, but it is possible that the update will contain fewer entries than this value. This MUST be at least 1024. If omitted or zero, no update size limit is set.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"version": { +"description": "The versions of the hash list that the client already has. If this is the first time the client is fetching the hash lists, the field should be left empty. Otherwise, the client should supply the versions previously received from the server. The client MUST NOT manipulate those bytes. The client need not send the versions in the same order as the corresponding list names. The client may send fewer or more versions in a request than there are names. However the client MUST NOT send multiple versions that correspond to the same name; if it did, the client will get an error. Historical note: in V4 of the API, this was called `states`; it is now renamed to `version` for clarity.", +"format": "byte", +"location": "query", +"repeated": true, +"type": "string" +} +}, +"path": "v5/hashLists:batchGet", +"response": { +"$ref": "GoogleSecuritySafebrowsingV5BatchGetHashListsResponse" +} +}, +"list": { +"description": "List hash lists. In the V5 API, Google will never remove a hash list that has ever been returned by this method. This enables clients to skip using this method and simply hard-code all hash lists they need. This is a standard List method as defined by https://google.aip.dev/132 and the HTTP method is GET.", +"flatPath": "v5/hashLists", +"httpMethod": "GET", +"id": "safebrowsing.hashLists.list", +"parameterOrder": [], +"parameters": { +"pageSize": { +"description": "The maximum number of hash lists to return. The service may return fewer than this value. If unspecified, the server will choose a page size, which may be larger than the number of hash lists so that pagination is not necessary.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "A page token, received from a previous `ListHashLists` call. 
Provide this to retrieve the subsequent page.", +"location": "query", +"type": "string" +} +}, +"path": "v5/hashLists", +"response": { +"$ref": "GoogleSecuritySafebrowsingV5ListHashListsResponse" +} +} +} +}, "hashes": { "methods": { "search": { @@ -121,9 +230,23 @@ } } }, -"revision": "20240630", +"revision": "20250518", "rootUrl": "https://safebrowsing.googleapis.com/", "schemas": { +"GoogleSecuritySafebrowsingV5BatchGetHashListsResponse": { +"description": "The response containing multiple hash lists.", +"id": "GoogleSecuritySafebrowsingV5BatchGetHashListsResponse", +"properties": { +"hashLists": { +"description": "The hash lists in the same order given in the request.", +"items": { +"$ref": "GoogleSecuritySafebrowsingV5HashList" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleSecuritySafebrowsingV5FullHash": { "description": "The full hash identified with one or more matches.", "id": "GoogleSecuritySafebrowsingV5FullHash", @@ -185,6 +308,275 @@ }, "type": "object" }, +"GoogleSecuritySafebrowsingV5HashList": { +"description": "A list of hashes identified by its name.", +"id": "GoogleSecuritySafebrowsingV5HashList", +"properties": { +"additionsEightBytes": { +"$ref": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded64Bit", +"description": "The 8-byte additions." +}, +"additionsFourBytes": { +"$ref": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded32Bit", +"description": "The 4-byte additions." +}, +"additionsSixteenBytes": { +"$ref": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded128Bit", +"description": "The 16-byte additions." +}, +"additionsThirtyTwoBytes": { +"$ref": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded256Bit", +"description": "The 32-byte additions." +}, +"compressedRemovals": { +"$ref": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded32Bit", +"description": "The Rice-delta encoded version of removal indices. Since each hash list definitely has less than 2^32 entries, the indices are treated as 32-bit integers and encoded." +}, +"metadata": { +"$ref": "GoogleSecuritySafebrowsingV5HashListMetadata", +"description": "Metadata about the hash list. This is not populated by the `GetHashList` method, but this is populated by the `ListHashLists` method." +}, +"minimumWaitDuration": { +"description": "Clients should wait at least this long to get the hash list again. If omitted or zero, clients SHOULD fetch immediately because it indicates that the server has an additional update to be sent to the client, but could not due to the client-specified constraints.", +"format": "google-duration", +"type": "string" +}, +"name": { +"description": "The name of the hash list. Note that the Global Cache is also just a hash list and can be referred to here.", +"type": "string" +}, +"partialUpdate": { +"description": "When true, this is a partial diff containing additions and removals based on what the client already has. When false, this is the complete hash list. When false, the client MUST delete any locally stored version for this hash list. This means that either the version possessed by the client is seriously out-of-date or the client data is believed to be corrupt. The `compressed_removals` field will be empty. When true, the client MUST apply an incremental update by applying removals and then additions.", +"type": "boolean" +}, +"sha256Checksum": { +"description": "The sorted list of all hashes, hashed again with SHA256. This is the checksum for the sorted list of all hashes present in the database after applying the provided update. 
In the case that no updates were provided, the server will omit this field to indicate that the client should use the existing checksum.", +"format": "byte", +"type": "string" +}, +"version": { +"description": "The version of the hash list. The client MUST NOT manipulate those bytes.", +"format": "byte", +"type": "string" +} +}, +"type": "object" +}, +"GoogleSecuritySafebrowsingV5HashListMetadata": { +"description": "Metadata about a particular hash list.", +"id": "GoogleSecuritySafebrowsingV5HashListMetadata", +"properties": { +"description": { +"description": "A human-readable description about this list. Written in English.", +"type": "string" +}, +"hashLength": { +"description": "The supported hash length for this hash list. Each hash list will support exactly one length. If a different hash length is introduced for the same set of threat types or safe types, it will be introduced as a separate list with a distinct name and respective hash length set.", +"enum": [ +"HASH_LENGTH_UNSPECIFIED", +"FOUR_BYTES", +"EIGHT_BYTES", +"SIXTEEN_BYTES", +"THIRTY_TWO_BYTES" +], +"enumDescriptions": [ +"Unspecified length.", +"Each hash is a four-byte prefix.", +"Each hash is an eight-byte prefix.", +"Each hash is a sixteen-byte prefix.", +"Each hash is a thirty-two-byte full hash." +], +"type": "string" +}, +"likelySafeTypes": { +"description": "Unordered list. If not empty, this specifies that the hash list represents a list of likely safe hashes, and this enumerates the ways they are considered likely safe. This field is mutually exclusive with the threat_types field.", +"items": { +"enum": [ +"LIKELY_SAFE_TYPE_UNSPECIFIED", +"GENERAL_BROWSING", +"CSD", +"DOWNLOAD" +], +"enumDescriptions": [ +"Unknown.", +"This site is likely safe enough for general browsing. This is also known as the global cache.", +"This site is likely safe enough that there is no need to run Client-Side Detection models or password protection checks.", +"This site is likely safe enough that downloads from the site need not be checked." +], +"type": "string" +}, +"type": "array" +}, +"threatTypes": { +"description": "Unordered list. If not empty, this specifies that the hash list is a kind of threat list, and this enumerates the kind of threats associated with hashes or hash prefixes in this hash list. May be empty if the entry does not represent a threat, i.e. in the case that it represents a likely safe type.", +"items": { +"enum": [ +"THREAT_TYPE_UNSPECIFIED", +"MALWARE", +"SOCIAL_ENGINEERING", +"UNWANTED_SOFTWARE", +"POTENTIALLY_HARMFUL_APPLICATION" +], +"enumDescriptions": [ +"Unknown threat type. If this is returned by the server, the client shall disregard the enclosing `FullHashDetail` altogether.", +"Malware threat type. Malware is any software or mobile application specifically designed to harm a computer, a mobile device, the software it's running, or its users. Malware exhibits malicious behavior that can include installing software without user consent and installing harmful software such as viruses. More information can be found [here](https://developers.google.com/search/docs/monitor-debug/security/malware).", +"Social engineering threat type. Social engineering pages falsely purport to act on behalf of a third party with the intention of confusing viewers into performing an action with which the viewer would only trust a true agent of that third party. Phishing is a type of social engineering that tricks the viewer into performing the specific action of providing information, such as login credentials. 
More information can be found [here](https://developers.google.com/search/docs/monitor-debug/security/social-engineering).", +"Unwanted software threat type. Unwanted software is any software that does not adhere to [Google's Software Principles](https://www.google.com/about/software-principles.html) but isn't malware.", +"Potentially harmful application threat type [as used by Google Play Protect for the Play Store](https://developers.google.com/android/play-protect/potentially-harmful-applications)." +], +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleSecuritySafebrowsingV5ListHashListsResponse": { +"description": "The response containing metadata about hash lists.", +"id": "GoogleSecuritySafebrowsingV5ListHashListsResponse", +"properties": { +"hashLists": { +"description": "The hash lists in an arbitrary order. Only metadata about the hash lists will be included, not the contents.", +"items": { +"$ref": "GoogleSecuritySafebrowsingV5HashList" +}, +"type": "array" +}, +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleSecuritySafebrowsingV5RiceDeltaEncoded128Bit": { +"description": "Same as `RiceDeltaEncoded32Bit` except this encodes 128-bit numbers.", +"id": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded128Bit", +"properties": { +"encodedData": { +"description": "The encoded deltas that are encoded using the Golomb-Rice coder.", +"format": "byte", +"type": "string" +}, +"entriesCount": { +"description": "The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.", +"format": "int32", +"type": "integer" +}, +"firstValueHi": { +"description": "The upper 64 bits of the first entry in the encoded data (hashes). If the field is empty, the upper 64 bits are all zero.", +"format": "uint64", +"type": "string" +}, +"firstValueLo": { +"description": "The lower 64 bits of the first entry in the encoded data (hashes). If the field is empty, the lower 64 bits are all zero.", +"format": "uint64", +"type": "string" +}, +"riceParameter": { +"description": "The Golomb-Rice parameter. This parameter is guaranteed to be between 99 and 126, inclusive.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleSecuritySafebrowsingV5RiceDeltaEncoded256Bit": { +"description": "Same as `RiceDeltaEncoded32Bit` except this encodes 256-bit numbers.", +"id": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded256Bit", +"properties": { +"encodedData": { +"description": "The encoded deltas that are encoded using the Golomb-Rice coder.", +"format": "byte", +"type": "string" +}, +"entriesCount": { +"description": "The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.", +"format": "int32", +"type": "integer" +}, +"firstValueFirstPart": { +"description": "The first 64 bits of the first entry in the encoded data (hashes). If the field is empty, the first 64 bits are all zero.", +"format": "uint64", +"type": "string" +}, +"firstValueFourthPart": { +"description": "The last 64 bits of the first entry in the encoded data (hashes). 
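Since `RiceDeltaEncoded128Bit` splits its first value into `firstValueHi` and `firstValueLo`, with an absent part meaning those 64 bits are all zero, reassembly is a two-liner. The dict-shaped input mirrors the parsed JSON and is an assumption, as is coercing the `uint64` string values with `int()`:

```python
def first_value_128(msg: dict) -> int:
    # Absent or empty parts mean "all zero", per the field descriptions.
    hi = int(msg.get("firstValueHi") or 0)  # upper 64 bits
    lo = int(msg.get("firstValueLo") or 0)  # lower 64 bits
    return (hi << 64) | lo
```

The 256-bit variant concatenates its four `firstValue*Part` fields the same way, taking the "first" part as most significant, which is consistent with the big-endian note in the 32-bit schema below.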
If the field is empty, the last 64 bits are all zero.", +"format": "uint64", +"type": "string" +}, +"firstValueSecondPart": { +"description": "The 65th through 128th bits of the first entry in the encoded data (hashes). If the field is empty, the 65th through 128th bits are all zero.", +"format": "uint64", +"type": "string" +}, +"firstValueThirdPart": { +"description": "The 129th through 192nd bits of the first entry in the encoded data (hashes). If the field is empty, the 129th through 192nd bits are all zero.", +"format": "uint64", +"type": "string" +}, +"riceParameter": { +"description": "The Golomb-Rice parameter. This parameter is guaranteed to be between 227 and 254, inclusive.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleSecuritySafebrowsingV5RiceDeltaEncoded32Bit": { +"description": "The Rice-Golomb encoded data. Used for either hashes or removal indices. It is guaranteed that every hash or index here has the same length, and this length is exactly 32 bits. Generally speaking, if we sort all the entries lexicographically, we will find that the higher order bits tend not to change as frequently as lower order bits. This means that if we also take the adjacent difference between entries, the higher order bits have a high probability of being zero. The encoding exploits this high probability of zero by essentially choosing a certain number of bits; all bits more significant than this are likely to be zero so we use unary encoding. See the `rice_parameter` field. Historical note: the Rice-delta encoding was first used in V4 of this API. In V5, two significant improvements were made: firstly, the Rice-delta encoding is now available with hash prefixes longer than 4 bytes; secondly, the encoded data are now treated as big-endian so as to avoid a costly sorting step.", +"id": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded32Bit", +"properties": { +"encodedData": { +"description": "The encoded deltas that are encoded using the Golomb-Rice coder.", +"format": "byte", +"type": "string" +}, +"entriesCount": { +"description": "The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.", +"format": "int32", +"type": "integer" +}, +"firstValue": { +"description": "The first entry in the encoded data (hashes or indices), or, if only a single hash prefix or index was encoded, that entry's value. If the field is empty, the entry is zero.", +"format": "uint32", +"type": "integer" +}, +"riceParameter": { +"description": "The Golomb-Rice parameter. This parameter is guaranteed to be between 3 and 30, inclusive.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleSecuritySafebrowsingV5RiceDeltaEncoded64Bit": { +"description": "Same as `RiceDeltaEncoded32Bit` except this encodes 64-bit numbers.", +"id": "GoogleSecuritySafebrowsingV5RiceDeltaEncoded64Bit", +"properties": { +"encodedData": { +"description": "The encoded deltas that are encoded using the Golomb-Rice coder.", +"format": "byte", +"type": "string" +}, +"entriesCount": { +"description": "The number of entries that are delta encoded in the encoded data. If only a single integer was encoded, this will be zero and the single value will be stored in `first_value`.", +"format": "int32", +"type": "integer" +}, +"firstValue": { +"description": "The first entry in the encoded data (hashes), or, if only a single hash prefix was encoded, that entry's value. 
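The `RiceDeltaEncoded32Bit` description above explains why Rice-delta coding works but not the exact bit layout. The sketch below uses common Golomb-Rice conventions — a unary quotient terminated by a 0 bit, then a `riceParameter`-bit remainder, bits read most-significant-first — which are assumptions layered on top of this document, not guarantees from it:

```python
class _BitReader:
    """Reads bits MSB-first from a byte string (layout is an assumption)."""

    def __init__(self, data: bytes):
        self.data, self.pos = data, 0  # pos counts bits consumed so far

    def read_bit(self) -> int:
        bit = (self.data[self.pos // 8] >> (7 - self.pos % 8)) & 1
        self.pos += 1
        return bit

    def read_bits(self, n: int) -> int:
        value = 0
        for _ in range(n):
            value = (value << 1) | self.read_bit()
        return value


def decode_rice_delta_32(first_value, rice_parameter, entries_count,
                         encoded_data):
    """Recover sorted 32-bit values (or removal indices) from one message."""
    reader = _BitReader(encoded_data)
    values = [first_value]
    for _ in range(entries_count):
        quotient = 0
        while reader.read_bit() == 1:  # unary part: 1-bits until a 0
            quotient += 1
        remainder = reader.read_bits(rice_parameter)
        # Each decoded number is the delta to the previous (sorted) entry.
        values.append(values[-1] + ((quotient << rice_parameter) | remainder))
    return values
```

The 64-, 128-, and 256-bit variants differ only in the width of the first value and the permitted `riceParameter` range.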
If the field is empty, the entry is zero.", +"format": "uint64", +"type": "string" +}, +"riceParameter": { +"description": "The Golomb-Rice parameter. This parameter is guaranteed to be between 35 and 62, inclusive.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleSecuritySafebrowsingV5SearchHashesResponse": { "description": "The response returned after searching threat hashes. If nothing is found, the server will return an OK status (HTTP status code 200) with the `full_hashes` field empty, rather than returning a NOT_FOUND status (HTTP status code 404). **What's new in V5**: There is a separation between `FullHash` and `FullHashDetail`. In the case when a hash represents a site having multiple threats (e.g. both MALWARE and SOCIAL_ENGINEERING), the full hash does not need to be sent twice as in V4. Furthermore, the cache duration has been simplified into a single `cache_duration` field.", "id": "GoogleSecuritySafebrowsingV5SearchHashesResponse", diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json index 6d07061546..0e838929fd 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json @@ -5938,7 +5938,7 @@ } } }, -"revision": "20250509", +"revision": "20250519", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -7697,6 +7697,13 @@ "format": "int64", "type": "string" }, +"operations": { +"description": "Operation(s) performed on a file.", +"items": { +"$ref": "FileOperation" +}, +"type": "array" +}, "partiallyHashed": { "description": "True when the hash covers only a prefix of the file.", "type": "boolean" @@ -7717,6 +7724,33 @@ }, "type": "object" }, +"FileOperation": { +"description": "Operation(s) performed on a file.", +"id": "FileOperation", +"properties": { +"type": { +"description": "The type of the operation", +"enum": [ +"OPERATION_TYPE_UNSPECIFIED", +"OPEN", +"READ", +"RENAME", +"WRITE", +"EXECUTE" +], +"enumDescriptions": [ +"The operation is unspecified.", +"Represents an open operation.", +"Represents a read operation.", +"Represents a rename operation.", +"Represents a write operation.", +"Represents an execute operation." +], +"type": "string" +} +}, +"type": "object" +}, "Finding": { "description": "Security Command Center finding. A finding is a record of assessment data like security, risk, health, or privacy, that is ingested into Security Command Center for presentation, notification, analysis, policy testing, and enforcement. 
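Back on the Safe Browsing side, the `SearchHashesResponse` description above highlights the V5 split between `FullHash` and `FullHashDetail` and the single `cacheDuration`. A hypothetical consumer of the parsed response might look like this; field names follow the schemas in this document, while `fullHashDetails` and the duration rendering are assumptions:

```python
import time

def matching_threats(response: dict, full_hash_b64: str):
    """Yield (threat_type, cache_expiry) pairs for an exact full-hash match."""
    # One cache duration covers the whole response; JSON durations are
    # assumed to render like "300s".
    ttl = float(response.get("cacheDuration", "0s").rstrip("s"))
    expiry = time.time() + ttl
    for fh in response.get("fullHashes", []):
        if fh.get("fullHash") == full_hash_b64:
            for detail in fh.get("fullHashDetails", []):
                yield detail.get("threatType"), expiry
```

An empty `fullHashes` list with an OK status simply means no match, per the response description above.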
For example, a cross-site scripting (XSS) vulnerability in an App Engine application is a finding.", "id": "Finding", @@ -10400,6 +10434,13 @@ "format": "int64", "type": "string" }, +"operations": { +"description": "Operation(s) performed on a file.", +"items": { +"$ref": "GoogleCloudSecuritycenterV2FileOperation" +}, +"type": "array" +}, "partiallyHashed": { "description": "True when the hash covers only a prefix of the file.", "type": "boolean" @@ -10420,6 +10461,33 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2FileOperation": { +"description": "Operation(s) performed on a file.", +"id": "GoogleCloudSecuritycenterV2FileOperation", +"properties": { +"type": { +"description": "The type of the operation", +"enum": [ +"OPERATION_TYPE_UNSPECIFIED", +"OPEN", +"READ", +"RENAME", +"WRITE", +"EXECUTE" +], +"enumDescriptions": [ +"The operation is unspecified.", +"Represents an open operation.", +"Represents a read operation.", +"Represents a rename operation.", +"Represents a write operation.", +"Represents an execute operation." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2Finding": { "description": "Security Command Center finding. A finding is a record of assessment data like security, risk, health, or privacy, that is ingested into Security Command Center for presentation, notification, analysis, policy testing, and enforcement. For example, a cross-site scripting (XSS) vulnerability in an App Engine application is a finding.", "id": "GoogleCloudSecuritycenterV2Finding", @@ -11115,7 +11183,8 @@ "CONTAINER", "DATA", "IDENTITY_AND_ACCESS", -"VULNERABILITY" +"VULNERABILITY", +"THREAT" ], "enumDescriptions": [ "Unspecified domain category.", @@ -11124,7 +11193,8 @@ "Issues in the container domain.", "Issues in the data domain.", "Issues in the identity and access domain.", -"Issues in the vulnerability domain." +"Issues in the vulnerability domain.", +"Issues in the threat domain." 
], "type": "string" } @@ -11611,19 +11681,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -11631,7 +11710,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -11639,6 +11723,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -11661,9 +11747,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -11678,15 +11767,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -11694,6 +11791,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -11701,33 +11800,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", 
"T1057", "T1059", "T1059.004", @@ -11735,7 +11846,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -11743,6 +11859,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -11765,9 +11883,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -11782,15 +11903,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -11798,6 +11927,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -11805,15 +11936,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -11864,19 +11998,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -11884,7 +12027,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -11892,6 +12040,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -11914,9 +12064,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -11931,15 +12084,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -11947,6 
+12108,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -11954,33 +12117,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -11988,7 +12163,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -11996,6 +12176,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -12018,9 +12200,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -12035,15 +12220,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -12051,6 +12244,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -12058,15 +12253,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -13982,19 +14180,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -14002,7 +14209,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -14010,6 +14222,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -14032,9 +14246,12 
@@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -14049,15 +14266,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -14065,6 +14290,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -14072,33 +14299,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -14106,7 +14345,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -14114,6 +14358,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -14136,9 +14382,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -14153,15 +14402,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -14169,6 +14426,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -14176,15 +14435,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -14235,19 +14497,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", 
"COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -14255,7 +14526,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -14263,6 +14539,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -14285,9 +14563,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -14302,15 +14583,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -14318,6 +14607,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -14325,33 +14616,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -14359,7 +14662,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -14367,6 +14675,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -14389,9 +14699,12 @@ "T1136.001", "T1140", "T1190", 
+"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -14406,15 +14719,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -14422,6 +14743,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -14429,15 +14752,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json index 46294ba7e5..43e84228ed 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json @@ -913,7 +913,7 @@ } } }, -"revision": "20250509", +"revision": "20250519", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -2286,6 +2286,13 @@ "format": "int64", "type": "string" }, +"operations": { +"description": "Operation(s) performed on a file.", +"items": { +"$ref": "FileOperation" +}, +"type": "array" +}, "partiallyHashed": { "description": "True when the hash covers only a prefix of the file.", "type": "boolean" @@ -2306,6 +2313,33 @@ }, "type": "object" }, +"FileOperation": { +"description": "Operation(s) performed on a file.", +"id": "FileOperation", +"properties": { +"type": { +"description": "The type of the operation", +"enum": [ +"OPERATION_TYPE_UNSPECIFIED", +"OPEN", +"READ", +"RENAME", +"WRITE", +"EXECUTE" +], +"enumDescriptions": [ +"The operation is unspecified.", +"Represents an open operation.", +"Represents a read operation.", +"Represents a rename operation.", +"Represents a write operation.", +"Represents an execute operation." +], +"type": "string" +} +}, +"type": "object" +}, "Finding": { "description": "Security Command Center finding. A finding is a record of assessment data like security, risk, health, or privacy, that is ingested into Security Command Center for presentation, notification, analysis, policy testing, and enforcement. For example, a cross-site scripting (XSS) vulnerability in an App Engine application is a finding.", "id": "Finding", @@ -5070,6 +5104,13 @@ "format": "int64", "type": "string" }, +"operations": { +"description": "Operation(s) performed on a file.", +"items": { +"$ref": "GoogleCloudSecuritycenterV2FileOperation" +}, +"type": "array" +}, "partiallyHashed": { "description": "True when the hash covers only a prefix of the file.", "type": "boolean" @@ -5090,6 +5131,33 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2FileOperation": { +"description": "Operation(s) performed on a file.", +"id": "GoogleCloudSecuritycenterV2FileOperation", +"properties": { +"type": { +"description": "The type of the operation", +"enum": [ +"OPERATION_TYPE_UNSPECIFIED", +"OPEN", +"READ", +"RENAME", +"WRITE", +"EXECUTE" +], +"enumDescriptions": [ +"The operation is unspecified.", +"Represents an open operation.", +"Represents a read operation.", +"Represents a rename operation.", +"Represents a write operation.", +"Represents an execute operation." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2Finding": { "description": "Security Command Center finding. 
A finding is a record of assessment data like security, risk, health, or privacy, that is ingested into Security Command Center for presentation, notification, analysis, policy testing, and enforcement. For example, a cross-site scripting (XSS) vulnerability in an App Engine application is a finding.", "id": "GoogleCloudSecuritycenterV2Finding", @@ -5785,7 +5853,8 @@ "CONTAINER", "DATA", "IDENTITY_AND_ACCESS", -"VULNERABILITY" +"VULNERABILITY", +"THREAT" ], "enumDescriptions": [ "Unspecified domain category.", @@ -5794,7 +5863,8 @@ "Issues in the container domain.", "Issues in the data domain.", "Issues in the identity and access domain.", -"Issues in the vulnerability domain." +"Issues in the vulnerability domain.", +"Issues in the threat domain." ], "type": "string" } @@ -6281,19 +6351,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -6301,7 +6380,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -6309,6 +6393,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -6331,9 +6417,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -6348,15 +6437,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -6364,6 +6461,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -6371,33 +6470,45 @@ 
"DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -6405,7 +6516,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -6413,6 +6529,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -6435,9 +6553,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -6452,15 +6573,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -6468,6 +6597,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -6475,15 +6606,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -6534,19 +6668,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -6554,7 +6697,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -6562,6 +6710,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -6584,9 +6734,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", 
"INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -6601,15 +6754,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -6617,6 +6778,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -6624,33 +6787,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -6658,7 +6833,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -6666,6 +6846,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -6688,9 +6870,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -6705,15 +6890,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -6721,6 +6914,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -6728,15 +6923,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -8371,19 +8569,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", 
"PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -8391,7 +8598,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -8399,6 +8611,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -8421,9 +8635,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -8438,15 +8655,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -8454,6 +8679,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -8461,33 +8688,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -8495,7 +8734,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -8503,6 +8747,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -8525,9 +8771,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -8542,15 +8791,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", 
+"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -8558,6 +8815,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -8565,15 +8824,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -8624,19 +8886,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -8644,7 +8915,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -8652,6 +8928,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -8674,9 +8952,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -8691,15 +8972,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -8707,6 +8996,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -8714,33 +9005,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" 
+"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -8748,7 +9051,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -8756,6 +9064,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -8778,9 +9088,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -8795,15 +9108,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -8811,6 +9132,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -8818,15 +9141,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json index 2ba0f6bbfb..05fb6f040d 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json @@ -2003,7 +2003,7 @@ } } }, -"revision": "20250509", +"revision": "20250519", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -3383,6 +3383,13 @@ "format": "int64", "type": "string" }, +"operations": { +"description": "Operation(s) performed on a file.", +"items": { +"$ref": "FileOperation" +}, +"type": "array" +}, "partiallyHashed": { "description": "True when the hash covers only a prefix of the file.", "type": "boolean" @@ -3403,6 +3410,33 @@ }, "type": "object" }, +"FileOperation": { +"description": "Operation(s) performed on a file.", +"id": "FileOperation", +"properties": { +"type": { +"description": "The type of the operation", +"enum": [ +"OPERATION_TYPE_UNSPECIFIED", +"OPEN", +"READ", +"RENAME", +"WRITE", +"EXECUTE" +], +"enumDescriptions": [ +"The operation is unspecified.", +"Represents an open operation.", +"Represents a read operation.", +"Represents a rename operation.", +"Represents a write operation.", +"Represents an execute operation." +], +"type": "string" +} +}, +"type": "object" +}, "Finding": { "description": "Security Command Center finding. A finding is a record of assessment data like security, risk, health, or privacy, that is ingested into Security Command Center for presentation, notification, analysis, policy testing, and enforcement. 
For example, a cross-site scripting (XSS) vulnerability in an App Engine application is a finding.", "id": "Finding", @@ -6063,6 +6097,13 @@ "format": "int64", "type": "string" }, +"operations": { +"description": "Operation(s) performed on a file.", +"items": { +"$ref": "GoogleCloudSecuritycenterV2FileOperation" +}, +"type": "array" +}, "partiallyHashed": { "description": "True when the hash covers only a prefix of the file.", "type": "boolean" @@ -6083,6 +6124,33 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2FileOperation": { +"description": "Operation(s) performed on a file.", +"id": "GoogleCloudSecuritycenterV2FileOperation", +"properties": { +"type": { +"description": "The type of the operation", +"enum": [ +"OPERATION_TYPE_UNSPECIFIED", +"OPEN", +"READ", +"RENAME", +"WRITE", +"EXECUTE" +], +"enumDescriptions": [ +"The operation is unspecified.", +"Represents an open operation.", +"Represents a read operation.", +"Represents a rename operation.", +"Represents a write operation.", +"Represents an execute operation." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2Finding": { "description": "Security Command Center finding. A finding is a record of assessment data like security, risk, health, or privacy, that is ingested into Security Command Center for presentation, notification, analysis, policy testing, and enforcement. For example, a cross-site scripting (XSS) vulnerability in an App Engine application is a finding.", "id": "GoogleCloudSecuritycenterV2Finding", @@ -6778,7 +6846,8 @@ "CONTAINER", "DATA", "IDENTITY_AND_ACCESS", -"VULNERABILITY" +"VULNERABILITY", +"THREAT" ], "enumDescriptions": [ "Unspecified domain category.", @@ -6787,7 +6856,8 @@ "Issues in the container domain.", "Issues in the data domain.", "Issues in the identity and access domain.", -"Issues in the vulnerability domain." +"Issues in the vulnerability domain.", +"Issues in the threat domain." 
], "type": "string" } @@ -7274,19 +7344,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -7294,7 +7373,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -7302,6 +7386,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -7324,9 +7410,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -7341,15 +7430,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -7357,6 +7454,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -7364,33 +7463,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", 
"T1059", "T1059.004", @@ -7398,7 +7509,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -7406,6 +7522,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -7428,9 +7546,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -7445,15 +7566,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -7461,6 +7590,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -7468,15 +7599,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -7527,19 +7661,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -7547,7 +7690,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -7555,6 +7703,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -7577,9 +7727,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -7594,15 +7747,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -7610,6 +7771,8 @@ 
"EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -7617,33 +7780,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -7651,7 +7826,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -7659,6 +7839,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -7681,9 +7863,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -7698,15 +7883,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -7714,6 +7907,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -7721,15 +7916,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -9115,19 +9313,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", +"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -9135,7 +9342,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -9143,6 +9355,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -9165,9 +9379,12 @@ "LOCAL_ACCOUNT", 
"DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -9182,15 +9399,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -9198,6 +9423,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -9205,33 +9432,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -9239,7 +9478,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -9247,6 +9491,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -9269,9 +9515,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", "T1485", +"T1486", "T1489", "T1490", "T1495", @@ -9286,15 +9535,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -9302,6 +9559,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -9309,15 +9568,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, @@ -9368,19 +9630,28 @@ "TECHNIQUE_UNSPECIFIED", "DATA_OBFUSCATION", "DATA_OBFUSCATION_STEGANOGRAPHY", +"OS_CREDENTIAL_DUMPING", +"OS_CREDENTIAL_DUMPING_PROC_FILESYSTEM", +"OS_CREDENTIAL_DUMPING_ETC_PASSWORD_AND_ETC_SHADOW", +"DATA_FROM_LOCAL_SYSTEM", "AUTOMATED_EXFILTRATION", "OBFUSCATED_FILES_OR_INFO", "STEGANOGRAPHY", "COMPILE_AFTER_DELIVERY", "COMMAND_OBFUSCATION", 
+"SCHEDULED_TRANSFER", +"SYSTEM_OWNER_USER_DISCOVERY", "MASQUERADING", "MATCH_LEGITIMATE_NAME_OR_LOCATION", "BOOT_OR_LOGON_INITIALIZATION_SCRIPTS", "STARTUP_ITEMS", "NETWORK_SERVICE_DISCOVERY", "SCHEDULED_TASK_JOB", +"SCHEDULED_TASK_JOB_CRON", "CONTAINER_ORCHESTRATION_JOB", "PROCESS_INJECTION", +"INPUT_CAPTURE", +"INPUT_CAPTURE_KEYLOGGING", "PROCESS_DISCOVERY", "COMMAND_AND_SCRIPTING_INTERPRETER", "UNIX_SHELL", @@ -9388,7 +9659,12 @@ "EXPLOITATION_FOR_PRIVILEGE_ESCALATION", "PERMISSION_GROUPS_DISCOVERY", "CLOUD_GROUPS", +"INDICATOR_REMOVAL", +"INDICATOR_REMOVAL_CLEAR_LINUX_OR_MAC_SYSTEM_LOGS", +"INDICATOR_REMOVAL_CLEAR_COMMAND_HISTORY", "INDICATOR_REMOVAL_FILE_DELETION", +"INDICATOR_REMOVAL_TIMESTOMP", +"INDICATOR_REMOVAL_CLEAR_MAILBOX_DATA", "APPLICATION_LAYER_PROTOCOL", "DNS", "SOFTWARE_DEPLOYMENT_TOOLS", @@ -9396,6 +9672,8 @@ "DEFAULT_ACCOUNTS", "LOCAL_ACCOUNTS", "CLOUD_ACCOUNTS", +"FILE_AND_DIRECTORY_DISCOVERY", +"ACCOUNT_DISCOVERY_LOCAL_ACCOUNT", "PROXY", "EXTERNAL_PROXY", "MULTI_HOP_PROXY", @@ -9418,9 +9696,12 @@ "LOCAL_ACCOUNT", "DEOBFUSCATE_DECODE_FILES_OR_INFO", "EXPLOIT_PUBLIC_FACING_APPLICATION", +"SUPPLY_CHAIN_COMPROMISE", +"COMPROMISE_SOFTWARE_DEPENDENCIES_AND_DEVELOPMENT_TOOLS", "USER_EXECUTION", "DOMAIN_POLICY_MODIFICATION", "DATA_DESTRUCTION", +"DATA_ENCRYPTED_FOR_IMPACT", "SERVICE_STOP", "INHIBIT_SYSTEM_RECOVERY", "FIRMWARE_CORRUPTION", @@ -9435,15 +9716,23 @@ "EVENT_TRIGGERED_EXECUTION", "BOOT_OR_LOGON_AUTOSTART_EXECUTION", "KERNEL_MODULES_AND_EXTENSIONS", +"SHORTCUT_MODIFICATION", "ABUSE_ELEVATION_CONTROL_MECHANISM", +"ABUSE_ELEVATION_CONTROL_MECHANISM_SUDO_AND_SUDO_CACHING", "UNSECURED_CREDENTIALS", +"CREDENTIALS_IN_FILES", "BASH_HISTORY", "PRIVATE_KEYS", +"SUBVERT_TRUST_CONTROL", +"INSTALL_ROOT_CERTIFICATE", "COMPROMISE_HOST_SOFTWARE_BINARY", "CREDENTIALS_FROM_PASSWORD_STORES", "MODIFY_AUTHENTICATION_PROCESS", +"PLUGGABLE_AUTHENTICATION_MODULES", "IMPAIR_DEFENSES", "DISABLE_OR_MODIFY_TOOLS", +"INDICATOR_BLOCKING", +"DISABLE_OR_MODIFY_LINUX_AUDIT_SYSTEM", "HIDE_ARTIFACTS", "HIDDEN_FILES_AND_DIRECTORIES", "HIDDEN_USERS", @@ -9451,6 +9740,8 @@ "EXFILTRATION_TO_CLOUD_STORAGE", "DYNAMIC_RESOLUTION", "LATERAL_TOOL_TRANSFER", +"HIJACK_EXECUTION_FLOW", +"HIJACK_EXECUTION_FLOW_DYNAMIC_LINKER_HIJACKING", "MODIFY_CLOUD_COMPUTE_INFRASTRUCTURE", "CREATE_SNAPSHOT", "CLOUD_INFRASTRUCTURE_DISCOVERY", @@ -9458,33 +9749,45 @@ "DEVELOP_CAPABILITIES_MALWARE", "OBTAIN_CAPABILITIES", "OBTAIN_CAPABILITIES_MALWARE", +"OBTAIN_CAPABILITIES_VULNERABILITIES", "ACTIVE_SCANNING", "SCANNING_IP_BLOCKS", "STAGE_CAPABILITIES", +"UPLOAD_MALWARE", "CONTAINER_ADMINISTRATION_COMMAND", "DEPLOY_CONTAINER", "ESCAPE_TO_HOST", "CONTAINER_AND_RESOURCE_DISCOVERY", "REFLECTIVE_CODE_LOADING", -"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES" +"STEAL_OR_FORGE_AUTHENTICATION_CERTIFICATES", +"FINANCIAL_THEFT" ], "enumDescriptions": [ "Unspecified value.", "T1001", "T1001.002", +"T1003", +"T1003.007", +"T1003.008", +"T1005", "T1020", "T1027", "T1027.003", "T1027.004", "T1027.010", +"T1029", +"T1033", "T1036", "T1036.005", "T1037", "T1037.005", "T1046", "T1053", +"T1053.003", "T1053.007", "T1055", +"T1056", +"T1056.001", "T1057", "T1059", "T1059.004", @@ -9492,7 +9795,12 @@ "T1068", "T1069", "T1069.003", +"T1070", +"T1070.002", +"T1070.003", "T1070.004", +"T1070.006", +"T1070.008", "T1071", "T1071.004", "T1072", @@ -9500,6 +9808,8 @@ "T1078.001", "T1078.003", "T1078.004", +"T1083", +"T1087.001", "T1090", "T1090.002", "T1090.003", @@ -9522,9 +9832,12 @@ "T1136.001", "T1140", "T1190", +"T1195", +"T1195.001", "T1204", "T1484", 
"T1485", +"T1486", "T1489", "T1490", "T1495", @@ -9539,15 +9852,23 @@ "T1546", "T1547", "T1547.006", +"T1547.009", "T1548", +"T1548.003", "T1552", +"T1552.001", "T1552.003", "T1552.004", +"T1553", +"T1553.004", "T1554", "T1555", "T1556", +"T1556.003", "T1562", "T1562.001", +"T1562.006", +"T1562.012", "T1564", "T1564.001", "T1564.002", @@ -9555,6 +9876,8 @@ "T1567.002", "T1568", "T1570", +"T1574", +"T1574.006", "T1578", "T1578.001", "T1580", @@ -9562,15 +9885,18 @@ "T1587.001", "T1588", "T1588.001", +"T1588.006", "T1595", "T1595.001", "T1608", +"T1608.001", "T1609", "T1610", "T1611", "T1613", "T1620", -"T1649" +"T1649", +"T1657" ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json index 6c1d922d6c..9a078b3852 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json @@ -542,7 +542,7 @@ } } }, -"revision": "20250511", +"revision": "20250515", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "AddTenantProjectRequest": { @@ -844,6 +844,101 @@ }, "type": "object" }, +"BatchingConfigProto": { +"description": "`BatchingConfigProto` defines the batching configuration for an API method.", +"id": "BatchingConfigProto", +"properties": { +"batchDescriptor": { +"$ref": "BatchingDescriptorProto", +"description": "The request and response fields used in batching." +}, +"thresholds": { +"$ref": "BatchingSettingsProto", +"description": "The thresholds which trigger a batched request to be sent." +} +}, +"type": "object" +}, +"BatchingDescriptorProto": { +"description": "`BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing.", +"id": "BatchingDescriptorProto", +"properties": { +"batchedField": { +"description": "The repeated field in the request message to be aggregated by batching.", +"type": "string" +}, +"discriminatorFields": { +"description": "A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests.", +"items": { +"type": "string" +}, +"type": "array" +}, +"subresponseField": { +"description": "Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together.", +"type": "string" +} +}, +"type": "object" +}, +"BatchingSettingsProto": { +"description": "`BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. 
At least one threshold must be positive nonzero.", +"id": "BatchingSettingsProto", +"properties": { +"delayThreshold": { +"description": "The duration after which a batch should be sent, starting from the addition of the first message to that batch.", +"format": "google-duration", +"type": "string" +}, +"elementCountLimit": { +"description": "The maximum number of elements collected in a batch that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"elementCountThreshold": { +"description": "The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent.", +"format": "int32", +"type": "integer" +}, +"flowControlByteLimit": { +"description": "The maximum size of data allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlElementLimit": { +"description": "The maximum number of elements allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlLimitExceededBehavior": { +"description": "The behavior to take when the flow control limit is exceeded.", +"enum": [ +"UNSET_BEHAVIOR", +"THROW_EXCEPTION", +"BLOCK", +"IGNORE" +], +"enumDescriptions": [ +"Default behavior, system-defined.", +"Stop operation, raise error.", +"Pause operation until limit clears.", +"Continue operation, disregard limit." +], +"type": "string" +}, +"requestByteLimit": { +"description": "The maximum size of the request that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"requestByteThreshold": { +"description": "The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "Billing": { "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", @@ -1880,6 +1975,10 @@ }, "type": "array" }, +"batching": { +"$ref": "BatchingConfigProto", +"description": "Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10" +}, "longRunning": { "$ref": "LongRunning", "description": "Describes settings to use for long-running operations when generating API methods for RPCs. 
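The BatchingSettingsProto thresholds read as triggers: a pending batch is sent once the element count or the aggregated size of the batched field exceeds its threshold, or once the delay measured from the first queued element elapses. A minimal sketch of that trigger logic, independent of any client library; the default numbers echo the YAML example in the `batching` description above.

import time

class BatchTrigger:
    """Sketch of the BatchingSettingsProto trigger semantics: send the batch
    when element_count_threshold or request_byte_threshold is exceeded, or
    delay_threshold seconds after the first element was queued."""

    def __init__(self, element_count_threshold=1000,
                 request_byte_threshold=100_000_000,
                 delay_threshold=0.010):
        self.element_count_threshold = element_count_threshold
        self.request_byte_threshold = request_byte_threshold
        self.delay_threshold = delay_threshold
        self._elements = []
        self._bytes = 0        # aggregated size of the batched field only
        self._first_at = None  # time the first element of this batch arrived

    def add(self, element):
        if self._first_at is None:
            self._first_at = time.monotonic()
        self._elements.append(element)
        self._bytes += len(element)
        if (len(self._elements) > self.element_count_threshold
                or self._bytes > self.request_byte_threshold
                or time.monotonic() - self._first_at >= self.delay_threshold):
            batch = self._elements
            self._elements, self._bytes, self._first_at = [], 0, None
            return batch  # caller sends these as one batched request
        return None

A production batcher would also flush on a background timer so the delay threshold can fire even when no further elements arrive; this sketch checks the thresholds only inside add() for brevity.
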
Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes" diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json index 9a34b81004..3cc6cec43c 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json @@ -343,16 +343,19 @@ ], "parameters": { "force": { +"deprecated": true, "description": "Whether to force the creation of the quota override. Setting the force parameter to 'true' ignores all quota safety checks that would fail the request. QuotaSafetyCheck lists all such validations.", "location": "query", "type": "boolean" }, "forceJustification": { +"deprecated": true, "description": "If force option is set to true, force_justification is suggested to be set to log the reason in audit logs.", "location": "query", "type": "string" }, "forceOnly": { +"deprecated": true, "description": "The list of quota safety checks to ignore before the override mutation. Unlike 'force' field that ignores all the quota safety checks, the 'force_only' field ignores only the specified checks; other checks are still enforced. The 'force' and 'force_only' fields cannot both be set.", "enum": [ "QUOTA_SAFETY_CHECK_UNSPECIFIED", @@ -397,16 +400,19 @@ ], "parameters": { "force": { +"deprecated": true, "description": "Whether to force the deletion of the quota override. Setting the force parameter to 'true' ignores all quota safety checks that would fail the request. QuotaSafetyCheck lists all such validations.", "location": "query", "type": "boolean" }, "forceJustification": { +"deprecated": true, "description": "If force option is set to true, force_justification is suggested to be set to log the reason in audit logs.", "location": "query", "type": "string" }, "forceOnly": { +"deprecated": true, "description": "The list of quota safety checks to ignore before the override mutation. Unlike 'force' field that ignores all the quota safety checks, the 'force_only' field ignores only the specified checks; other checks are still enforced. The 'force' and 'force_only' fields cannot both be set.", "enum": [ "QUOTA_SAFETY_CHECK_UNSPECIFIED", @@ -484,16 +490,19 @@ ], "parameters": { "force": { +"deprecated": true, "description": "Whether to force the update of the quota override. Setting the force parameter to 'true' ignores all quota safety checks that would fail the request. QuotaSafetyCheck lists all such validations.", "location": "query", "type": "boolean" }, "forceJustification": { +"deprecated": true, "description": "If force option is set to true, force_justification is suggested to be set to log the reason in audit logs.", "location": "query", "type": "string" }, "forceOnly": { +"deprecated": true, "description": "The list of quota safety checks to ignore before the override mutation. Unlike 'force' field that ignores all the quota safety checks, the 'force_only' field ignores only the specified checks; other checks are still enforced. 
The 'force' and 'force_only' fields cannot both be set.", "enum": [ "QUOTA_SAFETY_CHECK_UNSPECIFIED", @@ -715,7 +724,7 @@ } } }, -"revision": "20250511", +"revision": "20250515", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "Api": { @@ -968,6 +977,101 @@ }, "type": "object" }, +"BatchingConfigProto": { +"description": "`BatchingConfigProto` defines the batching configuration for an API method.", +"id": "BatchingConfigProto", +"properties": { +"batchDescriptor": { +"$ref": "BatchingDescriptorProto", +"description": "The request and response fields used in batching." +}, +"thresholds": { +"$ref": "BatchingSettingsProto", +"description": "The thresholds which trigger a batched request to be sent." +} +}, +"type": "object" +}, +"BatchingDescriptorProto": { +"description": "`BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing.", +"id": "BatchingDescriptorProto", +"properties": { +"batchedField": { +"description": "The repeated field in the request message to be aggregated by batching.", +"type": "string" +}, +"discriminatorFields": { +"description": "A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests.", +"items": { +"type": "string" +}, +"type": "array" +}, +"subresponseField": { +"description": "Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together.", +"type": "string" +} +}, +"type": "object" +}, +"BatchingSettingsProto": { +"description": "`BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. At least one threshold must be positive nonzero.", +"id": "BatchingSettingsProto", +"properties": { +"delayThreshold": { +"description": "The duration after which a batch should be sent, starting from the addition of the first message to that batch.", +"format": "google-duration", +"type": "string" +}, +"elementCountLimit": { +"description": "The maximum number of elements collected in a batch that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"elementCountThreshold": { +"description": "The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent.", +"format": "int32", +"type": "integer" +}, +"flowControlByteLimit": { +"description": "The maximum size of data allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlElementLimit": { +"description": "The maximum number of elements allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlLimitExceededBehavior": { +"description": "The behavior to take when the flow control limit is exceeded.", +"enum": [ +"UNSET_BEHAVIOR", +"THROW_EXCEPTION", +"BLOCK", +"IGNORE" +], +"enumDescriptions": [ +"Default behavior, system-defined.", +"Stop operation, raise error.", +"Pause operation until limit clears.", +"Continue operation, disregard limit." 
+], +"type": "string" +}, +"requestByteLimit": { +"description": "The maximum size of the request that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"requestByteThreshold": { +"description": "The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "Billing": { "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", @@ -1929,6 +2033,10 @@ }, "type": "array" }, +"batching": { +"$ref": "BatchingConfigProto", +"description": "Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10" +}, "longRunning": { "$ref": "LongRunning", "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes" @@ -3015,14 +3123,17 @@ "id": "V1Beta1ImportProducerOverridesRequest", "properties": { "force": { +"deprecated": true, "description": "Whether to force the creation of the quota overrides. Setting the force parameter to 'true' ignores all quota safety checks that would fail the request. QuotaSafetyCheck lists all such validations.", "type": "boolean" }, "forceJustification": { +"deprecated": true, "description": "If force option is set to true, force_justification is suggested to be set to log the reason in audit logs.", "type": "string" }, "forceOnly": { +"deprecated": true, "description": "The list of quota safety checks to ignore before the override mutation. Unlike 'force' field that ignores all the quota safety checks, the 'force_only' field ignores only the specified checks; other checks are still enforced. 
The 'force' and 'force_only' fields cannot both be set.", "items": { "enum": [ diff --git a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json index 42c56f1c24..e20a749270 100644 --- a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json @@ -830,7 +830,7 @@ } } }, -"revision": "20250502", +"revision": "20250519", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { @@ -1142,6 +1142,101 @@ }, "type": "object" }, +"BatchingConfigProto": { +"description": "`BatchingConfigProto` defines the batching configuration for an API method.", +"id": "BatchingConfigProto", +"properties": { +"batchDescriptor": { +"$ref": "BatchingDescriptorProto", +"description": "The request and response fields used in batching." +}, +"thresholds": { +"$ref": "BatchingSettingsProto", +"description": "The thresholds which trigger a batched request to be sent." +} +}, +"type": "object" +}, +"BatchingDescriptorProto": { +"description": "`BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing.", +"id": "BatchingDescriptorProto", +"properties": { +"batchedField": { +"description": "The repeated field in the request message to be aggregated by batching.", +"type": "string" +}, +"discriminatorFields": { +"description": "A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests.", +"items": { +"type": "string" +}, +"type": "array" +}, +"subresponseField": { +"description": "Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together.", +"type": "string" +} +}, +"type": "object" +}, +"BatchingSettingsProto": { +"description": "`BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. 
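With `force`, `forceJustification`, and `forceOnly` now marked deprecated on the quota-override methods and on V1Beta1ImportProducerOverridesRequest, new callers would leave all three unset so every quota safety check runs. A hypothetical request body follows; the `inlineSource`/`overrides` shape and the metric and unit strings are illustrative assumptions, not taken from this diff.

body = {
    "inlineSource": {                       # assumed source field for inline overrides
        "overrides": [
            {
                "metric": "example.googleapis.com/widgets",  # hypothetical metric
                "unit": "1/{project}",                       # hypothetical unit
                "overrideValue": "200",
            }
        ]
    }
    # Deliberately no "force", "forceJustification", or "forceOnly": the
    # deprecated escape hatches are omitted so all safety checks still run.
}
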
At least one threshold must be positive nonzero.", +"id": "BatchingSettingsProto", +"properties": { +"delayThreshold": { +"description": "The duration after which a batch should be sent, starting from the addition of the first message to that batch.", +"format": "google-duration", +"type": "string" +}, +"elementCountLimit": { +"description": "The maximum number of elements collected in a batch that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"elementCountThreshold": { +"description": "The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent.", +"format": "int32", +"type": "integer" +}, +"flowControlByteLimit": { +"description": "The maximum size of data allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlElementLimit": { +"description": "The maximum number of elements allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlLimitExceededBehavior": { +"description": "The behavior to take when the flow control limit is exceeded.", +"enum": [ +"UNSET_BEHAVIOR", +"THROW_EXCEPTION", +"BLOCK", +"IGNORE" +], +"enumDescriptions": [ +"Default behavior, system-defined.", +"Stop operation, raise error.", +"Pause operation until limit clears.", +"Continue operation, disregard limit." +], +"type": "string" +}, +"requestByteLimit": { +"description": "The maximum size of the request that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"requestByteThreshold": { +"description": "The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "Billing": { "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", @@ -2479,6 +2574,10 @@ }, "type": "array" }, +"batching": { +"$ref": "BatchingConfigProto", +"description": "Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10" +}, "longRunning": { "$ref": "LongRunning", "description": "Describes settings to use for long-running operations when generating API methods for RPCs. 
Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes" diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json index 8b19edd867..765e280fec 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json @@ -454,12 +454,12 @@ ], "parameters": { "network": { -"description": "The name of service consumer's VPC network that's connected with service producer network through a private connection. The network name must be in the following format: `projects/{project}/global/networks/{network}`. {project} is a project number, such as in `12345` that includes the VPC service consumer's VPC network. {network} is the name of the service consumer's VPC network.", +"description": "Required. The name of service consumer's VPC network that's connected with service producer network through a private connection. The network name must be in the following format: `projects/{project}/global/networks/{network}`. {project} is a project number, such as in `12345` that includes the VPC service consumer's VPC network. {network} is the name of the service consumer's VPC network.", "location": "query", "type": "string" }, "parent": { -"description": "The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`. If you specify `services/-` as the parameter value, all configured peering services are listed.", +"description": "Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`. If you specify `services/-` as the parameter value, all configured peering services are listed.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -1029,7 +1029,7 @@ } } }, -"revision": "20250506", +"revision": "20250521", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { @@ -1483,6 +1483,101 @@ }, "type": "object" }, +"BatchingConfigProto": { +"description": "`BatchingConfigProto` defines the batching configuration for an API method.", +"id": "BatchingConfigProto", +"properties": { +"batchDescriptor": { +"$ref": "BatchingDescriptorProto", +"description": "The request and response fields used in batching." +}, +"thresholds": { +"$ref": "BatchingSettingsProto", +"description": "The thresholds which trigger a batched request to be sent." +} +}, +"type": "object" +}, +"BatchingDescriptorProto": { +"description": "`BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing.", +"id": "BatchingDescriptorProto", +"properties": { +"batchedField": { +"description": "The repeated field in the request message to be aggregated by batching.", +"type": "string" +}, +"discriminatorFields": { +"description": "A list of the fields in the request message. 
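The `network` and `parent` parameters on the servicenetworking connections list method are now documented as Required. A sketch of supplying both through the Python client; the project number and network name are placeholders, and the call assumes Application Default Credentials are available.

from googleapiclient import discovery

service = discovery.build("servicenetworking", "v1")
response = (
    service.services()
    .connections()
    .list(
        parent="services/servicenetworking.googleapis.com",  # or "services/-" for all peering services
        network="projects/12345/global/networks/default",    # placeholder project number and network
    )
    .execute()
)
for conn in response.get("connections", []):
    print(conn.get("peering"), conn.get("reservedPeeringRanges"))
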
Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests.", +"items": { +"type": "string" +}, +"type": "array" +}, +"subresponseField": { +"description": "Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together.", +"type": "string" +} +}, +"type": "object" +}, +"BatchingSettingsProto": { +"description": "`BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. At least one threshold must be positive nonzero.", +"id": "BatchingSettingsProto", +"properties": { +"delayThreshold": { +"description": "The duration after which a batch should be sent, starting from the addition of the first message to that batch.", +"format": "google-duration", +"type": "string" +}, +"elementCountLimit": { +"description": "The maximum number of elements collected in a batch that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"elementCountThreshold": { +"description": "The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent.", +"format": "int32", +"type": "integer" +}, +"flowControlByteLimit": { +"description": "The maximum size of data allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlElementLimit": { +"description": "The maximum number of elements allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlLimitExceededBehavior": { +"description": "The behavior to take when the flow control limit is exceeded.", +"enum": [ +"UNSET_BEHAVIOR", +"THROW_EXCEPTION", +"BLOCK", +"IGNORE" +], +"enumDescriptions": [ +"Default behavior, system-defined.", +"Stop operation, raise error.", +"Pause operation until limit clears.", +"Continue operation, disregard limit." +], +"type": "string" +}, +"requestByteLimit": { +"description": "The maximum size of the request that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"requestByteThreshold": { +"description": "The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "Billing": { "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. 
metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", @@ -2848,6 +2943,10 @@ }, "type": "array" }, +"batching": { +"$ref": "BatchingConfigProto", +"description": "Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10" +}, "longRunning": { "$ref": "LongRunning", "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes" diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json index b43f6c7ce8..0804eca118 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json @@ -307,7 +307,7 @@ } } }, -"revision": "20250506", +"revision": "20250518", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { @@ -650,6 +650,101 @@ }, "type": "object" }, +"BatchingConfigProto": { +"description": "`BatchingConfigProto` defines the batching configuration for an API method.", +"id": "BatchingConfigProto", +"properties": { +"batchDescriptor": { +"$ref": "BatchingDescriptorProto", +"description": "The request and response fields used in batching." +}, +"thresholds": { +"$ref": "BatchingSettingsProto", +"description": "The thresholds which trigger a batched request to be sent." +} +}, +"type": "object" +}, +"BatchingDescriptorProto": { +"description": "`BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing.", +"id": "BatchingDescriptorProto", +"properties": { +"batchedField": { +"description": "The repeated field in the request message to be aggregated by batching.", +"type": "string" +}, +"discriminatorFields": { +"description": "A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests.", +"items": { +"type": "string" +}, +"type": "array" +}, +"subresponseField": { +"description": "Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together.", +"type": "string" +} +}, +"type": "object" +}, +"BatchingSettingsProto": { +"description": "`BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. 
At least one threshold must be positive nonzero.", +"id": "BatchingSettingsProto", +"properties": { +"delayThreshold": { +"description": "The duration after which a batch should be sent, starting from the addition of the first message to that batch.", +"format": "google-duration", +"type": "string" +}, +"elementCountLimit": { +"description": "The maximum number of elements collected in a batch that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"elementCountThreshold": { +"description": "The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent.", +"format": "int32", +"type": "integer" +}, +"flowControlByteLimit": { +"description": "The maximum size of data allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlElementLimit": { +"description": "The maximum number of elements allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlLimitExceededBehavior": { +"description": "The behavior to take when the flow control limit is exceeded.", +"enum": [ +"UNSET_BEHAVIOR", +"THROW_EXCEPTION", +"BLOCK", +"IGNORE" +], +"enumDescriptions": [ +"Default behavior, system-defined.", +"Stop operation, raise error.", +"Pause operation until limit clears.", +"Continue operation, disregard limit." +], +"type": "string" +}, +"requestByteLimit": { +"description": "The maximum size of the request that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"requestByteThreshold": { +"description": "The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "Billing": { "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", @@ -1868,6 +1963,10 @@ }, "type": "array" }, +"batching": { +"$ref": "BatchingConfigProto", +"description": "Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10" +}, "longRunning": { "$ref": "LongRunning", "description": "Describes settings to use for long-running operations when generating API methods for RPCs. 
Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes" diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1.json index 216bf7cf92..e4e278c19c 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1.json @@ -426,7 +426,7 @@ } } }, -"revision": "20250511", +"revision": "20250515", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AddEnableRulesMetadata": { @@ -894,6 +894,101 @@ }, "type": "object" }, +"BatchingConfigProto": { +"description": "`BatchingConfigProto` defines the batching configuration for an API method.", +"id": "BatchingConfigProto", +"properties": { +"batchDescriptor": { +"$ref": "BatchingDescriptorProto", +"description": "The request and response fields used in batching." +}, +"thresholds": { +"$ref": "BatchingSettingsProto", +"description": "The thresholds which trigger a batched request to be sent." +} +}, +"type": "object" +}, +"BatchingDescriptorProto": { +"description": "`BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing.", +"id": "BatchingDescriptorProto", +"properties": { +"batchedField": { +"description": "The repeated field in the request message to be aggregated by batching.", +"type": "string" +}, +"discriminatorFields": { +"description": "A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests.", +"items": { +"type": "string" +}, +"type": "array" +}, +"subresponseField": { +"description": "Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together.", +"type": "string" +} +}, +"type": "object" +}, +"BatchingSettingsProto": { +"description": "`BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. 
At least one threshold must be positive nonzero.", +"id": "BatchingSettingsProto", +"properties": { +"delayThreshold": { +"description": "The duration after which a batch should be sent, starting from the addition of the first message to that batch.", +"format": "google-duration", +"type": "string" +}, +"elementCountLimit": { +"description": "The maximum number of elements collected in a batch that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"elementCountThreshold": { +"description": "The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent.", +"format": "int32", +"type": "integer" +}, +"flowControlByteLimit": { +"description": "The maximum size of data allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlElementLimit": { +"description": "The maximum number of elements allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlLimitExceededBehavior": { +"description": "The behavior to take when the flow control limit is exceeded.", +"enum": [ +"UNSET_BEHAVIOR", +"THROW_EXCEPTION", +"BLOCK", +"IGNORE" +], +"enumDescriptions": [ +"Default behavior, system-defined.", +"Stop operation, raise error.", +"Pause operation until limit clears.", +"Continue operation, disregard limit." +], +"type": "string" +}, +"requestByteLimit": { +"description": "The maximum size of the request that could be accepted by server.", +"format": "int32", +"type": "integer" +}, +"requestByteThreshold": { +"description": "The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "Billing": { "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", @@ -2698,6 +2793,10 @@ }, "type": "array" }, +"batching": { +"$ref": "BatchingConfigProto", +"description": "Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10" +}, "longRunning": { "$ref": "LongRunning", "description": "Describes settings to use for long-running operations when generating API methods for RPCs. 
Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes" diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json index 9a33cb4654..36d9ded137 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json @@ -964,7 +964,7 @@ } } }, -"revision": "20250511", +"revision": "20250515", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AddEnableRulesMetadata": { @@ -1418,6 +1418,101 @@ }, "type": "object" }, +"BatchingConfigProto": { +"description": "`BatchingConfigProto` defines the batching configuration for an API method.", +"id": "BatchingConfigProto", +"properties": { +"batchDescriptor": { +"$ref": "BatchingDescriptorProto", +"description": "The request and response fields used in batching." +}, +"thresholds": { +"$ref": "BatchingSettingsProto", +"description": "The thresholds which trigger a batched request to be sent." +} +}, +"type": "object" +}, +"BatchingDescriptorProto": { +"description": "`BatchingDescriptorProto` specifies the fields of the request message to be used for batching, and, optionally, the fields of the response message to be used for demultiplexing.", +"id": "BatchingDescriptorProto", +"properties": { +"batchedField": { +"description": "The repeated field in the request message to be aggregated by batching.", +"type": "string" +}, +"discriminatorFields": { +"description": "A list of the fields in the request message. Two requests will be batched together only if the values of every field specified in `request_discriminator_fields` is equal between the two requests.", +"items": { +"type": "string" +}, +"type": "array" +}, +"subresponseField": { +"description": "Optional. When present, indicates the field in the response message to be used to demultiplex the response into multiple response messages, in correspondence with the multiple request messages originally batched together.", +"type": "string" +} +}, +"type": "object" +}, +"BatchingSettingsProto": { +"description": "`BatchingSettingsProto` specifies a set of batching thresholds, each of which acts as a trigger to send a batch of messages as a request. 
At least one threshold must be positive nonzero.", +"id": "BatchingSettingsProto", +"properties": { +"delayThreshold": { +"description": "The duration after which a batch should be sent, starting from the addition of the first message to that batch.", +"format": "google-duration", +"type": "string" +}, +"elementCountLimit": { +"description": "The maximum number of elements collected in a batch that can be accepted by the server.", +"format": "int32", +"type": "integer" +}, +"elementCountThreshold": { +"description": "The number of elements of a field collected into a batch which, if exceeded, causes the batch to be sent.", +"format": "int32", +"type": "integer" +}, +"flowControlByteLimit": { +"description": "The maximum size of data allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlElementLimit": { +"description": "The maximum number of elements allowed by flow control.", +"format": "int32", +"type": "integer" +}, +"flowControlLimitExceededBehavior": { +"description": "The behavior to take when the flow control limit is exceeded.", +"enum": [ +"UNSET_BEHAVIOR", +"THROW_EXCEPTION", +"BLOCK", +"IGNORE" +], +"enumDescriptions": [ +"Default behavior, system-defined.", +"Stop operation, raise error.", +"Pause operation until limit clears.", +"Continue operation, disregard limit." +], +"type": "string" +}, +"requestByteLimit": { +"description": "The maximum size of the request that can be accepted by the server.", +"format": "int32", +"type": "integer" +}, +"requestByteThreshold": { +"description": "The aggregated size of the batched field which, if exceeded, causes the batch to be sent. This size is computed by aggregating the sizes of the request field to be batched, not of the entire request message.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "Billing": { "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", @@ -3393,6 +3488,10 @@ }, "type": "array" }, +"batching": { +"$ref": "BatchingConfigProto", +"description": "Batching configuration for an API method in client libraries. Example of a YAML configuration: publishing: method_settings: - selector: google.example.v1.ExampleService.BatchCreateExample batching: element_count_threshold: 1000 request_byte_threshold: 100000000 delay_threshold_millis: 10" +}, "longRunning": { "$ref": "LongRunning", "description": "Describes settings to use for long-running operations when generating API methods for RPCs.
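Taken together, the `BatchingSettingsProto` thresholds are independent triggers: a batch is flushed as soon as the element count or the aggregated byte size of the batched field crosses its threshold, or once `delay_threshold` elapses after the first element is added. The sketch below illustrates only that trigger logic; the `Batcher` class and `send_batch` transport hook are hypothetical illustrations, not part of any generated client.

```python
import time


def send_batch(batch):
    """Hypothetical transport hook; a real client would issue one RPC here."""
    print(f"sending batch of {len(batch)} elements")


class Batcher:
    """Minimal sketch of BatchingSettingsProto trigger semantics."""

    def __init__(self, element_count_threshold=1000,
                 request_byte_threshold=100_000_000,
                 delay_threshold_s=0.010):
        self.element_count_threshold = element_count_threshold
        self.request_byte_threshold = request_byte_threshold
        self.delay_threshold_s = delay_threshold_s
        self._elements, self._bytes, self._first_added_at = [], 0, None

    def add(self, element: bytes):
        """Buffer one element; flush if a count or byte threshold is crossed."""
        if self._first_added_at is None:
            self._first_added_at = time.monotonic()
        self._elements.append(element)
        self._bytes += len(element)
        if (len(self._elements) >= self.element_count_threshold
                or self._bytes >= self.request_byte_threshold):
            self.flush()

    def poll(self):
        """Call periodically; flushes once delay_threshold has elapsed."""
        if (self._first_added_at is not None
                and time.monotonic() - self._first_added_at
                >= self.delay_threshold_s):
            self.flush()

    def flush(self):
        """Send whatever is buffered and reset all counters."""
        if not self._elements:
            return
        batch, self._elements, self._bytes = self._elements, [], 0
        self._first_added_at = None
        send_batch(batch)
```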
Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: 60s # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: 360s # 6 minutes total_poll_timeout: 54000s # 90 minutes" diff --git a/googleapiclient/discovery_cache/documents/sheets.v4.json b/googleapiclient/discovery_cache/documents/sheets.v4.json index 56b4037bc1..d95ba6b398 100644 --- a/googleapiclient/discovery_cache/documents/sheets.v4.json +++ b/googleapiclient/discovery_cache/documents/sheets.v4.json @@ -875,7 +875,7 @@ } } }, -"revision": "20250509", +"revision": "20250513", "rootUrl": "https://sheets.googleapis.com/", "schemas": { "AddBandingRequest": { @@ -1239,10 +1239,15 @@ "id": "BandedRange", "properties": { "bandedRangeId": { -"description": "The ID of the banded range.", +"description": "The ID of the banded range. If unset, refer to banded_range_reference.", "format": "int32", "type": "integer" }, +"bandedRangeReference": { +"description": "Output only. The reference of the banded range, used to identify a banded range whose ID is not supported by banded_range_id.", +"readOnly": true, +"type": "string" +}, "columnProperties": { "$ref": "BandingProperties", "description": "Properties for column bands. These properties are applied on a column- by-column basis throughout all the columns in the range. At least one of row_properties or column_properties must be specified." diff --git a/googleapiclient/discovery_cache/documents/spanner.v1.json b/googleapiclient/discovery_cache/documents/spanner.v1.json index 2aa9ae2f8d..5a8a4dd24f 100644 --- a/googleapiclient/discovery_cache/documents/spanner.v1.json +++ b/googleapiclient/discovery_cache/documents/spanner.v1.json @@ -59,11 +59,6 @@ }, { "description": "Regional Endpoint", -"endpointUrl": "https://spanner.us-east7.rep.googleapis.com/", -"location": "us-east7" -}, -{ -"description": "Regional Endpoint", "endpointUrl": "https://spanner.us-south1.rep.googleapis.com/", "location": "us-south1" }, @@ -91,6 +86,11 @@ "description": "Regional Endpoint", "endpointUrl": "https://spanner.us-west8.rep.googleapis.com/", "location": "us-west8" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://spanner.us-east7.rep.googleapis.com/", +"location": "us-east7" +} ], "fullyEncodeReservedExpansion": true, @@ -1462,7 +1462,7 @@ "databases": { "methods": { "addSplitPoints": { -"description": "Adds split points to specified tables, indexes of a database.", +"description": "Adds split points to specified tables and indexes of a database.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:addSplitPoints", "httpMethod": "POST", "id": "spanner.projects.instances.databases.addSplitPoints", @@ -1471,7 +1471,7 @@ ], "parameters": { "database": { -"description": "Required. The database on whose tables/indexes split points are to be added. Values are of the form `projects//instances//databases/`.", +"description": "Required. The database on whose tables or indexes the split points are to be added.
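The new `bandedRangeReference` field gives callers an identifier for banded ranges whose IDs the int32 `bandedRangeId` cannot carry. A short sketch with the discovery-based Python client; the spreadsheet ID is a placeholder and application-default credentials are assumed:

```python
from googleapiclient.discovery import build

SPREADSHEET_ID = "your-spreadsheet-id"  # placeholder

# build() picks up application-default credentials.
service = build("sheets", "v4")
spreadsheet = service.spreadsheets().get(
    spreadsheetId=SPREADSHEET_ID,
    fields="sheets(bandedRanges(bandedRangeId,bandedRangeReference))",
).execute()

for sheet in spreadsheet.get("sheets", []):
    for banded in sheet.get("bandedRanges", []):
        # Per the updated schema, bandedRangeId may be unset; fall back to
        # the output-only bandedRangeReference in that case.
        ref = banded.get("bandedRangeId") or banded.get("bandedRangeReference")
        print("banded range identifier:", ref)
```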
Values are of the form `projects//instances//databases/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", "required": true, @@ -3406,7 +3406,7 @@ } } }, -"revision": "20250304", +"revision": "20250523", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "AdaptMessageRequest": { @@ -3467,7 +3467,7 @@ "id": "AddSplitPointsRequest", "properties": { "initiator": { -"description": "Optional. A user-supplied tag associated with the split points. For example, \"initial_data_load\", \"special_event_1\". Defaults to \"CloudAddSplitPointsAPI\" if not specified. The length of the tag must not exceed 50 characters,else will be trimmed. Only valid UTF8 characters are allowed.", +"description": "Optional. A user-supplied tag associated with the split points. For example, \"initial_data_load\", \"special_event_1\". Defaults to \"CloudAddSplitPointsAPI\" if not specified. The length of the tag must not exceed 50 characters, or else it is trimmed. Only valid UTF8 characters are allowed.", "type": "string" }, "splitPoints": { @@ -4553,6 +4553,21 @@ }, "type": "object" }, +"DatabaseMoveConfig": { +"description": "The configuration for each database in the target instance configuration.", +"id": "DatabaseMoveConfig", +"properties": { +"databaseId": { +"description": "Required. The unique identifier of the database resource in the Instance. For example, if the database URI is projects/foo/instances/bar/databases/baz, the ID to supply here is baz.", +"type": "string" +}, +"encryptionConfig": { +"$ref": "InstanceEncryptionConfig", +"description": "Optional. Encryption configuration to be used for the database in the target configuration. Should be specified for every database which currently uses CMEK encryption. If a database currently uses GOOGLE_MANAGED encryption and a target encryption config is not specified, it defaults to GOOGLE_MANAGED. If a database currently uses Google-managed encryption and a target encryption config is specified, the request is rejected. If a database currently uses CMEK encryption, a target encryption config must be specified. You cannot move a CMEK database to a Google-managed encryption database by MoveInstance." +} +}, +"type": "object" +}, "DatabaseRole": { "description": "A Cloud Spanner database role.", "id": "DatabaseRole", @@ -5334,6 +5349,24 @@ }, "type": "object" }, +"InstanceEncryptionConfig": { +"description": "Encryption configuration for a Cloud Spanner database.", +"id": "InstanceEncryptionConfig", +"properties": { +"kmsKeyName": { +"description": "Optional. This field is maintained for backwards compatibility. For new callers, we recommend using `kms_key_names` to specify the KMS key. `kms_key_name` should only be used if the location of the KMS key matches the database instance\u2019s configuration (location) exactly. For example, the KMS key location is us-central1 or nam3 and the database instance is also in us-central1 or nam3. The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`.", +"type": "string" +}, +"kmsKeyNames": { +"description": "Optional. Specifies the KMS configuration for one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key.
* For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "InstanceOperationProgress": { "description": "Encapsulates progress related information for a Cloud Spanner long running instance operations.", "id": "InstanceOperationProgress", @@ -5994,6 +6027,13 @@ "targetConfig": { "description": "Required. The target instance configuration where to move the instance. Values are of the form `projects//instanceConfigs/`.", "type": "string" +}, +"targetDatabaseMoveConfigs": { +"description": "Optional. The configuration for each database in the target instance configuration.", +"items": { +"$ref": "DatabaseMoveConfig" +}, +"type": "array" } }, "type": "object" @@ -6519,7 +6559,7 @@ "type": "string" }, "readTimestamp": { -"description": "Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds. Example: `\"2014-10-02T15:01:23.045123456Z\"`.", +"description": "Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read is blocked until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds. Example: `\"2014-10-02T15:01:23.045123456Z\"`.", "format": "google-datetime", "type": "string" }, @@ -7093,7 +7133,7 @@ "type": "object" }, "SplitPoints": { -"description": "The split points of a table/index.", +"description": "The split points of a table or an index.", "id": "SplitPoints", "properties": { "expireTime": { @@ -7106,7 +7146,7 @@ "type": "string" }, "keys": { -"description": "Required. The list of split keys, i.e., the split boundaries.", +"description": "Required. The list of split keys. In essence, the split boundaries.", "items": { "$ref": "Key" }, @@ -7225,7 +7265,7 @@ }, "precommitToken": { "$ref": "MultiplexedSessionPrecommitToken", -"description": "A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction." +"description": "A precommit token is included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. 
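Putting the new pieces together: each CMEK database being moved gets a `DatabaseMoveConfig`, whose `InstanceEncryptionConfig` lists keys covering every region of the target configuration. A hedged sketch with the discovery-based client; all resource names are placeholders, and it assumes the `projects.instances.move` method that accepts this `MoveInstanceRequest`:

```python
from googleapiclient.discovery import build

# Placeholder resource names.
INSTANCE = "projects/my-project/instances/my-instance"
TARGET_CONFIG = "projects/my-project/instanceConfigs/nam3"

spanner = build("spanner", "v1")
operation = spanner.projects().instances().move(
    name=INSTANCE,
    body={
        "targetConfig": TARGET_CONFIG,
        # One entry per database that currently uses CMEK; databases using
        # Google-managed encryption may be omitted.
        "targetDatabaseMoveConfigs": [
            {
                "databaseId": "my-database",
                "encryptionConfig": {
                    # Keys must fully cover all regions of the target
                    # instance configuration.
                    "kmsKeyNames": [
                        "projects/my-project/locations/us-east1/keyRings/kr/cryptoKeys/k1",
                        "projects/my-project/locations/us-east4/keyRings/kr/cryptoKeys/k2",
                    ]
                },
            }
        ],
    },
).execute()
print("long-running operation:", operation["name"])
```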
The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction." }, "readTimestamp": { "description": "For snapshot read-only transactions, the read timestamp chosen for the transaction. Not returned by default: see TransactionOptions.ReadOnly.return_read_timestamp. A timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds. Example: `\"2014-10-02T15:01:23.045123456Z\"`.", @@ -7236,11 +7276,11 @@ "type": "object" }, "TransactionOptions": { -"description": "Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. 
Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. 
If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a \"negotiation phase\" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as \"version GC\". 
By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. 
For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table.", +"description": "Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner selects a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. 
Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit fails with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort.
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They block until all conflicting transactions that can be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a \"negotiation phase\" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. 
However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows are read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as \"version GC\". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner can't perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change streams: A change stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 is returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable.
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that can't be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table.", "id": "TransactionOptions", "properties": { "excludeTxnFromChangeStreams": { -"description": "When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error.", +"description": "When `exclude_txn_from_change_streams` is set to `true`, it prevents read or write transactions from being tracked in change streams. * If the DDL option `allow_txn_exclusion` is set to `true`, then the updates made within this transaction aren't recorded in the change stream. * If you don't set the DDL option `allow_txn_exclusion` or if it's set to `false`, then the updates made within this transaction are recorded in the change stream. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction are recorded in all change streams that are tracking columns modified by these transactions.
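The partitioned DML rules above translate directly into the session API: begin a transaction with the `partitionedDml` option, then run exactly one fully partitionable, idempotent statement through it. A sketch with the discovery-based client; the database name and SQL are placeholders:

```python
from googleapiclient.discovery import build

DATABASE = "projects/my-project/instances/my-instance/databases/my-db"  # placeholder

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = sessions.create(database=DATABASE, body={}).execute()
try:
    # Partitioned DML needs its own transaction; it can't be mixed with
    # reads, queries, or other DML statements.
    txn = sessions.beginTransaction(
        session=session["name"],
        body={"options": {"partitionedDml": {}}},
    ).execute()
    # The statement must be idempotent: each partition sees it at least once.
    result = sessions.executeSql(
        session=session["name"],
        body={
            "sql": "UPDATE Albums SET marketing_budget = 0 "
                   "WHERE marketing_budget IS NULL",
            "transaction": {"id": txn["id"]},
        },
    ).execute()
    print("rows modified (lower bound):",
          result.get("stats", {}).get("rowCountLowerBound"))
finally:
    sessions.delete(name=session["name"]).execute()
```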
The `exclude_txn_from_change_streams` option can only be specified for read-write or partitioned DML transactions, otherwise the API returns an `INVALID_ARGUMENT` error.", "type": "boolean" }, "isolationLevel": { @@ -7253,7 +7293,7 @@ "enumDescriptions": [ "Default value. If the value is not specified, the `SERIALIZABLE` isolation level is used.", "All transactions appear as if they executed in a serial order, even if some of the reads, writes, and other operations of distinct transactions actually occurred in parallel. Spanner assigns commit timestamps that reflect the order of committed transactions to implement this property. Spanner offers a stronger guarantee than serializability called external consistency. For further details, please refer to https://cloud.google.com/spanner/docs/true-time-external-consistency#serializability.", -"All reads performed during the transaction observe a consistent snapshot of the database, and the transaction will only successfully commit in the absence of conflicts between its updates and any concurrent updates that have occurred since that snapshot. Consequently, in contrast to `SERIALIZABLE` transactions, only write-write conflicts are detected in snapshot transactions. This isolation level does not support Read-only and Partitioned DML transactions. When `REPEATABLE_READ` is specified on a read-write transaction, the locking semantics default to `OPTIMISTIC`." +"All reads performed during the transaction observe a consistent snapshot of the database, and the transaction is only successfully committed in the absence of conflicts between its updates and any concurrent updates that have occurred since that snapshot. Consequently, in contrast to `SERIALIZABLE` transactions, only write-write conflicts are detected in snapshot transactions. This isolation level does not support Read-only and Partitioned DML transactions. When `REPEATABLE_READ` is specified on a read-write transaction, the locking semantics default to `OPTIMISTIC`." ], "type": "string" }, @@ -7263,7 +7303,7 @@ }, "readOnly": { "$ref": "ReadOnly", -"description": "Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource." +"description": "Transaction does not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource." 
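For contrast with the locking and partitioned modes, a single-use snapshot read-only transaction with a strong bound takes no locks, never aborts, and needs no Commit or Rollback. A sketch under the same placeholder names:

```python
from googleapiclient.discovery import build

DATABASE = "projects/my-project/instances/my-instance/databases/my-db"  # placeholder

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = sessions.create(database=DATABASE, body={}).execute()
try:
    result = sessions.executeSql(
        session=session["name"],
        body={
            "sql": "SELECT 1",
            # Strong bound: the read observes every transaction committed
            # before it started; returnReadTimestamp surfaces the chosen
            # timestamp in the result metadata.
            "transaction": {
                "singleUse": {
                    "readOnly": {"strong": True, "returnReadTimestamp": True}
                }
            },
        },
    ).execute()
    print(result.get("rows", []))
finally:
    sessions.delete(name=session["name"]).execute()
```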
}, "readWrite": { "$ref": "ReadWrite", diff --git a/googleapiclient/discovery_cache/documents/sqladmin.v1.json b/googleapiclient/discovery_cache/documents/sqladmin.v1.json index 431761f728..bb530c9bfa 100644 --- a/googleapiclient/discovery_cache/documents/sqladmin.v1.json +++ b/googleapiclient/discovery_cache/documents/sqladmin.v1.json @@ -2594,7 +2594,7 @@ } } }, -"revision": "20250508", +"revision": "20250516", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -3946,6 +3946,10 @@ false ], "type": "string" }, +"clearNetwork": { +"description": "Clears private network settings when the instance is restored.", +"type": "boolean" +}, "connectionName": { "description": "Connection name of the Cloud SQL instance used in connection strings.", "type": "string" @@ -7072,7 +7076,8 @@ false "USERS_NOT_CREATED_IN_REPLICA", "UNSUPPORTED_SYSTEM_OBJECTS", "UNSUPPORTED_TABLES_WITH_REPLICA_IDENTITY", -"SELECTED_OBJECTS_NOT_EXIST_ON_SOURCE" +"SELECTED_OBJECTS_NOT_EXIST_ON_SOURCE", +"PSC_ONLY_INSTANCE_WITH_NO_NETWORK_ATTACHMENT_URI" ], "enumDescriptions": [ "", @@ -7128,7 +7133,8 @@ false "The source database has users that aren't created in the replica. First, create all users, which are in the pg_user_mappings table of the source database, in the destination instance. Then, perform the migration.", "The selected objects include system objects that aren't supported for migration.", "The source database has tables with the FULL or NOTHING replica identity. Before starting your migration, either remove the identity or change it to DEFAULT. Note that this is an error and will block the migration.", -"The selected objects don't exist on the source instance." +"The selected objects don't exist on the source instance.", +"PSC only destination instance does not have a network attachment URI." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json b/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json index 5813340d80..2008f44132 100644 --- a/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json +++ b/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json @@ -2594,7 +2594,7 @@ } } }, -"revision": "20250508", +"revision": "20250516", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -3946,6 +3946,10 @@ false ], "type": "string" }, +"clearNetwork": { +"description": "Clears private network settings when the instance is restored.", +"type": "boolean" +}, "connectionName": { "description": "Connection name of the Cloud SQL instance used in connection strings.", "type": "string" @@ -7070,7 +7074,8 @@ false "USERS_NOT_CREATED_IN_REPLICA", "UNSUPPORTED_SYSTEM_OBJECTS", "UNSUPPORTED_TABLES_WITH_REPLICA_IDENTITY", -"SELECTED_OBJECTS_NOT_EXIST_ON_SOURCE" +"SELECTED_OBJECTS_NOT_EXIST_ON_SOURCE", +"PSC_ONLY_INSTANCE_WITH_NO_NETWORK_ATTACHMENT_URI" ], "enumDescriptions": [ "", @@ -7126,7 +7131,8 @@ false "The source database has users that aren't created in the replica. First, create all users, which are in the pg_user_mappings table of the source database, in the destination instance. Then, perform the migration.", "The selected objects include system objects that aren't supported for migration.", "The source database has tables with the FULL or NOTHING replica identity. Before starting your migration, either remove the identity or change it to DEFAULT. Note that this is an error and will block the migration.", -"The selected objects don't exist on the source instance." 
+"The selected objects don't exist on the source instance.", +"PSC only destination instance does not have a network attachment URI." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/storage.v1.json b/googleapiclient/discovery_cache/documents/storage.v1.json index 8b28eee817..f078968080 100644 --- a/googleapiclient/discovery_cache/documents/storage.v1.json +++ b/googleapiclient/discovery_cache/documents/storage.v1.json @@ -243,7 +243,7 @@ "location": "europe-north2" } ], -"etag": "\"38313932303531353034313530333239303931\"", +"etag": "\"3137363332393730373232373731313039383638\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -4529,7 +4529,7 @@ } } }, -"revision": "20250509", +"revision": "20250521", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AdvanceRelocateBucketOperationRequest": { @@ -4821,6 +4821,10 @@ "ipFilter": { "description": "The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'.", "properties": { +"allowCrossOrgVpcs": { +"description": "Whether to allow cross-org VPCs in the bucket's IP filter configuration.", +"type": "boolean" +}, "mode": { "description": "The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/tasks.v1.json b/googleapiclient/discovery_cache/documents/tasks.v1.json index 2840e8d31c..e9d2e989dc 100644 --- a/googleapiclient/discovery_cache/documents/tasks.v1.json +++ b/googleapiclient/discovery_cache/documents/tasks.v1.json @@ -475,7 +475,7 @@ "type": "string" }, "parent": { -"description": "Optional. New parent task identifier. If the task is moved to the top level, this parameter is omitted. The task set as parent must exist in the task list and can not be hidden. Exceptions: 1. Assigned tasks can not be set as parent task (have subtasks) or be moved under a parent task (become subtasks). 2. Tasks that are both completed and hidden cannot be nested, so the parent field must be empty.", +"description": "Optional. New parent task identifier. If the task is moved to the top level, this parameter is omitted. The task set as parent must exist in the task list and can not be hidden. Exceptions: 1. Assigned and repeating tasks cannot be set as parent tasks (have subtasks), or be moved under a parent task (become subtasks). 2. Tasks that are both completed and hidden cannot be nested, so the parent field must be empty.", "location": "query", "type": "string" }, @@ -576,7 +576,7 @@ } } }, -"revision": "20250415", +"revision": "20250518", "rootUrl": "https://tasks.googleapis.com/", "schemas": { "AssignmentInfo": {