diff --git a/.goreleaser.yml b/.goreleaser.yml index 47cd770..6e58448 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,7 +1,7 @@ # Visit https://goreleaser.com for documentation on how to customize this # behavior. env: - - PROVIDER_VERSION=3.2.3 + - PROVIDER_VERSION=3.3.0 before: hooks: # this is just an example and not a requirement for provider building/publishing diff --git a/GNUmakefile b/GNUmakefile index 209a97b..1d38740 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -3,8 +3,8 @@ HOSTNAME=delphix.com NAMESPACE=dct NAME=delphix BINARY=terraform-provider-${NAME} -VERSION=3.2.3 -OS_ARCH=darwin_amd64 +VERSION=3.3.0 +OS_ARCH=darwin_arm64 default: install diff --git a/docs/resources/appdata_dsource.md b/docs/resources/appdata_dsource.md index c149638..b3e8b39 100644 --- a/docs/resources/appdata_dsource.md +++ b/docs/resources/appdata_dsource.md @@ -125,27 +125,27 @@ resource "delphix_appdata_dsource" "dsource_name" { * `make_current_account_owner` - (Required) Whether the account creating this reporting schedule must be configured as owner of the reporting schedule. -* `description` - (Optional) The notes/description for the dSource. +* `description` - The notes/description for the dSource. * `link_type` - (Required) The type of link to create. Default is AppDataDirect. * `AppDataDirect` - Represents the AppData specific parameters of a link request for a source directly replicated into the Delphix Engine. * `AppDataStaged` - Represents the AppData specific parameters of a link request for a source with a staging source. -* `name` - (Optional) The unique name of the dSource. If unset, a name is randomly generated. +* `name` - The unique name of the dSource. If unset, a name is randomly generated. -* `staging_mount_base` - (Optional) The base mount point for the NFS mount on the staging environment [AppDataStaged only]. +* `staging_mount_base` - The base mount point for the NFS mount on the staging environment [AppDataStaged only]. 
* `environment_user` - (Required) The OS user to use for linking. * `staging_environment` - (Required) The environment used as an intermediate stage to pull data into Delphix [AppDataStaged only]. -* `staging_environment_user` - (Optional) The environment user used to access the staging environment [AppDataStaged only]. +* `staging_environment_user` - The environment user used to access the staging environment [AppDataStaged only]. -* `tags` - (Optional) The tags to be created for dSource. This is a map of 2 parameters: +* `tags` - The tags to be created for dSource. This is a map of 2 parameters: * `key` - (Required) Key of the tag * `value` - (Required) Value of the tag -* `ops_pre_sync` - (Optional) Operations to perform before syncing the created dSource. These operations can quiesce any data prior to syncing +* `ops_pre_sync` - Operations to perform before syncing the created dSource. These operations can quiesce any data prior to syncing * `name` - Name of the hook * `command` - Command to be executed * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` @@ -162,7 +162,7 @@ resource "delphix_appdata_dsource" "dsource_name" { * `azure_vault_secret_key` - Azure vault key in the key-value store. * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. -* `ops_post_sync` - (Optional) Operations to perform after syncing a created dSource. +* `ops_post_sync` - Operations to perform after syncing a created dSource. * `name` - Name of the hook * `command` - Command to be executed * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` @@ -179,14 +179,14 @@ resource "delphix_appdata_dsource" "dsource_name" { * `azure_vault_secret_key` - Azure vault key in the key-value store. * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. 
-* `excludes` - (Optional) List of subdirectories in the source to exclude when syncing data.These paths are relative to the root of the source directory. [AppDataDirect only] +* `excludes` - List of subdirectories in the source to exclude when syncing data.These paths are relative to the root of the source directory. [AppDataDirect only] -* `follow_symlinks` - (Optional) List of symlinks in the source to follow when syncing data.These paths are relative to the root of the source directory. All other symlinks are preserved. [AppDataDirect only] +* `follow_symlinks` - List of symlinks in the source to follow when syncing data.These paths are relative to the root of the source directory. All other symlinks are preserved. [AppDataDirect only] -* `parameters` - (Optional) The JSON payload is based on the type of dSource being created. Different data sources require different parameters. +* `parameters` - The JSON payload is based on the type of dSource being created. Different data sources require different parameters. -* `sync_parameters` - (Optional) The JSON payload conforming to the snapshot parameters definition in a LUA toolkit or platform plugin. +* `sync_parameters` - The JSON payload conforming to the snapshot parameters definition in a LUA toolkit or platform plugin. -* `skip_wait_for_snapshot_creation` - (Optional) By default this resource will wait for a snapshot to be created post-dSource creation. This ensure a snapshot is available during the VDB provisioning. This behavior can be skipped by setting this parameter to `true`. +* `skip_wait_for_snapshot_creation` - By default this resource will wait for a snapshot to be created post-dSource creation. This ensure a snapshot is available during the VDB provisioning. This behavior can be skipped by setting this parameter to `true`. -* `wait_time` - (Optional) By default this resource waits 0 minutes for a snapshot to be created. Increase the integer value as needed for larger dSource snapshots. 
This parameter can be ignored if 'skip_wait_for_snapshot_creation' is set to `true`. +* `wait_time` - By default this resource waits 0 minutes for a snapshot to be created. Increase the integer value as needed for larger dSource snapshots. This parameter can be ignored if 'skip_wait_for_snapshot_creation' is set to `true`. diff --git a/docs/resources/database_postgresql.md b/docs/resources/database_postgresql.md index 1b117ab..5b880fc 100644 --- a/docs/resources/database_postgresql.md +++ b/docs/resources/database_postgresql.md @@ -27,9 +27,9 @@ resource "delphix_database_postgresql" "source" { * `repository_value` - (Required) The Id or Name of the Repository onto which the source will be created.. -* `environment_value` - (Optional) The Id or Name of the environment to create the source on. +* `environment_value` - The Id or Name of the environment to create the source on. -* `engine_value` - (Optional) The Id or Name of the engine to create the source on. +* `engine_value` - The Id or Name of the engine to create the source on. * `id` - The Source object entity ID. @@ -66,3 +66,18 @@ resource "delphix_database_postgresql" "source" { * `tags` - The tags to be created for database. This is a map of 2 parameters: * `key` - Key of the tag * `value` - Value of the tag + +## Import (Beta) + +Use the [`import` block](https://developer.hashicorp.com/terraform/language/import) to add source configs created directly in Data Control Tower into a Terraform state file. + +For example: +```terraform +import { + to = delphix_database_postgresql.source_config_import + id = "source_config_id" +} +``` + +*This is a beta feature. 
Delphix offers no guarantees of support or compatibility.* + diff --git a/docs/resources/environment.md b/docs/resources/environment.md index 615cfaf..0d8103c 100644 --- a/docs/resources/environment.md +++ b/docs/resources/environment.md @@ -165,42 +165,42 @@ resource "delphix_environment" "fc-tgt-cluster" { * `engine_id` - (Required) The DCT ID of the Engine on which to create the environment. This ID can be obtained by querying the DCT engines API. A Delphix Engine must be registered with DCT first for it to create an Engine ID. * `os_name` - (Required) Operating system type of the environment. Valid values are `[UNIX, WINDOWS]` * `hostname` - (Required) Host Name or IP Address of the host that being added to Delphix. -* `name` - (Optional) The name of the environment. -* `is_cluster` - (Optional) Whether the environment to be created is a cluster. -* `cluster_home` - (Optional) Absolute path to cluster home drectory. This parameter is (Required) for UNIX cluster environments. -* `staging_environment` - (Optional) Id of the environment where Delphix Connector is installed. This is a (Required) parameter when creating Windows source environments. -* `connector_port` - (Optional) Specify port on which Delphix connector will run. This is a (Required) parameter when creating Windows target environments. -* `is_target` - (Optional) Whether the environment to be created is a target cluster environment. This property is used only when creating Windows cluster environments. -* `ssh_port` - (Optional) ssh port of the environment. -* `toolkit_path` - (Optional) The path where Delphix toolkit can be pushed. -* `username` - (Optional) OS username for Delphix. -* `password` - (Optional) OS user's password. -* `vault` - (Optional) The name or reference of the vault from which to read the host credentials. -* `hashicorp_vault_engine` - (Optional) Vault engine name where the credential is stored. 
-* `hashicorp_vault_secret_path` - (Optional) Path in the vault engine where the credential is stored. -* `hashicorp_vault_username_key` - (Optional) Key for the username in the key-value store. -* `hashicorp_vault_secret_key` - (Optional) Key for the password in the key-value store. -* `cyberark_vault_query_string` - (Optional) Query to find a credential in the CyberArk vault. -* `use_kerberos_authentication` - (Optional) Whether to use kerberos authentication. -* `use_engine_public_key` - (Optional) Whether to use public key authentication. -* `nfs_addresses` - (Optional) Array of ip address or hostnames. Valid values are a list of addresses. For eg: `["192.168.10.2"]` -* `ase_db_username` - (Optional) Username for the SAP ASE database. -* `ase_db_password` - (Optional) Password for the SAP ASE database. -* `ase_db_vault` - (Optional) The name or reference of the vault from which to read the ASE database credentials. -* `ase_db_hashicorp_vault_engine` - (Optional) Vault engine name where the credential is stored. -* `ase_db_hashicorp_vault_secret_path` - (Optional) Path in the vault engine where the credential is stored. -* `ase_db_hashicorp_vault_username_key` - (Optional) Key for the username in the key-value store. -* `ase_db_hashicorp_vault_secret_key` - (Optional) Key for the password in the key-value store. -* `ase_db_cyberark_vault_query_string` - (Optional) Query to find a credential in the CyberArk vault. -* `ase_db_use_kerberos_authentication` - (Optional) Whether to use kerberos authentication for ASE DB discovery. -* `java_home` - (Optional) The path to the user managed Java Development Kit (JDK). If not specified, then the OpenJDK will be used. -* `dsp_keystore_path` - (Optional) DSP keystore path. -* `dsp_keystore_password` - (Optional) DSP keystore password. -* `dsp_keystore_alias` - (Optional) DSP keystore alias. -* `dsp_truststore_path` - (Optional) DSP truststore path. -* `dsp_truststore_password` - (Optional) DSP truststore password. 
-* `description` - (Optional) The environment description. -* `tags` - (Optional) The tags to be created for this environment. This is a map of 2 parameters: +* `name` - The name of the environment. +* `is_cluster` - Whether the environment to be created is a cluster. +* `cluster_home` - Absolute path to cluster home directory. This parameter is (Required) for UNIX cluster environments. +* `staging_environment` - Id of the environment where Delphix Connector is installed. This is a (Required) parameter when creating Windows source environments. +* `connector_port` - Specify port on which Delphix connector will run. This is a (Required) parameter when creating Windows target environments. +* `is_target` - Whether the environment to be created is a target cluster environment. This property is used only when creating Windows cluster environments. +* `ssh_port` - ssh port of the environment. +* `toolkit_path` - The path where Delphix toolkit can be pushed. +* `username` - OS username for Delphix. +* `password` - OS user's password. +* `vault` - The name or reference of the vault from which to read the host credentials. +* `hashicorp_vault_engine` - Vault engine name where the credential is stored. +* `hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored. +* `hashicorp_vault_username_key` - Key for the username in the key-value store. +* `hashicorp_vault_secret_key` - Key for the password in the key-value store. +* `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. +* `use_kerberos_authentication` - Whether to use kerberos authentication. +* `use_engine_public_key` - Whether to use public key authentication. +* `nfs_addresses` - Array of ip address or hostnames. Valid values are a list of addresses. For eg: `["192.168.10.2"]` +* `ase_db_username` - Username for the SAP ASE database. +* `ase_db_password` - Password for the SAP ASE database. 
+* `ase_db_vault` - The name or reference of the vault from which to read the ASE database credentials. +* `ase_db_hashicorp_vault_engine` - Vault engine name where the credential is stored. +* `ase_db_hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored. +* `ase_db_hashicorp_vault_username_key` - Key for the username in the key-value store. +* `ase_db_hashicorp_vault_secret_key` - Key for the password in the key-value store. +* `ase_db_cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. +* `ase_db_use_kerberos_authentication` - Whether to use kerberos authentication for ASE DB discovery. +* `java_home` - The path to the user managed Java Development Kit (JDK). If not specified, then the OpenJDK will be used. +* `dsp_keystore_path` - DSP keystore path. +* `dsp_keystore_password` - DSP keystore password. +* `dsp_keystore_alias` - DSP keystore alias. +* `dsp_truststore_path` - DSP truststore path. +* `dsp_truststore_password` - DSP truststore password. +* `description` - The environment description. +* `tags` - The tags to be created for this environment. This is a map of 2 parameters: * `key` - (Required) Key of the tag * `value` - (Required) Value of the tag diff --git a/docs/resources/oracle_dsource.md b/docs/resources/oracle_dsource.md index 49da1d6..48cff63 100644 --- a/docs/resources/oracle_dsource.md +++ b/docs/resources/oracle_dsource.md @@ -60,97 +60,97 @@ resource "delphix_oracle_dsource" "test_oracle_dsource" { * `make_current_account_owner` - (Required) Whether the account creating this reporting schedule must be configured as owner of the reporting schedule. -* `description` - (Optional) The notes/description for the dSource. +* `description` - The notes/description for the dSource. -* `external_file_path` - (Optional) External file path. +* `external_file_path` - External file path. -* `environment_user_id` - (Optional) Id of the environment user to use for linking. 
+* `environment_user_id` - Id of the environment user to use for linking. -* `backup_level_enabled` - (Optional) Boolean value indicates whether LEVEL-based incremental backups can be used on the source database. +* `backup_level_enabled` - Boolean value indicates whether LEVEL-based incremental backups can be used on the source database. -* `rman_channels` - (Optional) Number of parallel channels to use. +* `rman_channels` - Number of parallel channels to use. -* `files_per_set` - (Optional) Number of data files to include in each RMAN backup set. +* `files_per_set` - Number of data files to include in each RMAN backup set. -* `check_logical` - (Optional) True if extended block checking should be used for this linked database. +* `check_logical` - True if extended block checking should be used for this linked database. -* `encrypted_linking_enabled` - (Optional) True if SnapSync data from the source should be retrieved through an encrypted connection. Enabling this feature can decrease the performance of SnapSync from the source but has no impact on the performance of VDBs created from the retrieved data. +* `encrypted_linking_enabled` - True if SnapSync data from the source should be retrieved through an encrypted connection. Enabling this feature can decrease the performance of SnapSync from the source but has no impact on the performance of VDBs created from the retrieved data. -* `compressed_linking_enabled` - (Optional) True if SnapSync data from the source should be compressed over the network. Enabling this feature will reduce network bandwidth consumption and may significantly improve throughput, especially over slow network. +* `compressed_linking_enabled` - True if SnapSync data from the source should be compressed over the network. Enabling this feature will reduce network bandwidth consumption and may significantly improve throughput, especially over slow network. 
-* `bandwidth_limit` - (Optional) Bandwidth limit (MB/s) for SnapSync and LogSync network traffic. A value of 0 means no limit. +* `bandwidth_limit` - Bandwidth limit (MB/s) for SnapSync and LogSync network traffic. A value of 0 means no limit. -* `number_of_connections` - (Optional) Total number of transport connections to use during SnapSync. +* `number_of_connections` - Total number of transport connections to use during SnapSync. -* `diagnose_no_logging_faults` - (Optional) If true, NOLOGGING operations on this container are treated as faults and cannot be resolved manually. +* `diagnose_no_logging_faults` - If true, NOLOGGING operations on this container are treated as faults and cannot be resolved manually. -* `pre_provisioning_enabled` - (Optional) If true, pre-provisioning will be performed after every sync. +* `pre_provisioning_enabled` - If true, pre-provisioning will be performed after every sync. -* `link_now` - (Optional) True if initial load should be done immediately. +* `link_now` - True if initial load should be done immediately. -* `force_full_backup` - (Optional) Whether or not to take another full backup of the source database. +* `force_full_backup` - Whether or not to take another full backup of the source database. -* `double_sync` - (Optional) True if two SnapSyncs should be performed in immediate succession to reduce the number of logs required to provision the snapshot. This may significantly reduce the time necessary to provision from a snapshot. +* `double_sync` - True if two SnapSyncs should be performed in immediate succession to reduce the number of logs required to provision the snapshot. This may significantly reduce the time necessary to provision from a snapshot. -* `skip_space_check` - (Optional) Skip check that tests if there is enough space available to store the database in the Delphix Engine. 
The Delphix Engine estimates how much space a database will occupy after compression and prevents SnapSync if insufficient space is available. This safeguard can be overridden using this option. This may be useful when linking highly compressible databases. +* `skip_space_check` - Skip check that tests if there is enough space available to store the database in the Delphix Engine. The Delphix Engine estimates how much space a database will occupy after compression and prevents SnapSync if insufficient space is available. This safeguard can be overridden using this option. This may be useful when linking highly compressible databases. -* `do_not_resume` - (Optional) Indicates whether a fresh SnapSync must be started regardless if it was possible to resume the current SnapSync. If true, we will not resume but instead ignore previous progress and backup all datafiles even if already completed from previous failed SnapSync. This does not force a full backup, if an incremental was in progress this will start a new incremental snapshot. +* `do_not_resume` - Indicates whether a fresh SnapSync must be started regardless if it was possible to resume the current SnapSync. If true, we will not resume but instead ignore previous progress and backup all datafiles even if already completed from previous failed SnapSync. This does not force a full backup, if an incremental was in progress this will start a new incremental snapshot. -* `files_for_full_backup` - (Optional) List of datafiles to take a full backup of. This would be useful in situations where certain datafiles could not be backed up during previous SnapSync due to corruption or because they went offline. +* `files_for_full_backup` - List of datafiles to take a full backup of. This would be useful in situations where certain datafiles could not be backed up during previous SnapSync due to corruption or because they went offline. 
-* `log_sync_mode` - (Optional) LogSync operation mode for this database [ ARCHIVE_ONLY_MODE, ARCHIVE_REDO_MODE, UNDEFINED ]. +* `log_sync_mode` - LogSync operation mode for this database [ ARCHIVE_ONLY_MODE, ARCHIVE_REDO_MODE, UNDEFINED ]. -* `log_sync_interval` - (Optional) Interval between LogSync requests, in seconds. +* `log_sync_interval` - Interval between LogSync requests, in seconds. -* `non_sys_password` - (Optional) Password for non sys user authentication (Single tenant only). +* `non_sys_password` - Password for non sys user authentication (Single tenant only). -* `non_sys_username` - (Optional) Non-SYS database user to access this database. Only required for username-password auth (Single tenant only). +* `non_sys_username` - Non-SYS database user to access this database. Only required for username-password auth (Single tenant only). -* `non_sys_vault` - (Optional) The name or reference of the vault from which to read the database credentials (Single tenant only). +* `non_sys_vault` - The name or reference of the vault from which to read the database credentials (Single tenant only). -* `non_sys_hashicorp_vault_engine` - (Optional) Vault engine name where the credential is stored (Single tenant only). +* `non_sys_hashicorp_vault_engine` - Vault engine name where the credential is stored (Single tenant only). -* `non_sys_hashicorp_vault_secret_path` - (Optional) Path in the vault engine where the credential is stored (Single tenant only). +* `non_sys_hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored (Single tenant only). -* `non_sys_hashicorp_vault_username_key` - (Optional) Hashicorp vault key for the username in the key-value store (Single tenant only). +* `non_sys_hashicorp_vault_username_key` - Hashicorp vault key for the username in the key-value store (Single tenant only). -* `non_sys_hashicorp_vault_secret_key` - (Optional) Hashicorp vault key for the password in the key-value store (Single tenant only). 
+* `non_sys_hashicorp_vault_secret_key` - Hashicorp vault key for the password in the key-value store (Single tenant only). -* `non_sys_azure_vault_name` - (Optional) Azure key vault name (Single tenant only). +* `non_sys_azure_vault_name` - Azure key vault name (Single tenant only). -* `non_sys_azure_vault_username_key` - (Optional) Azure vault key for the username in the key-value store (Single tenant only). +* `non_sys_azure_vault_username_key` - Azure vault key for the username in the key-value store (Single tenant only). -* `non_sys_azure_vault_secret_key` - (Optional) Azure vault key for the password in the key-value store (Single tenant only). +* `non_sys_azure_vault_secret_key` - Azure vault key for the password in the key-value store (Single tenant only). -* `non_sys_cyberark_vault_query_string` - (Optional) Query to find a credential in the CyberArk vault (Single tenant only). +* `non_sys_cyberark_vault_query_string` - Query to find a credential in the CyberArk vault (Single tenant only). -* `fallback_username` - (Optional) The database fallback username. Optional if bequeath connections are enabled (to be used in case of bequeath connection failures). Only required for username-password auth.. +* `fallback_username` - The database fallback username. Optional if bequeath connections are enabled (to be used in case of bequeath connection failures). Only required for username-password auth.. -* `fallback_password` - (Optional) Password for fallback username. +* `fallback_password` - Password for fallback username. -* `fallback_vault` - (Optional) The name or reference of the vault from which to read the database credentials. +* `fallback_vault` - The name or reference of the vault from which to read the database credentials. -* `fallback_hashicorp_vault_engine` - (Optional) Vault engine name where the credential is stored. +* `fallback_hashicorp_vault_engine` - Vault engine name where the credential is stored. 
-* `fallback_hashicorp_vault_secret_path` - (Optional) Path in the vault engine where the credential is stored. +* `fallback_hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored. -* `fallback_hashicorp_vault_username_key` - (Optional) Hashicorp vault key for the username in the key-value store. +* `fallback_hashicorp_vault_username_key` - Hashicorp vault key for the username in the key-value store. -* `fallback_hashicorp_vault_secret_key` - (Optional) Hashicorp vault key for the password in the key-value store. +* `fallback_hashicorp_vault_secret_key` - Hashicorp vault key for the password in the key-value store. -* `fallback_azure_vault_name` - (Optional) Azure key vault name. +* `fallback_azure_vault_name` - Azure key vault name. -* `fallback_azure_vault_username_key` - (Optional) Azure vault key for the username in the key-value store. +* `fallback_azure_vault_username_key` - Azure vault key for the username in the key-value store. -* `fallback_azure_vault_secret_key` - (Optional) Azure vault key for the password in the key-value store. +* `fallback_azure_vault_secret_key` - Azure vault key for the password in the key-value store. -* `fallback_cyberark_vault_query_string` - (Optional) Query to find a credential in the CyberArk vault. +* `fallback_cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. -* `tags` - (Optional) The tags to be created for dSource. This is a map of 2 parameters: +* `tags` - The tags to be created for dSource. This is a map of 2 parameters: * `key` - (Required) Key of the tag * `value` - (Required) Value of the tag -* `ops_pre_log_sync` - (Optional) Operations to perform after syncing a created dSource and before running the LogSync. +* `ops_pre_log_sync` - Operations to perform after syncing a created dSource and before running the LogSync. * `name` - Name of the hook * `command` - Command to be executed * `shell` - Type of shell. 
Valid values are `[bash, shell, expect, ps, psd]` @@ -167,7 +167,7 @@ resource "delphix_oracle_dsource" "test_oracle_dsource" { * `azure_vault_secret_key` - Azure vault key in the key-value store. * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. -* `ops_pre_sync` - (Optional) Operations to perform before syncing the created dSource. These operations can quiesce any data prior to syncing +* `ops_pre_sync` - Operations to perform before syncing the created dSource. These operations can quiesce any data prior to syncing * `name` - Name of the hook * `command` - Command to be executed * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` @@ -184,7 +184,7 @@ resource "delphix_oracle_dsource" "test_oracle_dsource" { * `azure_vault_secret_key` - Azure vault key in the key-value store. * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. -* `ops_post_sync` - (Optional) Operations to perform after syncing a created dSource. +* `ops_post_sync` - Operations to perform after syncing a created dSource. * `name` - Name of the hook * `command` - Command to be executed * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` @@ -201,6 +201,6 @@ resource "delphix_oracle_dsource" "test_oracle_dsource" { * `azure_vault_secret_key` - Azure vault key in the key-value store. * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault. -* `skip_wait_for_snapshot_creation` - (Optional) By default this resource will wait for a snapshot to be created post-dSource creation. This ensure a snapshot is available during the VDB provisioning. This behavior can be skipped by setting this parameter to `true`. +* `skip_wait_for_snapshot_creation` - By default this resource will wait for a snapshot to be created post-dSource creation. This ensure a snapshot is available during the VDB provisioning. This behavior can be skipped by setting this parameter to `true`. 
-* `wait_time` - (Optional) By default this resource waits 0 minutes for a snapshot to be created. Increase the integer value as needed for larger dSource snapshots. This parameter can be ignored if 'skip_wait_for_snapshot_creation' is set to `true`. \ No newline at end of file +* `wait_time` - By default this resource waits 0 minutes for a snapshot to be created. Increase the integer value as needed for larger dSource snapshots. This parameter can be ignored if 'skip_wait_for_snapshot_creation' is set to `true`. \ No newline at end of file diff --git a/docs/resources/vdb.md b/docs/resources/vdb.md index 2291469..5786d79 100644 --- a/docs/resources/vdb.md +++ b/docs/resources/vdb.md @@ -1,264 +1,234 @@ # Resource: delphix_vdb -In Delphix terminology, a VDB is a database provisioned from either a dSource or another VDB which is a full read/write copy of the source data. +In Delphix terminology, a virtual database (VDB) is a full read/write copy of the source data. It is created (provisioned) from either a dSource or another VDB's data snapshot. A VDB is created and managed by the Delphix Continuous Data Engine. -The VDB resource allows Terraform to create, update, and delete Delphix VDBs. This specifically enables the apply and destroy Terraform commands. Update operation does not support all VDB parameters. All supported parameters are listed below. +The VDB (delphix_vdb) resource allows Terraform to create, update, and delete Delphix VDBs. This specifically enables the `plan`, `apply`, `update`, and `destroy` Terraform commands. All supported parameters are listed below. ## Example Usage -Provisioning can be done in 2 methods, provision by snapshot and provision by timestamp. +Provisioning can be done using one of three methods: provision by snapshot, timestamp, and bookmark. ```hcl # Provision a VDB using latest snapshot. 
-resource "delphix_vdb" "vdb_name" { +resource "delphix_vdb" "vdb_name_provision_by_snapshot" { auto_select_repository = true - source_data_id = "DATASOURCE_ID" + source_data_id = "" + snapshot_id = "" # Leave empty to select the latest snapshot } -# Provision a VDB using timestamp and post refresh hooks +# Provision a VDB from a bookmark and on a Target environment -resource "delphix_vdb" "vdb_name2" { - provision_type = "timestamp" - auto_select_repository = true - source_data_id = "DATASOURCE_ID" - timestamp = "2021-05-01T08:51:34.148000+00:00" - - post_refresh { - name = "HOOK_NAME" - command = "COMMAND" - } -} - -# Provision a VDB from a bookmark with a single VDB - -resource "delphix_vdb" "test_vdb" { +resource "delphix_vdb" "vdb_name_provision_by_bookmark_on_target_environment" { provision_type = "bookmark" auto_select_repository = true - bookmark_id = "BOOKMARK_ID" - environment_id = "ENV_ID" + bookmark_id = "" + environment_id = "" } -# Provision a VDB using snapshot and pre refresh hooks +# Provision a VDB using timestamp and configure post refresh hook -resource "delphix_vdb" "vdb_name" { - provision_type = "snapshot" +resource "delphix_vdb" "vdb_name_provision_by_timestamp_with_hook" { + provision_type = "timestamp" auto_select_repository = true - source_data_id = "DATASOURCE_ID" + source_data_id = "" + timestamp = "2021-05-01T08:51:34.148000+00:00" # Timestamp must be available on the source dataset. - pre_refresh { - name = "HOOK_NAME" - command = "COMMAND" + post_refresh { + command = "echo \"Hello World\"" + name = "Sample Hook" + shell = "SHELL" } } + ``` ## Argument Reference -* `source_data_id` - (Optional) The ID or name of the source object (dSource or VDB) to provision from. All other objects referenced by the parameters must live on the same engine as the source. +* `provision_type` - The type of provisioning to be carried out. Defaults to snapshot. 
Valid values are `[snapshot, bookmark, timestamp]` -* `engine_id` - (Optional) The ID or name of the Engine onto which to provision. If the source ID unambiguously identifies a source object, this parameter is unnecessary and ignored. +* `timestamp` - The point in time from which to execute the provision operation. Mutually exclusive with timestamp_in_database_timezone. If the timestamp is not set, selects the latest point. -* `target_group_id` - (Optional) The ID of the group into which the VDB will be provisioned. If unset, a group is selected randomly on the Engine. +* `timestamp_in_database_timezone` - The point in time from which to execute the provision operation, expressed as a date-time in the timezone of the source database. Mutually exclusive with timestamp. -* `name` - (Optional) The unique name of the provisioned VDB within a group. If unset, a name is randomly generated. +* `snapshot_id` - The ID or name of the Snapshot from which to execute the provision operation. If the `snapshot_id` is empty or the parameter is not specified, the latest snapshot is automatically selected. -* `database_name` - (Optional) The name of the database on the target environment. Defaults to name. +* `bookmark_id` - The ID or name of the Bookmark from which to execute the provision operation. The Bookmark must contain only one VDB. -* `cdb_id` - (Optional) The ID of the container database (CDB) to provision an Oracle Multitenant database into. When this is not set, a new vCDB will be provisioned. +* `source_data_id` - The ID or name of the source dataset (dSource, VDB, or Snapshot) to provision from. All other objects referenced by the following parameters must live on the same Continuous Data Engine as the chosen source. -* `cluster_node_ids` - (Optional) The cluster node ids, name or addresses for this provision operation (Oracle RAC Only). +* `engine_id` - The ID or name of the Continuous Data Engine onto which to provision. 
If the source ID unambiguously identifies a source object, this parameter is unnecessary and ignored. -* `truncate_log_on_checkpoint` - (Optional) Whether to truncate log on checkpoint (ASE only). +* `target_group_id` - The ID of the Continuous Data Engine's Dataset Group into which the VDB will be provisioned. If empty, the "Unassigned" Dataset Group is used. -* `os_username` - (Optional) The name of the privileged user to run the provision operation (Oracle Only). +* `name` - [Updatable] The unique name of the VDB. If empty, a name is randomly generated. -* `os_password` - (Optional) The password of the privileged user to run the provision operation (Oracle Only). +* `environment_id` - The ID or name of the Target environment where to provision the VDB. If "repository_id" unambiguously identifies a repository, then this value is ignored. -* `db_username` - (Optional) [Updatable] The username of the database user (Oracle, ASE Only). Only for update. +* `environment_user_id` - [Updatable] The environment user ID to use to connect to the Target environment. -* `db_password` - (Optional) [Updatable] The password of the database user (Oracle, ASE Only). Only for update. +* `repository_id` - The ID of the Target environment's repository where to provision the VDB. A repository typically corresponds to a database installation (Oracle home, database instance, etc). Setting this parameter may implicitly determine the environment where to provision the VDB. -* `environment_id` - (Optional) The ID or name of the target environment where to provision the VDB. If repository_id unambigously identifies a repository, this is unnecessary and ignored. Otherwise, a compatible repository is randomly selected on the environment. +* `auto_select_repository` - TRUE or FALSE value to automatically select a compatible environment and repository. Mutually exclusive with "repository_id". 
-* `environment_user_id` - (Optional)[Updatable] The environment user ID to use to connect to the target environment. +* `pre_refresh` - [Updatable] The commands to execute on the Target environment before refreshing the VDB. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `repository_id` - (Optional) The ID of the target repository where to provision the VDB. A repository typically corresponds to a database installation (Oracle home, database instance, ...). Setting this attribute implicitly determines the environment where to provision the VDB. +* `post_refresh` - [Updatable] The commands to execute on the Target environment after refreshing the VDB. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `auto_select_repository` - (Optional) Option to automatically select a compatible environment and repository. Mutually exclusive with repository_id. +* `pre_rollback` - [Updatable] The commands to execute on the Target environment before a rollback on the VDB. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `pre_refresh` - (Optional) The commands to execute on the target environment before refreshing the VDB. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. 
Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `post_rollback` - [Updatable] The commands to execute on the Target environment after a rollback on the VDB. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `post_refresh` - (Optional) The commands to execute on the target environment after refreshing the VDB. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `configure_clone` - [Updatable] The commands to execute on the Target environment when the VDB is created or refreshed. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `pre_rollback` - (Optional) The commands to execute on the target environment before rewinding the VDB. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `pre_snapshot` - [Updatable] The commands to execute on the Target environment before snapshotting a virtual database. These commands can quiesce any data prior to snapshotting. This is a map of five parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. 
Valid values are `[bash, shell, expect, ps, psd]`. -* `post_rollback` - (Optional) The commands to execute on the target environment after rewinding the VDB. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `post_snapshot` - [Updatable] The commands to execute on the Target environment after snapshotting a virtual database. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `configure_clone` - (Optional) The commands to execute on the target environment when the VDB is created or refreshed. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `pre_start` - [Updatable] The commands to execute on the Target environment before starting a virtual database. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `pre_snapshot` - (Optional) The commands to execute on the target environment before snapshotting a virtual source. These commands can quiesce any data prior to snapshotting. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. 
Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `post_start` - [Updatable] The commands to execute on the Target environment after starting a virtual database. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `post_snapshot` - (Optional) The commands to execute on the target environment after snapshotting a virtual source. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `pre_stop` - [Updatable] The commands to execute on the Target environment before stopping a virtual database. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `pre_start` - (Optional) The commands to execute on the target environment before starting a virtual source. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` +* `post_stop` - [Updatable] The commands to execute on the Target environment after stopping a virtual database. This is a map of three parameters: + * `name` - Name of the hook. + * `command` - (Required, if hook is specified) Command to be executed. + * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`. -* `post_start` - (Optional) The commands to execute on the target environment after starting a virtual source. 
This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `vdb_restart` - [Updatable] Indicates whether the Continuous Data Engine should automatically restart this virtual database when Target environment reboot is detected. -* `pre_stop` - (Optional) The commands to execute on the target environment before stopping a virtual source. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `snapshot_policy_id` - The ID of the Snapshot Policy for the VDB. -* `post_stop` - (Optional) The commands to execute on the target environment after stopping a virtual source. This is a map of 5 parameters: - * `name` - Name of the hook - * `command` - (Required)Command to be executed - * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` - * `element_id` - Element ID for the hook - * `has_credentials` - Flag to indicate if it has credentials +* `retention_policy_id` - The ID of the Snapshot Retention Policy for the VDB. -* `vdb_restart` - (Optional) [Updatable] Indicates whether the Engine should automatically restart this virtual source when target host reboot is detected. +* `masked` - TRUE or FALSE boolean to set a VDB as "Masked". Note: You should define a `configure_clone` script in the Hooks step to mask the dataset. The selection of this option will cause the data to be marked as masked, regardless of whether you have defined a script to do so or not. 
+If you do not define a script to mask the dataset, the data will not be masked unless there is a masking job associated with the dataset. -* `auxiliary_template_id` - (Optional) The ID of the configuration template to apply to the auxiliary container database. This is only relevant when provisioning a Multitenant pluggable database into an existing CDB, i.e when the cdb_id property is set. (Oracle Only) +* `custom_env_vars` - +Environment variable to be set when a VDB is created. See the Continuous Data Engine documentation for the list of allowed/denied environment variables and rules about substitution. This is an ordered map of key-value pairs. For eg: { "MY_ENV_VAR1": "$ORACLE_HOME", "MY_ENV_VAR2": "$CRS_HOME/after" } -* `template_id` - (Optional) [Updatable] The ID of the target VDB Template (Oracle Only). +* `custom_env_files` - Environment files to be sourced when a VDB is created. This path can be followed by parameters. Paths and parameters are separated by spaces. Valid values are a list of env_files. For eg: [ "/export/home/env_file_1", "/export/home/env_file_2" ] -* `file_mapping_rules` - (Optional) Target VDB file mapping rules (Oracle Only). Rules must be line separated (\n or \r) and each line must have the format "pattern:replacement". Lines are applied in order. +* `tags` - [Updatable] The tags to be created for the VDB. This is a map of two parameters: + * `key` - (Required) Key of the tag + * `value` - (Required) Value of the tag -* `oracle_instance_name` - (Optional) Target VDB SID name (Oracle Only). +* `make_current_account_owner` - Boolean to determine if the account provisioning this VDB will be the "Owner" of the VDB. -* `unique_name` - (Optional) Target VDB db_unique_name (Oracle Only). +* `config_params` - [Updatable] The database configuration override parameters. -* `vcdb_name` - (Optional) When provisioning an Oracle Multitenant vCDB (when the cdb_id property is not set), the name of the provisioned vCDB (Oracle Multitenant Only). 
+* `appdata_source_params` - [Updatable] The JSON payload conforming to the DraftV4 schema based on the type of application data being manipulated. These values are unique to each AppData (PostgreSQL, MySQL, etc) connector. Consult the connector documentation for more details. -* `vcdb_database_name` - (Optional) When provisioning an Oracle Multitenant vCDB (when the cdb_id property is not set), the database name of the provisioned vCDB. Defaults to the value of the vcdb_name property. (Oracle Multitenant Only). +* `additional_mount_points` - [Updatable] Specifies additional locations on which to mount a subdirectory of an AppData container + * `shared_path` - (Required) Relative path within the container of the directory that should be mounted. + * `mount_path` - Absolute path on the target environment were the filesystem should be mounted + * `environment_id` - The entity ID of the environment on which the file system will be mounted. -* `mount_point` - (Optional) Mount point for the VDB (Oracle, ASE Only). +* `instance_name` - The VDB's SID name (Oracle Only). -* `open_reset_logs` - (Optional) Whether to open the database after provision (Oracle Only). +* `open_reset_logs` - TRUE or FALSE value which determines whether to open the database after provision (Oracle Only). -* `snapshot_policy_id` - (Optional) The ID of the snapshot policy for the VDB. +* `online_log_size` - The online log size in MB (Oracle Only). -* `retention_policy_id` - (Optional) The ID of the retention policy for the VDB. +* `online_log_groups` - The number of online log groups (Oracle Only). -* `recovery_model` - (Optional) Recovery model of the source database (MSSql Only). Valid values are `[ FULL, SIMPLE, BULK_LOGGED ]` +* `archive_log` - TRUE or FALSE boolean to create a VDB in `archivelog` mode (Oracle Only). -* `pre_script` - (Optional) [Updatable] PowerShell script or executable to run prior to provisioning (MSSql Only). 
+* `new_dbid` - [Updatable] TRUE or FALSE boolean to generate a new DB ID for the created VDB (Oracle Only). -* `post_script` - (Optional) [Updatable] PowerShell script or executable to run after provisioning (MSSql Only). +* `listener_ids` - [Updatable] The listener IDs for this provision operation. This is a list of listener ids. For eg: [ "listener-123", "listener-456" ] (Oracle Only). -* `cdc_on_provision` - (Optional) [Updatable] Option to enable change data capture (CDC) on both the provisioned VDB and subsequent snapshot-related operations (e.g. refresh, rewind) (MSSql Only). +* `file_mapping_rules` - The VDB file mapping rules (Oracle Only). Rules must be line separated (\n or \r) and each line must have the format "pattern:replacement". Lines are applied in order. -* `online_log_size` - (Optional) Online log size in MB (Oracle Only). +* `unique_name` - The VDB's db_unique_name (Oracle Only). -* `online_log_groups` - (Optional) Number of online log groups (Oracle Only). +* `auxiliary_template_id` - The ID of the configuration template to apply to the auxiliary container database (CDB). This is only relevant when provisioning a Multitenant pluggable database into an existing CDB, i.e when the cdb_id property is set. (Oracle Only) -* `archive_log` - (Optional) Option to create a VDB in archivelog mode (Oracle Only). +* `cdb_id` - The ID of the container database (CDB) to provision an Oracle Multitenant database into. If empty, a new vCDB will be provisioned. (Oracle only) -* `new_dbid` - (Optional) [Updatable] Option to generate a new DB ID for the created VDB (Oracle Only). +* `os_username` - The name of the privileged user to run the provision operation (Oracle only). -* `masked` - (Optional) Option to create a Masked VDB. Note: You should define a `configure_clone` script in the Hooks step to mask the dataset. The selection of the "Mask this VDB" option will cause the data to be marked as masked, whether you have defined a script to do so or not. 
-If you do not define a script to mask the dataset, the data will not be masked unless there is a masking job associated with the source dataset. +* `os_password` - The password of the privileged user to run the provision operation (Oracle only). -* `listener_ids` - (Optional) [Updatable] The listener IDs for this provision operation (Oracle Only). This is a list of listener ids. For eg: [ "listener-123", "listener-456" ] +* `vcdb_tde_key_identifier` - ID of the key created by the Continuous Data Engine. (Oracle Multitenant Only) -* `custom_env_vars` - (Optional) -Environment variable to be set when the engine creates a VDB. See the Engine documentation for the list of allowed/denied environment variables and rules about substitution. This is an ordered map of key-value pairs. For eg: { "MY_ENV_VAR1": "$ORACLE_HOME", "MY_ENV_VAR2": "$CRS_HOME/after" } +* `cdb_tde_keystore_password` - [Updatable] The password for the Transparent Data Encryption keystore associated with the CDB. (Oracle Multitenant Only) -* `custom_env_files` - (Optional) Environment files to be sourced when the Engine creates a VDB. This path can be followed by parameters. Paths and parameters are separated by spaces. Valid values are a list of env_files. For eg: [ "/export/home/env_file_1", "/export/home/env_file_2" ] +* `target_vcdb_tde_keystore_path` - [Updatable] Path to the keystore of the vCDB. (Oracle Multitenant Only) -* `timestamp` - (Optional) The point in time from which to execute the operation. Mutually exclusive with timestamp_in_database_timezone. If the timestamp is not set, selects the latest point. +* `tde_key_identifier` - [Updatable] ID of the key created by the Continuous Data Engine. (Oracle Multitenant Only) -* `timestamp_in_database_timezone` - (Optional) The point in time from which to execute the operation, expressed as a date-time in the timezone of the source database. Mutually exclusive with timestamp. 
+* `tde_exported_key_file_secret` - Secret to be used while exporting and importing vPDB encryption keys if Transparent Data Encryption is enabled on the vPDB. (Oracle Multitenant Only) -* `parent_tde_keystore_password` - (Optional) The password of the keystore specified in parentTdeKeystorePath. (Oracle Multitenant Only) +* `parent_tde_keystore_password` - [Updatable] The password of the keystore specified in parentTdeKeystorePath. (Oracle Multitenant Only) -* `parent_tde_keystore_path` - (Optional) Path to a copy of the parent's Oracle transparent data encryption keystore on the target host. Required to provision from snapshots containing encrypted database files. (Oracle Multitenant Only) +* `parent_tde_keystore_path` - [Updatable] Path to a copy of the parent's Oracle transparent data encryption keystore on the target host. Required to provision from snapshots containing encrypted database files. (Oracle Multitenant Only) -* `tags` - (Optional) The tags to be created for VDB. This is a map of 2 parameters: - * `key` - (Required) Key of the tag - * `value` - (Required) Value of the tag +* `vcdb_name` - The name of the provisioned vCDB when the cdb_id property is not set (Oracle Multitenant Only). -* `make_current_account_owner` - (Optional) Whether the account provisioning this VDB must be configured as owner of the VDB. +* `vcdb_database_name` - The database name of the provisioned vCDB when the cdb_id property is not set. Defaults to the value of the vcdb_name property (Oracle Multitenant Only). -* `config_params` - (Optional) Database configuration parameter overrides +* `cluster_node_ids` - The cluster node ids, name, or addresses for this provision operation (Oracle RAC Only). -* `appdata_source_params` - The JSON payload conforming to the DraftV4 schema based on the type of application data being manipulated. +* `oracle_rac_custom_env_vars` - Environment variable to be set when the engine creates an Oracle RAC VDB. See the Engine documentation for the list of allowed/denied environment variables and rules about substitution. 
+ * `node_id` - (Required) The node id of the cluster. + * `name` - (Required) Name of the environment variable + * `value` - (Required) Value of the environment variable. -* `appdata_config_params` - (Optional) The list of parameters specified by the source config schema in the toolkit +* `oracle_rac_custom_env_files` - Environment files to be sourced when the Engine creates an Oracle RAC VDB. This path can be followed by parameters. Paths and parameters are separated by spaces. + * `node_id` - (Required) The node id of the cluster. + * `path_parameters` - (Required) This references a file from which certain parameters will be loaded. -* `additional_mount_points` - (Optional) Specifies additional locations on which to mount a subdirectory of an AppData container - * `shared_path` - (Required) Relative path within the container of the directory that should be mounted. - * `mount_path` - (Required) Absolute path on the target environment were the filesystem should be mounted - * `environment_id` - (Required) The entity ID of the environment on which the file system will be mounted. +* `db_username` - [Updatable] The username of the database (Oracle, SAP ASE only). Only for update. -* `vcdb_tde_key_identifier` - (Optional) ID of the key created by Delphix. (Oracle Multitenant Only) +* `db_password` - [Updatable] The password of the database (Oracle, SAP ASE only). Only for update. -* `cdb_tde_keystore_password` - (Optional) The password for the Transparent Data Encryption keystore associated with the CDB. (Oracle Multitenant Only) +* `template_id` - [Updatable] The ID of the VDB Configuration Template (Oracle, SQL Server Only). -* `target_vcdb_tde_keystore_path` - (Optional) Path to the keystore of the target vCDB. (Oracle Multitenant Only) +* `database_name` - The name of the database on the Target environment. Defaults to "name" (Oracle, MSSQL, SAP ASE). -* `tde_key_identifier` - (Optional) ID of the key created by Delphix. 
(Oracle Multitenant Only) +* `mount_point` - [Updatable] The mount point for the VDB (Oracle, ASE Only). -* `tde_exported_key_file_secret` - (Optional) Secret to be used while exporting and importing vPDB encryption keys if Transparent Data Encryption is enabled on the vPDB. (Oracle Multitenant Only) +* `truncate_log_on_checkpoint` - TRUE or FALSE value to truncate the logs on checkpoints (SAP ASE only). -* `parent_tde_keystore_password` - (Optional) The password of the keystore specified in parentTdeKeystorePath. (Oracle Multitenant Only) +* `recovery_model` - Recovery model of the source database. Valid values are `[ FULL, SIMPLE, BULK_LOGGED ]` (SQL Server Only). -* `parent_tde_keystore_path` - (Optional) Path to a copy of the parent's Oracle transparent data encryption keystore on the target host. Required to provision from snapshots containing encrypted database files. (Oracle Multitenant Only) +* `pre_script` - [Updatable] PowerShell script or executable to run prior to provisioning (SQL Server Only). -* `oracle_rac_custom_env_vars` - (Optional) Environment variable to be set when the engine creates an Oracle RAC VDB. See the Engine documentation for the list of allowed/denied environment variables and rules about substitution. - * `node_id` - (Required) The node id of the cluster. - * `name` - (Required) Name of the environment variable - * `value` - (Required) Value of the environment variable. +* `post_script` - [Updatable] PowerShell script or executable to run after provisioning (SQL Server Only). -* `oracle_rac_custom_env_files` - (Optional) Environment files to be sourced when the Engine creates an Oracle RAC VDB. This path can be followed by parameters. Paths and parameters are separated by spaces. - * `node_id` - (Required) The node id of the cluster. - * `path_parameters` - (Required) This references a file from which certain parameters will be loaded. 
+* `cdc_on_provision` - [Updatable] Option to enable change data capture (CDC) on both the provisioned VDB and subsequent snapshot-related operations (e.g. refresh, rewind) (SQL Server Only). ## Attribute Reference @@ -286,3 +256,21 @@ Environment variable to be set when the engine creates a VDB. See the Engine doc * `tags` - A list of key value pair. * `creation_date` - The date this VDB was created. + +## Import (Beta) + +Use the [`import` block](https://developer.hashicorp.com/terraform/language/import) to add VDBs created directly in Data Control Tower into a Terraform state file. + +For example: +```terraform +import { + to = delphix_vdb.vdb_import_demo + id = "vdb_id" +} +``` + +*This is a beta feature. Delphix offers no guarantees of support or compatibility.* + +## Limitations + +Not all properties are supported through the `update` command. Properties that are not supported by the `update` command are presented via an error message at runtime. diff --git a/examples/vdb/hana/bookmark/main.tf b/examples/vdb/hana/bookmark/main.tf index cf4ada6..7ffe785 100644 --- a/examples/vdb/hana/bookmark/main.tf +++ b/examples/vdb/hana/bookmark/main.tf @@ -26,7 +26,6 @@ resource "delphix_vdb" "example" { environment_user_id = "environment_user_name" target_group_id = "group-123" snapshot_policy_id = "test_snapshot_policy" - database_name = "dbname_to_be_created" mount_point = "/var/mnt" auto_select_repository = true retention_policy_id = "test_retention_policy" @@ -48,12 +47,6 @@ resource "delphix_vdb" "example" { tSystemUserName = "" tSystemUserPassword ="" }) - config_params jsonencode({ - processes = 150 - }) - appdata_config_params jsonencode({ - param = "value" - }) additional_mount_points = [{ shared_path = "/", mount_path = "/work", @@ -63,78 +56,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = 
"string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/hana/snapshot/main.tf b/examples/vdb/hana/snapshot/main.tf index f5a8019..0c6a8b5 100644 --- a/examples/vdb/hana/snapshot/main.tf +++ b/examples/vdb/hana/snapshot/main.tf @@ -27,7 +27,6 @@ resource "delphix_vdb" "example" { environment_user_id = "environment_user_name" target_group_id = "group-123" snapshot_policy_id = "test_snapshot_policy" - database_name = "dbname_to_be_created" mount_point = "/var/mnt" auto_select_repository = true retention_policy_id = "test_retention_policy" @@ -49,12 +48,6 @@ resource "delphix_vdb" "example" { tSystemUserName = "" tSystemUserPassword ="" }) - config_params jsonencode({ - processes = 150 - }) - appdata_config_params jsonencode({ - param = "value" - }) additional_mount_points = [{ shared_path = "/", mount_path = "/work", @@ -64,78 +57,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" 
shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/hana/timestamp/main.tf b/examples/vdb/hana/timestamp/main.tf index 26711ba..fbb2525 100644 --- a/examples/vdb/hana/timestamp/main.tf +++ b/examples/vdb/hana/timestamp/main.tf @@ -28,7 +28,6 @@ resource "delphix_vdb" "example" { environment_user_id = "environment_user_name" target_group_id = "group-123" snapshot_policy_id = "test_snapshot_policy" - database_name = "dbname_to_be_created" mount_point = "/var/mnt" auto_select_repository = true retention_policy_id = "test_retention_policy" @@ -50,12 +49,6 @@ resource "delphix_vdb" "example" { tSystemUserName = "" tSystemUserPassword ="" }) - config_params jsonencode({ - processes = 150 - }) - appdata_config_params jsonencode({ - param = "value" - }) 
additional_mount_points = [{ shared_path = "/", mount_path = "/work", @@ -65,78 +58,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/mssql/bookmark/main.tf b/examples/vdb/mssql/bookmark/main.tf index 4c13390..e69eca6 100644 --- a/examples/vdb/mssql/bookmark/main.tf +++ b/examples/vdb/mssql/bookmark/main.tf @@ -45,92 +45,66 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_script { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = 
true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_script { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/mssql/snapshot/main.tf b/examples/vdb/mssql/snapshot/main.tf index 5b953ca..d504695 100644 --- a/examples/vdb/mssql/snapshot/main.tf +++ b/examples/vdb/mssql/snapshot/main.tf @@ -46,92 +46,66 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_script { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - 
has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_script { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/mssql/timestamp/main.tf b/examples/vdb/mssql/timestamp/main.tf index 5c72226..f31a864 100644 --- a/examples/vdb/mssql/timestamp/main.tf +++ b/examples/vdb/mssql/timestamp/main.tf @@ -48,92 +48,66 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_script { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = 
"string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_script { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/mysql/snapshot/main.tf b/examples/vdb/mysql/snapshot/main.tf index e4655eb..0a5f0fe 100644 --- a/examples/vdb/mysql/snapshot/main.tf +++ b/examples/vdb/mysql/snapshot/main.tf @@ -27,7 +27,6 @@ resource "delphix_vdb" "example" { environment_user_id = "environment_user_name" target_group_id = "group-123" snapshot_policy_id = "test_snapshot_policy" - database_name = "dbname_to_be_created" mount_point = "/var/mnt" auto_select_repository = true retention_policy_id = "test_retention_policy" @@ -46,12 +45,7 @@ resource "delphix_vdb" "example" { vdbPass: XXXX, vdbUser: XXXX }) - config_params = jsonencode({ -processes = 150 -}) -appdata_config_params = jsonencode({ -param = "value" -}) + additional_mount_points = [{ shared_path = "/", mount_path = "/work", @@ -62,78 +56,56 @@ post_snapshot { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" -element_id = 
"string" -has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } post_start { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" -element_id = "string" -has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/oracle/bookmark/main.tf b/examples/vdb/oracle/bookmark/main.tf index eb548f3..81bb90d 100644 --- a/examples/vdb/oracle/bookmark/main.tf +++ b/examples/vdb/oracle/bookmark/main.tf @@ -45,7 +45,7 @@ resource "delphix_vdb" "example" { new_dbid = true cluster_node_ids = ["ORACLE_CLUSTER_NODE-ID"] auxiliary_template_id = "aux-template-1" - oracle_instance_name = "dbdhcp2" + instance_name = "dbdhcp2" retention_policy_id = "test_retention_policy" template_id = "template-1" repository_id = "" @@ -77,78 +77,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } 
configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } tags { key = "key-1" diff --git a/examples/vdb/oracle/snapshot/main.tf b/examples/vdb/oracle/snapshot/main.tf index e2cdfe5..b8ee18e 100644 --- a/examples/vdb/oracle/snapshot/main.tf +++ b/examples/vdb/oracle/snapshot/main.tf @@ -46,7 +46,7 @@ resource "delphix_vdb" "example" { auxiliary_template_id = "aux-template-1" database_name = "dbname_to_be_created" mount_point = "/var/mnt" - oracle_instance_name = "dbdhcp2" + instance_name = "dbdhcp2" retention_policy_id = "test_retention_policy" template_id = "template-1" cdb_id = "" @@ -76,81 +76,59 @@ resource "delphix_vdb" "example" { processes = 150 }) pre_start { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } pre_rollback { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } post_start { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } post_rollback { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = 
"string" + shell = "bash" } pre_stop { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } configure_clone { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } post_snapshot { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } pre_refresh { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { - name = "string", - command = "string", - shell = "bash", - element_id = "string", - has_credentials = true + name = "string" + command = "string" + shell = "bash" } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } tags { key = "key-1" diff --git a/examples/vdb/oracle/timestamp/main.tf b/examples/vdb/oracle/timestamp/main.tf index da39843..67786a9 100644 --- a/examples/vdb/oracle/timestamp/main.tf +++ b/examples/vdb/oracle/timestamp/main.tf @@ -46,7 +46,7 @@ resource "delphix_vdb" "example" { file_mapping_rules = "/datafile/dbdhcp3/oradata/dbdhcp3:/data\n/u03/app/ora11202/product/11.2.0/dbhome_1/dbs/dbv_R2V4.dbf:/data/dbv_R2V4.dbf" new_dbid = true auxiliary_template_id = "aux-template-1" - oracle_instance_name = "dbdhcp2" + instance_name = "dbdhcp2" retention_policy_id = "test_retention_policy" template_id = "template-1" listener_ids = ["id"] @@ -78,78 +78,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" 
command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } tags { key = "key-1" diff --git a/examples/vdb/postgresql/bookmark/main.tf b/examples/vdb/postgresql/bookmark/main.tf index 87ad424..f9f5100 100644 --- a/examples/vdb/postgresql/bookmark/main.tf +++ b/examples/vdb/postgresql/bookmark/main.tf @@ -26,7 +26,6 @@ resource "delphix_vdb" "example" { environment_user_id = "environment_user_name" target_group_id = "group-123" snapshot_policy_id = "test_snapshot_policy" - database_name = "dbname_to_be_created" mount_point = "/var/mnt" auto_select_repository = true retention_policy_id = "test_retention_policy" @@ -41,12 +40,6 @@ resource "delphix_vdb" "example" { postgresPort = 5434 configSettingsStg = [{ propertyName: "timezone", value:"GMT", commentProperty:false}] }) - config_params jsonencode({ - processes = 150 - }) - appdata_config_params jsonencode({ - param = "value" - }) additional_mount_points = [{ shared_path = 
"/", mount_path = "/work", @@ -57,78 +50,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/postgresql/snapshot/main.tf b/examples/vdb/postgresql/snapshot/main.tf index f92b13e..62485df 100644 --- a/examples/vdb/postgresql/snapshot/main.tf +++ b/examples/vdb/postgresql/snapshot/main.tf @@ -27,7 +27,6 @@ resource "delphix_vdb" "example" { environment_user_id = "environment_user_name" target_group_id = "group-123" snapshot_policy_id = "test_snapshot_policy" - database_name = "dbname_to_be_created" mount_point = "/var/mnt" auto_select_repository = true retention_policy_id = "test_retention_policy" @@ -42,12 +41,6 @@ resource "delphix_vdb" "example" { postgresPort = 5434 
configSettingsStg = [{ propertyName: "timezone", value:"GMT", commentProperty:false}] }) - config_params = jsonencode({ - processes = 150 - }) - appdata_config_params = jsonencode({ - param = "value" - }) additional_mount_points = [{ shared_path = "/", mount_path = "/work", @@ -58,78 +51,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/postgresql/timestamp/main.tf b/examples/vdb/postgresql/timestamp/main.tf index 9fe33cc..38b42ec 100644 --- a/examples/vdb/postgresql/timestamp/main.tf +++ b/examples/vdb/postgresql/timestamp/main.tf @@ -43,12 +43,6 @@ resource "delphix_vdb" "example" { postgresPort = 5434 configSettingsStg = [{ propertyName: "timezone", value:"GMT", 
commentProperty:false}] }) - config_params jsonencode({ - processes = 150 - }) - appdata_config_params jsonencode({ - param = "value" - }) additional_mount_points = [{ shared_path = "/", mount_path = "/work", @@ -58,78 +52,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/sybase/bookmark/main.tf b/examples/vdb/sybase/bookmark/main.tf index 1d3705f..086acf4 100644 --- a/examples/vdb/sybase/bookmark/main.tf +++ b/examples/vdb/sybase/bookmark/main.tf @@ -44,78 +44,56 @@ resource "delphix_vdb" "example" { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } pre_stop { name = "string", command = "string", shell = "bash", - element_id 
= "string", - has_credentials = true } pre_refresh { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } post_start { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } post_snapshot { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } post_refresh { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } post_stop { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } post_rollback { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } pre_rollback { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } pre_start { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } configure_clone { name = "string", command = "string", shell = "bash", - element_id = "string", - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/sybase/snapshot/main.tf b/examples/vdb/sybase/snapshot/main.tf index 1913657..d838dfd 100644 --- a/examples/vdb/sybase/snapshot/main.tf +++ b/examples/vdb/sybase/snapshot/main.tf @@ -45,78 +45,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = 
"string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/examples/vdb/sybase/timestamp/main.tf b/examples/vdb/sybase/timestamp/main.tf index 1ccf2c0..ae6b7c0 100644 --- a/examples/vdb/sybase/timestamp/main.tf +++ b/examples/vdb/sybase/timestamp/main.tf @@ -46,78 +46,56 @@ resource "delphix_vdb" "example" { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_snapshot { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_refresh { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_stop { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } post_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } pre_rollback { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } 
pre_start { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } configure_clone { name = "string" command = "string" shell = "bash" - element_id = "string" - has_credentials = true } make_current_account_owner = true tags { diff --git a/go.mod b/go.mod index 267684f..1475ad4 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,10 @@ module terraform-provider-delphix go 1.22.0 +toolchain go1.22.6 + require ( - github.com/delphix/dct-sdk-go/v14 v14.0.0 + github.com/delphix/dct-sdk-go/v22 v22.0.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 ) diff --git a/go.sum b/go.sum index 7810a1a..27fb933 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/delphix/dct-sdk-go/v14 v14.0.0 h1:ap+uHAN2zvQQgvyk4lImAjvIo6tkN1pG/8932qq66bY= -github.com/delphix/dct-sdk-go/v14 v14.0.0/go.mod h1:OwfMNFfYxQrunIxpfmFK0cIKC6o8gf2x7fE8LhcWNfo= +github.com/delphix/dct-sdk-go/v22 v22.0.0 h1:bHwJ6wVunGte6szxbHMt0E9O/Ox3ydcb02RMdRa3zpc= +github.com/delphix/dct-sdk-go/v22 v22.0.0/go.mod h1:VHMZm4vQdh8FJj8lsdvbTeCJzy22a6/kjrsIkHmaefg= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= @@ -132,9 +132,8 @@ github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNX github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= diff --git a/internal/provider/commons.go b/internal/provider/commons.go index 5cfddd2..77ce1f3 100644 --- a/internal/provider/commons.go +++ b/internal/provider/commons.go @@ -15,3 +15,77 @@ const ( WARN string = "[WARN] " ERROR string = "[ERROR] " ) + +var updatableVdbKeys = map[string]bool{ + "name": true, + "db_username": true, + "db_password": true, + "pre_refresh": true, + "post_refresh": true, + "configure_clone": true, + "pre_snapshot": true, + "post_snapshot": true, + "pre_rollback": true, + "post_rollback": true, + "pre_start": true, + "post_start": true, + "pre_stop": true, + "post_stop": true, + "template_id": true, + "pre_script": true, + "post_script": true, + "custom_env_vars": true, + "custom_env_files": true, + "appdata_source_params": true, + "config_params": true, + "cdb_tde_keystore_password": true, + "target_vcdb_tde_keystore_path": true, + "tde_key_identifier": true, + "parent_tde_keystore_password": true, + "parent_tde_keystore_path": true, + "additional_mount_points": true, + "cdc_on_provision": true, + "environment_user_id": true, + "listener_ids": true, + "vdb_restart": true, + "new_dbid": true, + "mount_point": true, + "tags": true, +} + +var isDestructiveVdbUpdate = map[string]bool{ + "name": false, + "db_username": false, + "db_password": 
false, + "pre_refresh": false, + "post_refresh": false, + "configure_clone": false, + "pre_snapshot": false, + "post_snapshot": false, + "pre_rollback": false, + "post_rollback": false, + "pre_start": false, + "post_start": false, + "pre_stop": false, + "post_stop": false, + "template_id": true, + "pre_script": false, + "post_script": false, + "custom_env_vars": false, + "custom_env_files": false, + "appdata_source_params": true, + "config_params": true, + "cdb_tde_keystore_password": true, + "target_vcdb_tde_keystore_path": true, + "tde_key_identifier": true, + "parent_tde_keystore_password": true, + "parent_tde_keystore_path": true, + "additional_mount_points": false, + "cdc_on_provision": true, + "environment_user_id": true, + "listener_ids": false, + "vdb_restart": false, + "new_dbid": false, + "mount_point": true, + "tags": false, +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index b60675a..9f663d3 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -5,7 +5,7 @@ import ( "crypto/tls" "net/http" - dctapi "github.com/delphix/dct-sdk-go/v14" + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -99,8 +99,8 @@ func configure(version string, p *schema.Provider) func(context.Context, *schema } // make a test call - req := client.ManagementApi.GetRegisteredEngines(ctx) - _, _, err := client.ManagementApi.GetRegisteredEnginesExecute(req) + req := client.ManagementAPI.GetRegisteredEngines(ctx) + _, _, err := client.ManagementAPI.GetRegisteredEnginesExecute(req) if err != nil { return nil, diag.FromErr(err) diff --git a/internal/provider/resource_appdata_dsource.go b/internal/provider/resource_appdata_dsource.go index 0f54c8f..6c4cea6 100644 --- a/internal/provider/resource_appdata_dsource.go +++ b/internal/provider/resource_appdata_dsource.go @@ -5,7 +5,7 @@ import ( "encoding/json" "net/http" - dctapi 
"github.com/delphix/dct-sdk-go/v14" + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -457,7 +457,7 @@ func resourceAppdataDsourceCreate(ctx context.Context, d *schema.ResourceData, m appDataDSourceLinkSourceParameters.SetSyncParameters(sync_params) } - req := client.DSourcesApi.LinkAppdataDatabase(ctx) + req := client.DSourcesAPI.LinkAppdataDatabase(ctx) apiRes, httpRes, err := req.AppDataDSourceLinkSourceParameters(*appDataDSourceLinkSourceParameters).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { @@ -497,12 +497,18 @@ func resourceDsourceRead(ctx context.Context, d *schema.ResourceData, meta inter dsource_id := d.Id() res, diags := PollForObjectExistence(ctx, func() (interface{}, *http.Response, error) { - return client.DSourcesApi.GetDsourceById(ctx, dsource_id).Execute() + return client.DSourcesAPI.GetDsourceById(ctx, dsource_id).Execute() }) + if res == nil { + tflog.Error(ctx, DLPX+ERROR+"Dsource not found: "+dsource_id+", removing from state. ") + d.SetId("") + return nil + } + if diags != nil { _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.DSourcesApi.GetDsourceById(ctx, dsource_id).Execute() + return client.DSourcesAPI.GetDsourceById(ctx, dsource_id).Execute() }) // This would imply error in poll for deletion so we just log and exit. 
if diags != nil { @@ -564,7 +570,7 @@ func resourceDsourceDelete(ctx context.Context, d *schema.ResourceData, meta int deleteDsourceParams := dctapi.NewDeleteDSourceRequest(dsourceId) deleteDsourceParams.SetForce(false) - res, httpRes, err := client.DSourcesApi.DeleteDsource(ctx).DeleteDSourceRequest(*deleteDsourceParams).Execute() + res, httpRes, err := client.DSourcesAPI.DeleteDsource(ctx).DeleteDSourceRequest(*deleteDsourceParams).Execute() if diags := apiErrorResponseHelper(ctx, res, httpRes, err); diags != nil { return diags @@ -580,7 +586,7 @@ func resourceDsourceDelete(ctx context.Context, d *schema.ResourceData, meta int } _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.DSourcesApi.GetDsourceById(ctx, dsourceId).Execute() + return client.DSourcesAPI.GetDsourceById(ctx, dsourceId).Execute() }) return diags diff --git a/internal/provider/resource_appdata_dsource_test.go b/internal/provider/resource_appdata_dsource_test.go index 260b6a7..5a3c79d 100644 --- a/internal/provider/resource_appdata_dsource_test.go +++ b/internal/provider/resource_appdata_dsource_test.go @@ -123,7 +123,7 @@ func testDsourceExists(n string, sourceId string) resource.TestCheckFunc { } client := testAccProvider.Meta().(*apiClient).client - res, _, err := client.DSourcesApi.GetDsourceById(context.Background(), dsourceId).Execute() + res, _, err := client.DSourcesAPI.GetDsourceById(context.Background(), dsourceId).Execute() if err != nil { return err } @@ -147,7 +147,7 @@ func testDsourceDestroy(s *terraform.State) error { dsourceId := rs.Primary.ID - _, httpResp, _ := client.DSourcesApi.GetDsourceById(context.Background(), dsourceId).Execute() + _, httpResp, _ := client.DSourcesAPI.GetDsourceById(context.Background(), dsourceId).Execute() if httpResp == nil { return fmt.Errorf("Dsource has not been deleted") } diff --git a/internal/provider/resource_database_postgresql.go b/internal/provider/resource_database_postgresql.go index 
cc0a629..7b976a0 100644 --- a/internal/provider/resource_database_postgresql.go +++ b/internal/provider/resource_database_postgresql.go @@ -3,8 +3,9 @@ package provider import ( "context" "net/http" + "strings" - dctapi "github.com/delphix/dct-sdk-go/v14" + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -124,6 +125,9 @@ func resourceSource() *schema.Resource { }, }, }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, } } @@ -146,7 +150,7 @@ func resourceDatabasePostgressqlCreate(ctx context.Context, d *schema.ResourceDa sourceCreateParameters.SetEngineId(v.(string)) } - req := client.SourcesApi.CreatePostgresSource(ctx) + req := client.SourcesAPI.CreatePostgresSource(ctx) apiRes, httpRes, err := req.PostgresSourceCreateParameters(*sourceCreateParameters).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { @@ -178,18 +182,23 @@ func resourceDatabasePostgressqlCreate(ctx context.Context, d *schema.ResourceDa } func resourceDatabasePostgressqlRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*apiClient).client source_id := d.Id() res, diags := PollForObjectExistence(ctx, func() (interface{}, *http.Response, error) { - return client.SourcesApi.GetSourceById(ctx, source_id).Execute() + return client.SourcesAPI.GetSourceById(ctx, source_id).Execute() }) + if res == nil { + tflog.Error(ctx, DLPX+ERROR+"PostgreSQL source not found: "+source_id+", removing from state. 
") + d.SetId("") + return nil + } + if diags != nil { _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.SourcesApi.GetSourceById(ctx, source_id).Execute() + return client.SourcesAPI.GetSourceById(ctx, source_id).Execute() }) // This would imply error in poll for deletion so we just log and exit. if diags != nil { @@ -208,7 +217,25 @@ func resourceDatabasePostgressqlRead(ctx context.Context, d *schema.ResourceData return diag.Errorf("Error occured in type casting.") } + repository_value := d.Get("repository_value").(string) + + if repository_value == "" { + resEnv, httpRes, err := client.EnvironmentsAPI.GetEnvironmentById(ctx, result.GetEnvironmentId()).Execute() + + if diags := apiErrorResponseHelper(ctx, resEnv, httpRes, err); diags != nil { + return diags + } + if result.GetRepository() != "" { + for _, repo := range resEnv.Repositories { + if strings.EqualFold(repo.GetId(), result.GetRepository()) { + repository_value = repo.GetName() + } + } + } + } + d.Set("id", result.GetId()) + d.Set("repository_value", repository_value) d.Set("environment_id", result.GetEnvironmentId()) d.Set("database_type", result.GetDatabaseType()) d.Set("name", result.GetName()) @@ -262,7 +289,7 @@ func resourceDatabasePostgressqlUpdate(ctx context.Context, d *schema.ResourceDa updateSourceParam.SetName(d.Get("name").(string)) } - res, httpRes, err := client.SourcesApi.UpdatePostgresSourceById(ctx, d.Get("id").(string)).PostgresSourceUpdateParameters(*updateSourceParam).Execute() + res, httpRes, err := client.SourcesAPI.UpdatePostgresSourceById(ctx, d.Get("id").(string)).PostgresSourceUpdateParameters(*updateSourceParam).Execute() if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil { // revert and set the old value to the changed keys @@ -290,7 +317,7 @@ func resourceDatabasePostgressqlDelete(ctx context.Context, d *schema.ResourceDa source_id := d.Id() - res, httpRes, err := client.SourcesApi.DeleteSource(ctx, 
source_id).Execute() + res, httpRes, err := client.SourcesAPI.DeleteSource(ctx, source_id).Execute() if diags := apiErrorResponseHelper(ctx, res, httpRes, err); diags != nil { return diags @@ -306,7 +333,7 @@ func resourceDatabasePostgressqlDelete(ctx context.Context, d *schema.ResourceDa } _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.SourcesApi.GetSourceById(ctx, source_id).Execute() + return client.SourcesAPI.GetSourceById(ctx, source_id).Execute() }) return diags diff --git a/internal/provider/resource_database_postgresql_test.go b/internal/provider/resource_database_postgresql_test.go index 599f35f..eda3296 100644 --- a/internal/provider/resource_database_postgresql_test.go +++ b/internal/provider/resource_database_postgresql_test.go @@ -71,7 +71,7 @@ func testSourceDestroy(s *terraform.State) error { sourceId := rs.Primary.ID - _, httpResp, _ := client.SourcesApi.GetSourceById(context.Background(), sourceId).Execute() + _, httpResp, _ := client.SourcesAPI.GetSourceById(context.Background(), sourceId).Execute() if httpResp == nil { return fmt.Errorf("Source has not been deleted") } @@ -107,7 +107,7 @@ func testSourceExists(n string, name string) resource.TestCheckFunc { } client := testAccProvider.Meta().(*apiClient).client - res, _, err := client.SourcesApi.GetSourceById(context.Background(), sourceId).Execute() + res, _, err := client.SourcesAPI.GetSourceById(context.Background(), sourceId).Execute() if err != nil { return err } diff --git a/internal/provider/resource_environment.go b/internal/provider/resource_environment.go index d15b3bd..195133a 100644 --- a/internal/provider/resource_environment.go +++ b/internal/provider/resource_environment.go @@ -2,10 +2,11 @@ package provider import ( "context" - "github.com/hashicorp/terraform-plugin-log/tflog" "net/http" - dctapi "github.com/delphix/dct-sdk-go/v14" + "github.com/hashicorp/terraform-plugin-log/tflog" + + dctapi 
"github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -384,7 +385,7 @@ func resourceEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta createEnvParams.SetTags(toTagArray(v)) } - apiReq := client.EnvironmentsApi.CreateEnvironment(ctx) + apiReq := client.EnvironmentsAPI.CreateEnvironment(ctx) apiRes, httpRes, err := apiReq.EnvironmentCreateParameters(*createEnvParams).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { @@ -415,12 +416,18 @@ func resourceEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta i envId := d.Id() apiRes, diags := PollForObjectExistence(ctx, func() (interface{}, *http.Response, error) { - return client.EnvironmentsApi.GetEnvironmentById(ctx, envId).Execute() + return client.EnvironmentsAPI.GetEnvironmentById(ctx, envId).Execute() }) + if apiRes == nil { + tflog.Error(ctx, DLPX+ERROR+"Environment not found: "+envId+", removing from state. ") + d.SetId("") + return nil + } + if diags != nil { _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.EnvironmentsApi.GetEnvironmentById(ctx, envId).Execute() + return client.EnvironmentsAPI.GetEnvironmentById(ctx, envId).Execute() }) if diags != nil { tflog.Error(ctx, DLPX+ERROR+"Error in polling of environment for deletion.") @@ -450,7 +457,7 @@ func resourceEnvironmentDelete(ctx context.Context, d *schema.ResourceData, meta client := meta.(*apiClient).client envId := d.Id() - apiRes, httpRes, err := client.EnvironmentsApi.DeleteEnvironment(ctx, envId).Execute() + apiRes, httpRes, err := client.EnvironmentsAPI.DeleteEnvironment(ctx, envId).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { return diags @@ -464,7 +471,7 @@ func resourceEnvironmentDelete(ctx context.Context, d *schema.ResourceData, meta return diag.Errorf("[NOT OK] Env-Delete %s. 
JobId: %s / Error: %s", job_status, *apiRes.Job.Id, job_err) } _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.EnvironmentsApi.GetEnvironmentById(ctx, envId).Execute() + return client.EnvironmentsAPI.GetEnvironmentById(ctx, envId).Execute() }) return diags diff --git a/internal/provider/resource_environment_test.go b/internal/provider/resource_environment_test.go index 6e33a96..e6151bd 100644 --- a/internal/provider/resource_environment_test.go +++ b/internal/provider/resource_environment_test.go @@ -86,7 +86,7 @@ func testAccCheckDctEnvResourceExists(n string, hostname string) resource.TestCh } client := testAccProvider.Meta().(*apiClient).client - res, _, err := client.EnvironmentsApi.GetEnvironmentById(context.Background(), EnvId).Execute() + res, _, err := client.EnvironmentsAPI.GetEnvironmentById(context.Background(), EnvId).Execute() if err != nil { return err } @@ -110,7 +110,7 @@ func testAccCheckEnvDestroy(s *terraform.State) error { EnvId := rs.Primary.ID - _, httpResp, _ := client.EnvironmentsApi.GetEnvironmentById(context.Background(), EnvId).Execute() + _, httpResp, _ := client.EnvironmentsAPI.GetEnvironmentById(context.Background(), EnvId).Execute() if httpResp == nil { return fmt.Errorf("Environment has not been deleted") } diff --git a/internal/provider/resource_oracle_dsource.go b/internal/provider/resource_oracle_dsource.go index c0066ce..940fa01 100644 --- a/internal/provider/resource_oracle_dsource.go +++ b/internal/provider/resource_oracle_dsource.go @@ -2,9 +2,10 @@ package provider import ( "context" + "github.com/hashicorp/terraform-plugin-log/tflog" - dctapi "github.com/delphix/dct-sdk-go/v14" + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -710,7 +711,7 @@ func resourceOracleDsourceCreate(ctx context.Context, d *schema.ResourceData, me 
oracleDSourceLinkSourceParameters.SetOpsPreLogSync(toSourceOperationArray(v)) } - req := client.DSourcesApi.LinkOracleDatabase(ctx) + req := client.DSourcesAPI.LinkOracleDatabase(ctx) apiRes, httpRes, err := req.OracleDSourceLinkSourceParameters(*oracleDSourceLinkSourceParameters).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { diff --git a/internal/provider/resource_oracle_dsource_test.go b/internal/provider/resource_oracle_dsource_test.go index 8ba5100..9c544cb 100644 --- a/internal/provider/resource_oracle_dsource_test.go +++ b/internal/provider/resource_oracle_dsource_test.go @@ -105,7 +105,7 @@ func testOracleDsourceExists(n string, sourceValue string) resource.TestCheckFun } client := testAccProvider.Meta().(*apiClient).client - res, _, err := client.DSourcesApi.GetDsourceById(context.Background(), dsourceId).Execute() + res, _, err := client.DSourcesAPI.GetDsourceById(context.Background(), dsourceId).Execute() if err != nil { return err } @@ -129,7 +129,7 @@ func testOracleDsourceDestroy(s *terraform.State) error { dsourceId := rs.Primary.ID - _, httpResp, _ := client.DSourcesApi.GetDsourceById(context.Background(), dsourceId).Execute() + _, httpResp, _ := client.DSourcesAPI.GetDsourceById(context.Background(), dsourceId).Execute() if httpResp == nil { return fmt.Errorf("Dsource has not been deleted") } diff --git a/internal/provider/resource_vdb.go b/internal/provider/resource_vdb.go index 44b8a5d..ded2882 100644 --- a/internal/provider/resource_vdb.go +++ b/internal/provider/resource_vdb.go @@ -4,10 +4,12 @@ import ( "context" "encoding/json" "net/http" + "strings" "time" - dctapi "github.com/delphix/dct-sdk-go/v14" "github.com/hashicorp/terraform-plugin-log/tflog" + + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -91,6 +93,7 @@ func resourceVdb() *schema.Resource { "database_name": { Type: 
schema.TypeString, Optional: true, + Computed: true, }, "cdb_id": { Type: schema.TypeString, @@ -150,11 +153,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -178,11 +181,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -206,11 +209,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -234,11 +237,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -262,11 +265,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -290,11 +293,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -318,11 +321,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -346,11 +349,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -374,11 +377,11 @@ func resourceVdb() 
*schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -402,11 +405,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -430,11 +433,11 @@ func resourceVdb() *schema.Resource { }, "element_id": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "has_credentials": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -442,11 +445,16 @@ func resourceVdb() *schema.Resource { "vdb_restart": { Type: schema.TypeBool, Optional: true, + Computed: true, }, "template_id": { Type: schema.TypeString, Optional: true, }, + "jdbc_connection_string": { + Type: schema.TypeString, + Computed: true, + }, "auxiliary_template_id": { Type: schema.TypeString, Optional: true, @@ -455,9 +463,10 @@ func resourceVdb() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "oracle_instance_name": { + "instance_name": { Type: schema.TypeString, Optional: true, + Computed: true, }, "unique_name": { Type: schema.TypeString, @@ -474,6 +483,7 @@ func resourceVdb() *schema.Resource { "mount_point": { Type: schema.TypeString, Optional: true, + Computed: true, }, "open_reset_logs": { Type: schema.TypeBool, @@ -560,6 +570,14 @@ func resourceVdb() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "parent_dsource_id": { + Type: schema.TypeString, + Computed: true, + }, + "root_parent_id": { + Type: schema.TypeString, + Computed: true, + }, "tags": { Type: schema.TypeList, Optional: true, @@ -583,7 +601,6 @@ func resourceVdb() *schema.Resource { }, "appdata_config_params": { Type: schema.TypeString, - Optional: true, Computed: true, }, "make_current_account_owner": { @@ -680,6 +697,9 @@ func resourceVdb() *schema.Resource { }, }, }, + Importer: 
&schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, } } @@ -693,6 +713,14 @@ func toHookArray(array interface{}) []dctapi.Hook { if name != "" { hook_item.SetName(item_map["name"].(string)) } + element_id := item_map["element_id"].(string) + if element_id != "" { + hook_item.SetElementId(element_id) + } + has_credentials := item_map["has_credentials"].(bool) + if has_credentials { + hook_item.SetHasCredentials(has_credentials) + } // defaults to "bash" as per resource schema spec hook_item.SetShell(item_map["shell"].(string)) @@ -701,17 +729,6 @@ func toHookArray(array interface{}) []dctapi.Hook { return items } -func toTagArray(array interface{}) []dctapi.Tag { - items := []dctapi.Tag{} - for _, item := range array.([]interface{}) { - item_map := item.(map[string]interface{}) - tag_item := dctapi.NewTag(item_map["key"].(string), item_map["value"].(string)) - - items = append(items, *tag_item) - } - return items -} - func toAdditionalMountPointsArray(array interface{}) []dctapi.AdditionalMountPoint { items := []dctapi.AdditionalMountPoint{} for _, item := range array.([]interface{}) { @@ -817,7 +834,7 @@ func helper_provision_by_snapshot(ctx context.Context, d *schema.ResourceData, m if v, has_v := d.GetOk("file_mapping_rules"); has_v { provisionVDBBySnapshotParameters.SetFileMappingRules(v.(string)) } - if v, has_v := d.GetOk("oracle_instance_name"); has_v { + if v, has_v := d.GetOk("instance_name"); has_v { provisionVDBBySnapshotParameters.SetOracleInstanceName(v.(string)) } if v, has_v := d.GetOk("unique_name"); has_v { @@ -853,6 +870,9 @@ func helper_provision_by_snapshot(ctx context.Context, d *schema.ResourceData, m if v, has_v := d.GetOkExists("cdc_on_provision"); has_v { provisionVDBBySnapshotParameters.SetCdcOnProvision(v.(bool)) } + if v, has_v := d.GetOkExists("masked"); has_v { + provisionVDBBySnapshotParameters.SetMasked(v.(bool)) + } if v, has_v := d.GetOk("online_log_size"); has_v { 
provisionVDBBySnapshotParameters.SetOnlineLogSize(int32(v.(int))) } @@ -865,9 +885,6 @@ func helper_provision_by_snapshot(ctx context.Context, d *schema.ResourceData, m if v, has_v := d.GetOkExists("new_dbid"); has_v { provisionVDBBySnapshotParameters.SetNewDbid(v.(bool)) } - if v, has_v := d.GetOkExists("masked"); has_v { - provisionVDBBySnapshotParameters.SetMasked(v.(bool)) - } if v, has_v := d.GetOkExists("listener_ids"); has_v { provisionVDBBySnapshotParameters.SetListenerIds(toStringArray(v)) } @@ -971,7 +988,7 @@ func helper_provision_by_snapshot(ctx context.Context, d *schema.ResourceData, m provisionVDBBySnapshotParameters.SetOracleRacCustomEnvVars(toOracleRacCustomEnvVars(v)) } - req := client.VDBsApi.ProvisionVdbBySnapshot(ctx) + req := client.VDBsAPI.ProvisionVdbBySnapshot(ctx) apiRes, httpRes, err := req.ProvisionVDBBySnapshotParameters(*provisionVDBBySnapshotParameters).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { @@ -1058,7 +1075,7 @@ func helper_provision_by_timestamp(ctx context.Context, d *schema.ResourceData, if v, has_v := d.GetOk("file_mapping_rules"); has_v { provisionVDBByTimestampParameters.SetFileMappingRules(v.(string)) } - if v, has_v := d.GetOk("oracle_instance_name"); has_v { + if v, has_v := d.GetOk("instance_name"); has_v { provisionVDBByTimestampParameters.SetOracleInstanceName(v.(string)) } if v, has_v := d.GetOk("unique_name"); has_v { @@ -1094,6 +1111,9 @@ func helper_provision_by_timestamp(ctx context.Context, d *schema.ResourceData, if v, has_v := d.GetOkExists("cdc_on_provision"); has_v { provisionVDBByTimestampParameters.SetCdcOnProvision(v.(bool)) } + if v, has_v := d.GetOkExists("masked"); has_v { + provisionVDBByTimestampParameters.SetMasked(v.(bool)) + } if v, has_v := d.GetOk("online_log_size"); has_v { provisionVDBByTimestampParameters.SetOnlineLogSize(int32(v.(int))) } @@ -1106,9 +1126,6 @@ func helper_provision_by_timestamp(ctx context.Context, d *schema.ResourceData, if v, 
has_v := d.GetOkExists("new_dbid"); has_v { provisionVDBByTimestampParameters.SetNewDbid(v.(bool)) } - if v, has_v := d.GetOkExists("masked"); has_v { - provisionVDBByTimestampParameters.SetMasked(v.(bool)) - } if v, has_v := d.GetOk("listener_ids"); has_v { provisionVDBByTimestampParameters.SetListenerIds(toStringArray(v)) } @@ -1219,7 +1236,7 @@ func helper_provision_by_timestamp(ctx context.Context, d *schema.ResourceData, provisionVDBByTimestampParameters.SetOracleRacCustomEnvVars(toOracleRacCustomEnvVars(v)) } - req := client.VDBsApi.ProvisionVdbByTimestamp(ctx) + req := client.VDBsAPI.ProvisionVdbByTimestamp(ctx) apiRes, httpRes, err := req.ProvisionVDBByTimestampParameters(*provisionVDBByTimestampParameters).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { @@ -1302,7 +1319,7 @@ func helper_provision_by_bookmark(ctx context.Context, d *schema.ResourceData, m if v, has_v := d.GetOk("file_mapping_rules"); has_v { provisionVDBFromBookmarkParameters.SetFileMappingRules(v.(string)) } - if v, has_v := d.GetOk("oracle_instance_name"); has_v { + if v, has_v := d.GetOk("instance_name"); has_v { provisionVDBFromBookmarkParameters.SetOracleInstanceName(v.(string)) } if v, has_v := d.GetOk("unique_name"); has_v { @@ -1338,6 +1355,9 @@ func helper_provision_by_bookmark(ctx context.Context, d *schema.ResourceData, m if v, has_v := d.GetOkExists("cdc_on_provision"); has_v { provisionVDBFromBookmarkParameters.SetCdcOnProvision(v.(bool)) } + if v, has_v := d.GetOkExists("masked"); has_v { + provisionVDBFromBookmarkParameters.SetMasked(v.(bool)) + } if v, has_v := d.GetOk("online_log_size"); has_v { provisionVDBFromBookmarkParameters.SetOnlineLogSize(int32(v.(int))) } @@ -1350,9 +1370,6 @@ func helper_provision_by_bookmark(ctx context.Context, d *schema.ResourceData, m if v, has_v := d.GetOkExists("new_dbid"); has_v { provisionVDBFromBookmarkParameters.SetNewDbid(v.(bool)) } - if v, has_v := d.GetOkExists("masked"); has_v { - 
provisionVDBFromBookmarkParameters.SetMasked(v.(bool)) - } if v, has_v := d.GetOk("listener_ids"); has_v { provisionVDBFromBookmarkParameters.SetListenerIds(toStringArray(v)) } @@ -1452,7 +1469,7 @@ func helper_provision_by_bookmark(ctx context.Context, d *schema.ResourceData, m provisionVDBFromBookmarkParameters.SetOracleRacCustomEnvVars(toOracleRacCustomEnvVars(v)) } - req := client.VDBsApi.ProvisionVdbFromBookmark(ctx) + req := client.VDBsAPI.ProvisionVdbFromBookmark(ctx) apiRes, httpRes, err := req.ProvisionVDBFromBookmarkParameters(*provisionVDBFromBookmarkParameters).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { @@ -1520,12 +1537,18 @@ func resourceVdbRead(ctx context.Context, d *schema.ResourceData, meta interface vdbId := d.Id() res, diags := PollForObjectExistence(ctx, func() (interface{}, *http.Response, error) { - return client.VDBsApi.GetVdbById(ctx, vdbId).Execute() + return client.VDBsAPI.GetVdbById(ctx, vdbId).Execute() }) + if res == nil { + tflog.Error(ctx, DLPX+ERROR+"VDB not found: "+vdbId+", removing from state. ") + d.SetId("") + return nil + } + if diags != nil { _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.VDBsApi.GetVdbById(ctx, vdbId).Execute() + return client.VDBsAPI.GetVdbById(ctx, vdbId).Execute() }) // This would imply error in poll for deletion so we just log and exit. 
if diags != nil { @@ -1552,8 +1575,36 @@ func resourceVdbRead(ctx context.Context, d *schema.ResourceData, meta interface d.Set("ip_address", result.GetIpAddress()) d.Set("fqdn", result.GetFqdn()) d.Set("parent_id", result.GetParentId()) + d.Set("parent_dsource_id", result.GetParentDsourceId()) + d.Set("root_parent_id", result.GetRootParentId()) d.Set("group_name", result.GetGroupName()) d.Set("creation_date", result.GetCreationDate().String()) + d.Set("instance_name", result.GetInstanceName()) + d.Set("pre_refresh", flattenHooks(result.GetHooks().PreRefresh)) + d.Set("post_refresh", flattenHooks(result.GetHooks().PostRefresh)) + d.Set("configure_clone", flattenHooks(result.GetHooks().ConfigureClone)) + d.Set("pre_snapshot", flattenHooks(result.GetHooks().PreSnapshot)) + d.Set("post_snapshot", flattenHooks(result.GetHooks().PostSnapshot)) + d.Set("pre_start", flattenHooks(result.GetHooks().PreStart)) + d.Set("post_start", flattenHooks(result.GetHooks().PostStart)) + d.Set("pre_stop", flattenHooks(result.GetHooks().PreStop)) + d.Set("post_stop", flattenHooks(result.GetHooks().PostStop)) + d.Set("pre_rollback", flattenHooks(result.GetHooks().PreRollback)) + d.Set("post_rollback", flattenHooks(result.GetHooks().PostRollback)) + d.Set("database_name", result.GetDatabaseName()) + d.Set("tags", flattenTags(result.GetTags())) + d.Set("vdb_restart", result.GetVdbRestart()) + + _, is_provision := d.GetOk("provision_type") + if !is_provision { + // its an import, set to default value + d.Set("provision_type", "snapshot") + } + + d.Set("jdbc_connection_string", result.GetJdbcConnectionString()) + d.Set("cdb_id", result.GetCdbId()) + d.Set("template_id", result.GetTemplateId()) + d.Set("mount_point", result.GetMountPoint()) appdata_source_params, _ := json.Marshal(result.GetAppdataSourceParams()) d.Set("appdata_source_params", string(appdata_source_params)) @@ -1562,6 +1613,7 @@ func resourceVdbRead(ctx context.Context, d *schema.ResourceData, meta interface config_params, _ 
:= json.Marshal(result.GetConfigParams()) d.Set("config_params", string(config_params)) d.Set("additional_mount_points", flattenAdditionalMountPoints(result.GetAdditionalMountPoints())) + d.Set("id", vdbId) return diags @@ -1573,68 +1625,210 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa client := meta.(*apiClient).client updateVDBParam := dctapi.NewUpdateVDBParameters() + vdbId := d.Get("id").(string) + // get the changed keys changedKeys := make([]string, 0, len(d.State().Attributes)) for k := range d.State().Attributes { + if strings.Contains(k, "tags") { // this is because the changed keys are of the form tag.0.keydi + k = "tags" + } + if strings.Contains(k, "pre_refresh") { + k = "pre_refresh" + } + if strings.Contains(k, "post_refresh") { + k = "post_refresh" + } + if strings.Contains(k, "configure_clone") { + k = "configure_clone" + } + if strings.Contains(k, "pre_snapshot") { + k = "pre_snapshot" + } + if strings.Contains(k, "post_snapshot") { + k = "post_snapshot" + } + if strings.Contains(k, "pre_rollback") { + k = "pre_rollback" + } + if strings.Contains(k, "post_rollback") { + k = "post_rollback" + } + if strings.Contains(k, "pre_start") { + k = "pre_start" + } + if strings.Contains(k, "post_start") { + k = "post_start" + } + if strings.Contains(k, "pre_stop") { + k = "pre_stop" + } + if strings.Contains(k, "post_stop") { + k = "post_stop" + } + if strings.Contains(k, "additional_mount_points") { + k = "additional_mount_points" + } + if strings.Contains(k, "listener_ids") { + k = "listener_ids" + } if d.HasChange(k) { + tflog.Debug(ctx, "changed keys"+k) changedKeys = append(changedKeys, k) } } - if d.HasChanges( - "auto_select_repository", - "source_data_id", - "id", - "database_type", - "database_version", - "status", - "ip_address", - "fqdn", - "parent_id", - "group_name", - "creation_date", - "target_group_id", - "database_name", - "truncate_log_on_checkpoint", - "repository_id", - "pre_refresh", - 
"post_refresh", - "pre_rollback", - "post_rollback", - "configure_clone", - "pre_snapshot", - "post_snapshot", - "pre_start", - "post_start", - "pre_stop", - "post_stop", - "file_mapping_rules", - "oracle_instance_name", - "unique_name", - "mount_point", - "masked", - "open_reset_logs", - "snapshot_policy_id", - "retention_policy_id", - "recovery_model", - "online_log_groups", - "online_log_size", - "os_username", - "os_password", - "archive_log", - "custom_env_vars", - "custom_env_files", - "timestamp", - "timestamp_in_database_timezone", - "snapshot_id") { + var updateFailure, destructiveUpdate bool = false, false + var nonUpdatableField []string - // revert and set the old value to the changed keys - for _, key := range changedKeys { - old, _ := d.GetChange(key) - d.Set(key, old) + // var vdbs []dctapi.VDB + // var vdbDiags diag.Diagnostics + + // if changedKeys contains non updatable field set a flag + for _, key := range changedKeys { + if !updatableVdbKeys[key] { + updateFailure = true + tflog.Debug(ctx, "non updatable field: "+key) + nonUpdatableField = append(nonUpdatableField, key) + } + } + + if updateFailure { + revertChanges(d, changedKeys) + return diag.Errorf("cannot update options %v. 
Please refer to provider documentation for updatable params.", nonUpdatableField) + } + + // find if destructive update + for _, key := range changedKeys { + if isDestructiveVdbUpdate[key] { + tflog.Debug(ctx, "destructive updates for: "+key) + destructiveUpdate = true + } + } + if destructiveUpdate { + if diags := disableVDB(ctx, client, vdbId); diags != nil { + tflog.Error(ctx, "failure in disabling vdbs") + revertChanges(d, changedKeys) + return diags + } + } + + nvdh := dctapi.NewVirtualDatasetHooks() + + if d.HasChange("pre_refresh") { + if v, has_v := d.GetOk("pre_refresh"); has_v { + nvdh.SetPreRefresh(toHookArray(v)) + } else { + nvdh.SetPreRefresh([]dctapi.Hook{}) } + } + + if d.HasChange("post_refresh") { + if v, has_v := d.GetOk("post_refresh"); has_v { + nvdh.SetPostRefresh(toHookArray(v)) + } else { + nvdh.SetPostRefresh([]dctapi.Hook{}) + } + } + + if d.HasChange("pre_rollback") { + if v, has_v := d.GetOk("pre_rollback"); has_v { + nvdh.SetPreRollback(toHookArray(v)) + } else { + nvdh.SetPreRollback([]dctapi.Hook{}) + } + } + + if d.HasChange("post_rollback") { + if v, has_v := d.GetOk("post_rollback"); has_v { + nvdh.SetPostRollback(toHookArray(v)) + } else { + nvdh.SetPostRollback([]dctapi.Hook{}) + } + } + + if d.HasChange("configure_clone") { + if v, has_v := d.GetOk("configure_clone"); has_v { + nvdh.SetConfigureClone(toHookArray(v)) + } else { + nvdh.SetConfigureClone([]dctapi.Hook{}) + } + } - return diag.Errorf("cannot update one (or more) of the options changed. 
Please refer to provider documentation for updatable params.") + if d.HasChange("pre_snapshot") { + if v, has_v := d.GetOk("pre_snapshot"); has_v { + nvdh.SetPreSnapshot(toHookArray(v)) + } else { + nvdh.SetPreSnapshot([]dctapi.Hook{}) + } + } + + if d.HasChange("post_snapshot") { + if v, has_v := d.GetOk("post_snapshot"); has_v { + nvdh.SetPostSnapshot(toHookArray(v)) + } else { + nvdh.SetPostSnapshot([]dctapi.Hook{}) + } + } + + if d.HasChange("pre_start") { + if v, has_v := d.GetOk("pre_start"); has_v { + nvdh.SetPreStart(toHookArray(v)) + } else { + nvdh.SetPreStart([]dctapi.Hook{}) + } + } + + if d.HasChange("post_start") { + if v, has_v := d.GetOk("post_start"); has_v { + nvdh.SetPostStart(toHookArray(v)) + } else { + nvdh.SetPostStart([]dctapi.Hook{}) + } + } + + if d.HasChange("pre_stop") { + if v, has_v := d.GetOk("pre_stop"); has_v { + nvdh.SetPreStop(toHookArray(v)) + } else { + nvdh.SetPreStop([]dctapi.Hook{}) + } + } + + if d.HasChange("post_stop") { + if v, has_v := d.GetOk("post_stop"); has_v { + nvdh.SetPostStop(toHookArray(v)) + } else { + nvdh.SetPostStop([]dctapi.Hook{}) + } + } + + if nvdh != nil { + updateVDBParam.SetHooks(*nvdh) + } + + if d.HasChange("mount_point") { + updateVDBParam.SetMountPoint(d.Get("mount_point").(string)) + } + + if d.HasChange("custom_env_files") { + if v, has_v := d.GetOk("custom_env_files"); has_v { + updateVDBParam.SetCustomEnvFiles(toStringArray(v)) + } else { + updateVDBParam.SetCustomEnvFiles([]string{}) + } + } + if d.HasChange("custom_env_vars") { + if v, has_v := d.GetOk("custom_env_vars"); has_v { + custom_env_vars := make(map[string]string) + + for k, v := range v.(map[string]interface{}) { + custom_env_vars[k] = v.(string) + } + updateVDBParam.SetCustomEnvVars(custom_env_vars) + } else { + updateVDBParam.SetCustomEnvVars(map[string]string{}) + } } if d.HasChange("template_id") { @@ -1704,14 +1898,11 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa 
updateVDBParam.SetConfigParams(config_params) } - res, httpRes, err := client.VDBsApi.UpdateVdbById(ctx, d.Get("id").(string)).UpdateVDBParameters(*updateVDBParam).Execute() + res, httpRes, err := client.VDBsAPI.UpdateVdbById(ctx, d.Get("id").(string)).UpdateVDBParameters(*updateVDBParam).Execute() if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil { // revert and set the old value to the changed keys - for _, key := range changedKeys { - old, _ := d.GetChange(key) - d.Set(key, old) - } + revertChanges(d, changedKeys) return diags } @@ -1724,9 +1915,43 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa return diag.Errorf("[NOT OK] VDB-Update %s. JobId: %s / Error: %s", job_status, *res.Job.Id, job_err) } + if d.HasChanges( + "tags", + ) { // tags update + tflog.Debug(ctx, "updating tags") + if d.HasChange("tags") { + // delete old tag + tflog.Debug(ctx, "deleting old tags") + oldTag, newTag := d.GetChange("tags") + if len(toTagArray(oldTag)) != 0 { + tflog.Debug(ctx, "tag to be deleted: "+toTagArray(oldTag)[0].GetKey()+" "+toTagArray(oldTag)[0].GetValue()) + deleteTag := *dctapi.NewDeleteTag() + tagDelResp, tagDelErr := client.VDBsAPI.DeleteVdbTags(ctx, vdbId).DeleteTag(deleteTag).Execute() + tflog.Debug(ctx, "tag delete response: "+tagDelResp.Status) + if diags := apiErrorResponseHelper(ctx, nil, tagDelResp, tagDelErr); diags != nil { + revertChanges(d, changedKeys) + updateFailure = true + } + } + // create tag + if len(toTagArray(newTag)) != 0 { + tflog.Info(ctx, "creating new tags") + _, httpResp, tagCrtErr := client.VDBsAPI.CreateVdbTags(ctx, vdbId).TagsRequest(*dctapi.NewTagsRequest(toTagArray(newTag))).Execute() + if diags := apiErrorResponseHelper(ctx, nil, httpResp, tagCrtErr); diags != nil { + revertChanges(d, changedKeys) + return diags + } + } + } + } + if destructiveUpdate { + if diags := enableVDB(ctx, client, vdbId); diags != nil { + return diags //if failure should we enable + } + } + return 
diags } - func resourceVdbDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*apiClient).client @@ -1735,7 +1960,7 @@ func resourceVdbDelete(ctx context.Context, d *schema.ResourceData, meta interfa deleteVdbParams := dctapi.NewDeleteVDBParametersWithDefaults() deleteVdbParams.SetForce(false) - res, httpRes, err := client.VDBsApi.DeleteVdb(ctx, vdbId).DeleteVDBParameters(*deleteVdbParams).Execute() + res, httpRes, err := client.VDBsAPI.DeleteVdb(ctx, vdbId).DeleteVDBParameters(*deleteVdbParams).Execute() if diags := apiErrorResponseHelper(ctx, res, httpRes, err); diags != nil { return diags @@ -1751,7 +1976,7 @@ func resourceVdbDelete(ctx context.Context, d *schema.ResourceData, meta interfa } _, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) { - return client.VDBsApi.GetVdbById(ctx, vdbId).Execute() + return client.VDBsAPI.GetVdbById(ctx, vdbId).Execute() }) return diags diff --git a/internal/provider/resource_vdb_group.go b/internal/provider/resource_vdb_group.go index 4d13ffc..b7835c0 100644 --- a/internal/provider/resource_vdb_group.go +++ b/internal/provider/resource_vdb_group.go @@ -2,9 +2,10 @@ package provider import ( "context" + "github.com/hashicorp/terraform-plugin-log/tflog" - dctapi "github.com/delphix/dct-sdk-go/v14" + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -44,9 +45,8 @@ func resourceVdbGroupCreate(ctx context.Context, d *schema.ResourceData, meta in client := meta.(*apiClient).client - apiRes, httpRes, err := client.VDBGroupsApi.CreateVdbGroup(ctx).CreateVDBGroupRequest(*dctapi.NewCreateVDBGroupRequest( + apiRes, httpRes, err := client.VDBGroupsAPI.CreateVdbGroup(ctx).CreateVDBGroupRequest(*dctapi.NewCreateVDBGroupRequest( d.Get("name").(string), - toStringArray(d.Get("vdb_ids")), )).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, 
httpRes, err); diags != nil { @@ -71,7 +71,7 @@ func resourceVdbGroupRead(ctx context.Context, d *schema.ResourceData, meta inte vdbGroupId := d.Id() tflog.Info(ctx, DLPX+INFO+"VdbGroupId: "+vdbGroupId) - apiRes, httpRes, err := client.VDBGroupsApi.GetVdbGroup(ctx, vdbGroupId).Execute() + apiRes, httpRes, err := client.VDBGroupsAPI.GetVdbGroup(ctx, vdbGroupId).Execute() if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { return diags @@ -97,7 +97,7 @@ func resourceVdbGroupDelete(ctx context.Context, d *schema.ResourceData, meta in deleteVdbParams := dctapi.NewDeleteVDBParametersWithDefaults() deleteVdbParams.SetForce(false) - httpRes, err := client.VDBGroupsApi.DeleteVdbGroup(ctx, vdbGroupId).Execute() + httpRes, err := client.VDBGroupsAPI.DeleteVdbGroup(ctx, vdbGroupId).Execute() if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil { return diags diff --git a/internal/provider/resource_vdb_group_test.go b/internal/provider/resource_vdb_group_test.go index a21f47e..83e8738 100644 --- a/internal/provider/resource_vdb_group_test.go +++ b/internal/provider/resource_vdb_group_test.go @@ -64,7 +64,7 @@ func testAccCheckDctVdbGroupResourceExists(vdbResourceName string, vdbGroupResou client := testAccProvider.Meta().(*apiClient).client - res, _, err := client.VDBGroupsApi.GetVdbGroup(context.Background(), vdbGroupId).Execute() + res, _, err := client.VDBGroupsAPI.GetVdbGroup(context.Background(), vdbGroupId).Execute() if err != nil { return err } @@ -88,7 +88,7 @@ func testAccCheckVdbGroupDestroy(s *terraform.State) error { vdbGroupId := rs.Primary.ID - _, httpResp, _ := client.VDBGroupsApi.GetVdbGroup(context.Background(), vdbGroupId).Execute() + _, httpResp, _ := client.VDBGroupsAPI.GetVdbGroup(context.Background(), vdbGroupId).Execute() if httpResp == nil { return fmt.Errorf("VDB Group has not been deleted") } diff --git a/internal/provider/resource_vdb_test.go b/internal/provider/resource_vdb_test.go index fa8af22..72847b7 
100644 --- a/internal/provider/resource_vdb_test.go +++ b/internal/provider/resource_vdb_test.go @@ -9,7 +9,7 @@ import ( "regexp" "testing" - dctapi "github.com/delphix/dct-sdk-go/v14" + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -138,7 +138,7 @@ func testAccCheckDctVDBBookmarkConfigBasic() string { provisionVDBBySnapshotParameters.SetAutoSelectRepository(true) provisionVDBBySnapshotParameters.SetSourceDataId(os.Getenv("DATASOURCE_ID")) - vdb_req := client.VDBsApi.ProvisionVdbBySnapshot(context.Background()) + vdb_req := client.VDBsAPI.ProvisionVdbBySnapshot(context.Background()) vdb_res, vdb_http_res, vdb_err := vdb_req.ProvisionVDBBySnapshotParameters(*provisionVDBBySnapshotParameters).Execute() if diags := apiErrorResponseHelper(context.Background(), vdb_res, vdb_http_res, vdb_err); diags != nil { @@ -160,7 +160,7 @@ func testAccCheckDctVDBBookmarkConfigBasic() string { bookmark.SetVdbIds([]string{vdb_id}) bookmark.SetName(acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - bookmark_req := client.BookmarksApi.CreateBookmark(context.Background()).BookmarkCreateParameters(*bookmark) + bookmark_req := client.BookmarksAPI.CreateBookmark(context.Background()).BookmarkCreateParameters(*bookmark) bk_res, bk_http_res, bk_err := bookmark_req.Execute() if diags := apiErrorResponseHelper(context.Background(), bk_res, bk_http_res, bk_err); diags != nil { @@ -206,7 +206,7 @@ func testAccCheckDctVdbResourceExists(n string) resource.TestCheckFunc { client := testAccProvider.Meta().(*apiClient).client - res, _, err := client.VDBsApi.GetVdbById(context.Background(), vdbId).Execute() + res, _, err := client.VDBsAPI.GetVdbById(context.Background(), vdbId).Execute() if err != nil { return err @@ -236,7 +236,7 @@ func testAccCheckDctAppDataVdbResourceExists(n string) resource.TestCheckFunc 
{ client := testAccProvider.Meta().(*apiClient).client - res, _, err := client.VDBsApi.GetVdbById(context.Background(), vdbId).Execute() + res, _, err := client.VDBsAPI.GetVdbById(context.Background(), vdbId).Execute() if err != nil { return err @@ -266,13 +266,13 @@ func testAccCheckDctVdbBookmarkResourceExists() resource.TestCheckFunc { client := testAccProvider.Meta().(*apiClient).client - get_vdb_response, _, get_vdb_error := client.VDBsApi.GetVdbById(context.Background(), vdbId).Execute() + get_vdb_response, _, get_vdb_error := client.VDBsAPI.GetVdbById(context.Background(), vdbId).Execute() if get_vdb_error != nil { return get_vdb_error } - get_bookmark_response, _, get_bookmark_error := client.BookmarksApi.GetBookmarkById(context.Background(), bookmark_id).Execute() + get_bookmark_response, _, get_bookmark_error := client.BookmarksAPI.GetBookmarkById(context.Background(), bookmark_id).Execute() if get_bookmark_error != nil { return get_bookmark_error @@ -298,7 +298,7 @@ func testAccCheckVdbDestroy(s *terraform.State) error { vdbId := rs.Primary.ID - _, httpResp, _ := client.VDBsApi.GetVdbById(context.Background(), vdbId).Execute() + _, httpResp, _ := client.VDBsAPI.GetVdbById(context.Background(), vdbId).Execute() if httpResp == nil { return fmt.Errorf("VDB has not been deleted") @@ -318,7 +318,7 @@ func testAccCheckVdbDestroyBookmark(s *terraform.State) error { print("Deleting parent vdb " + vdb_id) deleteVdbParams := dctapi.NewDeleteVDBParametersWithDefaults() deleteVdbParams.SetForce(false) - client.VDBsApi.DeleteVdb(context.Background(), vdb_id).DeleteVDBParameters(*deleteVdbParams).Execute() + client.VDBsAPI.DeleteVdb(context.Background(), vdb_id).DeleteVDBParameters(*deleteVdbParams).Execute() return testAccCheckVdbDestroy(s) } diff --git a/internal/provider/utility.go b/internal/provider/utility.go index 95fc075..54060d1 100644 --- a/internal/provider/utility.go +++ b/internal/provider/utility.go @@ -8,7 +8,7 @@ import ( "strconv" "time" - dctapi 
"github.com/delphix/dct-sdk-go/v14" + dctapi "github.com/delphix/dct-sdk-go/v22" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -21,7 +21,7 @@ var SLEEP_TIME = 10 // Returns the status of the given JOB-ID and Error body as a string func PollJobStatus(job_id string, ctx context.Context, client *dctapi.APIClient) (string, string) { - res, httpRes, err := client.JobsApi.GetJobById(ctx, job_id).Execute() + res, httpRes, err := client.JobsAPI.GetJobById(ctx, job_id).Execute() if err != nil { resBody, resBodyErr := ResponseBodyToString(ctx, httpRes.Body) if resBodyErr != nil { @@ -35,7 +35,7 @@ func PollJobStatus(job_id string, ctx context.Context, client *dctapi.APIClient) var i = 0 for res.GetStatus() == Pending || res.GetStatus() == Started { time.Sleep(time.Duration(JOB_STATUS_SLEEP_TIME) * time.Second) - res, httpRes, err = client.JobsApi.GetJobById(ctx, job_id).Execute() + res, httpRes, err = client.JobsAPI.GetJobById(ctx, job_id).Execute() if err != nil { if httpRes == nil { return "", "Received nil response for Job ID " + job_id @@ -85,9 +85,13 @@ func PollForStatusCode(ctx context.Context, apiCall func() (interface{}, *http.R var httpRes *http.Response var err error for i := 0; maxRetry == 0 || i < maxRetry; i++ { - if res, httpRes, err = apiCall(); httpRes.StatusCode == statusCode { + res, httpRes, err = apiCall() + if httpRes.StatusCode == statusCode { tflog.Info(ctx, DLPX+INFO+"[OK] Breaking poll - Status "+strconv.Itoa(statusCode)+" reached.") return res, nil + } else if httpRes.StatusCode == http.StatusNotFound { + tflog.Info(ctx, DLPX+INFO+"[404 Not found] Breaking poll - Status "+strconv.Itoa(statusCode)+" reached.") + break } time.Sleep(time.Duration(STATUS_POLL_SLEEP_TIME) * time.Second) } @@ -152,6 +156,37 @@ func flattenAdditionalMountPoints(additional_mount_points []dctapi.AdditionalMou return make([]interface{}, 0) } +func 
flattenHooks(hooks []dctapi.Hook) []interface{} { + if hooks != nil { + returnedHooks := make([]interface{}, len(hooks)) + for i, hook := range hooks { + returnedHook := make(map[string]interface{}) + returnedHook["name"] = hook.GetName() + returnedHook["command"] = hook.GetCommand() + returnedHook["shell"] = hook.GetShell() + returnedHook["element_id"] = hook.GetElementId() + returnedHook["has_credentials"] = hook.GetHasCredentials() + returnedHooks[i] = returnedHook + } + return returnedHooks + } + return make([]interface{}, 0) +} + +func flattenTags(tags []dctapi.Tag) []interface{} { + if tags != nil { + returnedTags := make([]interface{}, len(tags)) + for i, tag := range tags { + returnedTag := make(map[string]interface{}) + returnedTag["key"] = tag.GetKey() + returnedTag["value"] = tag.GetValue() + returnedTags[i] = returnedTag + } + return returnedTags + } + return make([]interface{}, 0) +} + func apiErrorResponseHelper(ctx context.Context, res interface{}, httpRes *http.Response, err error) diag.Diagnostics { // Helper function to return Diagnostics object if there is // a failure during API call. 
@@ -183,7 +218,7 @@ func PollSnapshotStatus(d *schema.ResourceData, ctx context.Context, client *dct var api_err error maxAttempts := int(math.Round(float64(wait_time.(int)*60) / float64(STATUS_POLL_SLEEP_TIME))) for attempt := 1; attempt <= maxAttempts; attempt++ { - snapshotRes, _, api_err = client.DSourcesApi.GetDsourceSnapshots(ctx, d.Id()).Execute() + snapshotRes, _, api_err = client.DSourcesAPI.GetDsourceSnapshots(ctx, d.Id()).Execute() if api_err != nil { tflog.Error(ctx, DLPX+ERROR+"Error fetching dSource snapshots: "+api_err.Error()) break // Exit the loop on error to avoid unnecessary retries @@ -207,3 +242,60 @@ func PollSnapshotStatus(d *schema.ResourceData, ctx context.Context, client *dct } } } + +func disableVDB(ctx context.Context, client *dctapi.APIClient, vdbId string) diag.Diagnostics { + tflog.Info(ctx, DLPX+INFO+"Disable VDB "+vdbId) + disableVDBParam := dctapi.NewDisableVDBParameters() + apiRes, httpRes, err := client.VDBsAPI.DisableVdb(ctx, vdbId).DisableVDBParameters(*disableVDBParam).Execute() + if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { + return diags + } + job_res, job_err := PollJobStatus(*apiRes.Job.Id, ctx, client) + if job_err != "" { + tflog.Warn(ctx, DLPX+WARN+"VDB disable Job Polling failed. 
Error: "+job_err) + //return here + } + tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res) + if job_res == Failed || job_res == Canceled || job_res == Abandoned { + tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+*apiRes.Job.Id+"!") + return diag.Errorf("[NOT OK] Job %s %s with error %s", *apiRes.Job.Id, job_res, job_err) + } + return nil +} + +func enableVDB(ctx context.Context, client *dctapi.APIClient, vdbId string) diag.Diagnostics { + tflog.Info(ctx, DLPX+INFO+"Enable VDB "+vdbId) + enableVDBParam := dctapi.NewEnableVDBParameters() + apiRes, httpRes, err := client.VDBsAPI.EnableVdb(ctx, vdbId).EnableVDBParameters(*enableVDBParam).Execute() + if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil { + return diags + } + job_res, job_err := PollJobStatus(*apiRes.Job.Id, ctx, client) + if job_err != "" { + tflog.Warn(ctx, DLPX+WARN+"VDB enable Job Polling failed. Error: "+job_err) + } + tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res) + if job_res == Failed || job_res == Canceled || job_res == Abandoned { + tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+*apiRes.Job.Id+"!") + return diag.Errorf("[NOT OK] Job %s %s with error %s", *apiRes.Job.Id, job_res, job_err) + } + return nil +} + +func revertChanges(d *schema.ResourceData, changedKeys []string) { + for _, key := range changedKeys { + old, _ := d.GetChange(key) + d.Set(key, old) + } +} + +func toTagArray(array interface{}) []dctapi.Tag { + items := []dctapi.Tag{} + for _, item := range array.([]interface{}) { + item_map := item.(map[string]interface{}) + tag_item := dctapi.NewTag(item_map["key"].(string), item_map["value"].(string)) + + items = append(items, *tag_item) + } + return items +}